diff --git a/cfg/cis-1.24-microk8s/config.yaml b/cfg/cis-1.24-microk8s/config.yaml
new file mode 100644
index 000000000..b7839455a
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/cis-1.24-microk8s/controlplane.yaml b/cfg/cis-1.24-microk8s/controlplane.yaml
new file mode 100644
index 000000000..49c4e0016
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/controlplane.yaml
@@ -0,0 +1,46 @@
+---
+controls:
+version: "cis-1.24"
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+          implemented in place of client certificates.
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-policy-file"
+              set: true
+        remediation: |
+          Create an audit policy file for your cluster.
+        scored: false
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        type: "manual"
+        remediation: |
+          Review the audit policy provided for the cluster and ensure that it covers
+          at least the following areas:
+          - Access to Secrets managed by the cluster. Care should be taken to only
+            log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
+            order to avoid risk of logging sensitive data.
+          - Modification of Pod and Deployment objects.
+          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+          For most requests, minimally logging at the Metadata level is recommended
+          (the most basic level of logging).
+        scored: false
diff --git a/cfg/cis-1.24-microk8s/etcd.yaml b/cfg/cis-1.24-microk8s/etcd.yaml
new file mode 100644
index 000000000..b3ba93e37
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/etcd.yaml
@@ -0,0 +1,121 @@
+---
+controls:
+version: "cis-1.24"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--cert-file"
+              env: "ETCD_CERT_FILE"
+            - flag: "--key-file"
+              env: "ETCD_KEY_FILE"
+        remediation: |
+          Not applicable. MicroK8s uses dqlite and the communication to this service is done through a
+          local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible
+          to users with root permissions.
+        scored: false
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              env: "ETCD_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Not applicable. MicroK8s uses dqlite and the communication to this service is done through a
+          local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible
+          to users with root permissions.
+        scored: false
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              set: false
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Not applicable. MicroK8s uses dqlite and the communication to this service is done through a
+          local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible
+          to users with root permissions.
+        scored: false
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
+        audit: "if test -e /var/snap/microk8s/current/var/kubernetes/backend/cluster.crt && test -e /var/snap/microk8s/current/var/kubernetes/backend/cluster.key; then echo 'certs-found'; fi"
+        tests:
+          test_items:
+            - flag: "certs-found"
+        remediation: |
+          The certificate pair for dqlite and TLS peer communication is
+          /var/snap/microk8s/current/var/kubernetes/backend/cluster.crt and
+          /var/snap/microk8s/current/var/kubernetes/backend/cluster.key.
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/cat $etcdconf | /bin/grep enable-tls || true"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-tls"
+              compare:
+                op: eq
+                value: true
+            - flag: "--enable-tls"
+              set: false
+        remediation: |
+          MicroK8s uses dqlite and peer communication uses TLS if the --enable-tls flag is set in
+          /var/snap/microk8s/current/args/k8s-dqlite; it is set to true by default.
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              set: false
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Not applicable. MicroK8s uses dqlite and TLS peer communication uses the certificates
+          created upon the snap creation.
+        scored: false
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--trusted-ca-file"
+              env: "ETCD_TRUSTED_CA_FILE"
+        remediation: |
+          Not applicable. MicroK8s uses dqlite and TLS peer communication uses the certificates
+          created upon the snap creation.
+        scored: false
diff --git a/cfg/cis-1.24-microk8s/master.yaml b/cfg/cis-1.24-microk8s/master.yaml
new file mode 100644
index 000000000..d7f537333
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/master.yaml
@@ -0,0 +1,948 @@
+---
+controls:
+version: "cis-1.24"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Control Plane Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          control plane node.
+          For example, chmod 644 $apiserverconf
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $apiserverconf
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $controllermanagerconf
+        scored: true
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $controllermanagerconf
+        scored: true
+
+      - id: 1.1.5
+        text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $schedulerconf
+        scored: true
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $schedulerconf
+        scored: true
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $etcdconf
+        scored: true
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $etcdconf
+        scored: true
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          find /var/snap/microk8s/current/args/cni-network/10-calico.conflist -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 <path/to/cni/files>
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
+        audit: |
+          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
+          find /var/snap/microk8s/current/args/cni-network/10-calico.conflist -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root <path/to/cni/files>
+        scored: false
+
+      # Etcd is not running on MicroK8s master nodes
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
+        audit: |
+          DATA_DIR='/var/snap/microk8s/current/var/kubernetes/backend/'
+          if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
+          stat -c permissions=%a "$DATA_DIR"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "700"
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the command 'ps -ef | grep etcd'.
+          Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 /var/snap/microk8s/current/var/kubernetes/backend/
+        scored: true
+
+      # Etcd is not running on MicroK8s master nodes
+      - id: 1.1.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
+        audit: |
+          DATA_DIR='/var/snap/microk8s/current/var/kubernetes/backend/'
+          if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
+          stat -c %U:%G "$DATA_DIR"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the command 'ps -ef | grep etcd'.
+          Run the below command (based on the etcd data directory found above).
+          For example, chown root:root /var/snap/microk8s/current/var/kubernetes/backend/
+        scored: true
+
+      - id: 1.1.13
+        text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e /var/snap/microk8s/current/credentials/client.config; then stat -c permissions=%a /var/snap/microk8s/current/credentials/client.config; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 /var/snap/microk8s/current/credentials/client.config
+        scored: true
+
+      - id: 1.1.14
+        text: "Ensure that the admin.conf file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e /var/snap/microk8s/current/credentials/client.config; then stat -c %U:%G /var/snap/microk8s/current/credentials/client.config; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root /var/snap/microk8s/current/credentials/client.config
+        scored: true
+
+      - id: 1.1.15
+        text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $schedulerkubeconfig
+        scored: true
+
+      - id: 1.1.16
+        text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $schedulerkubeconfig
+        scored: true
+
+      - id: 1.1.17
+        text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $controllermanagerkubeconfig
+        scored: true
+
+      - id: 1.1.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $controllermanagerkubeconfig
+        scored: true
+
+      - id: 1.1.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
+        audit: "find /var/snap/microk8s/current/certs/ | xargs stat -c %U:%G"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown -R root:root /var/snap/microk8s/current/certs/
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
+        audit: "find /var/snap/microk8s/current/certs/ -name '*.crt' | xargs stat -c permissions=%a"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod -R 600 /var/snap/microk8s/current/certs/*.crt
+        scored: false
+
+      - id: 1.1.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
+        audit: "find /var/snap/microk8s/current/certs/ -name '*.key' | xargs stat -c permissions=%a"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod -R 600 /var/snap/microk8s/current/certs/*.key
+        scored: false
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+            - flag: "--anonymous-auth"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --anonymous-auth=false
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the control plane node and remove the --token-auth-file=<filename> parameter.
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: "DenyServiceExternalIPs"
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and remove the `DenyServiceExternalIPs`
+          from enabled admission plugins.
+        scored: true
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+            - flag: "--kubelet-client-key"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and kubelets. Then, edit API server pod specification file
+          $apiserverconf on the control plane node and set the
+          kubelet client certificate and key parameters as below.
+          --kubelet-client-certificate=<path/to/client-certificate-file>
+          --kubelet-client-key=<path/to/client-key-file>
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+        remediation: |
+          Follow the Kubernetes documentation and setup the TLS connection between
+          the apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the control plane node and set the
+          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+          --kubelet-certificate-authority=<ca-string>
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
+          One such example could be as below.
+          --authorization-mode=RBAC
+        scored: true
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
+          --authorization-mode=Node,RBAC
+        scored: true
+
+      - id: 1.2.8
+        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "RBAC"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
+          for example `--authorization-mode=Node,RBAC`.
+        scored: true
+
+      - id: 1.2.9
+        text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "EventRateLimit"
+        remediation: |
+          Follow the Kubernetes documentation and set the desired limits in a configuration file.
+          Then, edit the API server pod specification file $apiserverconf
+          and set the below parameters.
+          --enable-admission-plugins=...,EventRateLimit,...
+          --admission-control-config-file=<path/to/configuration/file>
+        scored: false
+
+      - id: 1.2.10
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: AlwaysAdmit
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
+          value that does not include AlwaysAdmit.
+        scored: true
+
+      - id: 1.2.11
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "AlwaysPullImages"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to include
+          AlwaysPullImages.
+          --enable-admission-plugins=...,AlwaysPullImages,...
+        scored: false
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to include
+          SecurityContextDeny, unless PodSecurityPolicy is already in place.
+          --enable-admission-plugins=...,SecurityContextDeny,...
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "ServiceAccount"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
+          value that does not include ServiceAccount.
+        scored: true
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+        remediation: |
+          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to a
+          value that includes NodeRestriction.
+          --enable-admission-plugins=...,NodeRestriction,...
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the --secure-port argument is not set to 0 (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and either remove the --secure-port parameter or
+          set it to a different (non-zero) desired port.
+        scored: true
+
+      - id: 1.2.17
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.2.18
+        text: "Ensure that the --audit-log-path argument is set (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-path"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --audit-log-path parameter to a suitable path and
+          file where you would like audit logs to be written, for example,
+          --audit-log-path=/var/log/apiserver/audit.log
+        scored: true
+
+      - id: 1.2.19
+        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxage"
+              compare:
+                op: gte
+                value: 30
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --audit-log-maxage parameter to 30
+          or as an appropriate number of days, for example,
+          --audit-log-maxage=30
+        scored: true
+
+      - id: 1.2.20
+        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxbackup"
+              compare:
+                op: gte
+                value: 10
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
+          value. For example,
+          --audit-log-maxbackup=10
+        scored: true
+
+      - id: 1.2.21
+        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxsize"
+              compare:
+                op: gte
+                value: 100
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
+          For example, to set it as 100 MB, --audit-log-maxsize=100
+        scored: true
+
+      - id: 1.2.22
+        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        type: manual
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          and set the below parameter as appropriate and if needed.
+          For example, --request-timeout=300s
+        scored: false
+
+      - id: 1.2.23
+        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --service-account-lookup=true
+          Alternatively, you can delete the --service-account-lookup parameter from this file so
+          that the default takes effect.
+        scored: true
+
+      - id: 1.2.24
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --service-account-key-file parameter
+          to the public key file for service accounts. For example,
+          --service-account-key-file=<filename>
+        scored: true
+
+      # MicroK8s does not use etcd. The API server talks to a local dqlite instance.
+      - id: 1.2.25
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate and key file parameters.
+          --etcd-certfile=<path/to/client-certificate-file>
+          --etcd-keyfile=<path/to/client-key-file>
+        scored: false
+
+      - id: 1.2.26
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the client certificate authority file.
+          --client-ca-file=<path/to/client-ca-file>
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate authority file parameter.
+          --etcd-cafile=<path/to/ca-file>
+        scored: false
+
+      - id: 1.2.29
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+          For example, --encryption-provider-config=</path/to/EncryptionConfig/File>
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(cat $apiserverconf | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+          on the control plane node and set the below parameter.
+          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+          TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
+          TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example, --terminated-pod-gc-threshold=10
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=<filename>
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=<path/to/file>
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "cat $schedulerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf file
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "cat $schedulerconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
diff --git a/cfg/cis-1.24-microk8s/node.yaml b/cfg/cis-1.24-microk8s/node.yaml
new file mode 100644
index 000000000..7b9278a54
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/node.yaml
@@ -0,0 +1,462 @@
+---
+controls:
+version: "cis-1.24"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi' "
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi' "
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
+        audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi' "
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi' "
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file chmod 600 <filename>
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root <filename>
+        scored: false
+
+      - id: 4.1.9
+        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)"
+        audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 600 $kubeletconf
+        scored: false
+
+      - id: 4.1.10
+        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)"
+        audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: false
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: "{.authentication.anonymous.enabled}"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
+          `false`.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          `--anonymous-auth=false`
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: "{.authorization.mode}"
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: "{.authentication.x509.clientCAFile}"
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=<path/to/client-ca-file>
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.4
+        text: "Verify that the --read-only-port argument is set to 0 (Manual)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: "{.readOnlyPort}"
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: "{.readOnlyPort}"
+              set: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: "{.streamingConnectionIdleTimeout}"
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: "{.streamingConnectionIdleTimeout}"
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: "{.protectKernelDefaults}"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: "{.makeIPTablesUtilChains}"
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: "{.makeIPTablesUtilChains}"
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: "{.eventRecordQPS}"
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: "{.tlsCertFile}"
+            - flag: --tls-private-key-file
+              path: "{.tlsPrivateKeyFile}"
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: "{.rotateCertificates}"
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: "{.rotateCertificates}"
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: "{.featureGates.RotateKubeletServerCertificate}"
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: "{.featureGates.RotateKubeletServerCertificate}"
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "cat $kubeletconf"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: "{range .tlsCipherSuites[:]}{}{','}{end}"
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
diff --git a/cfg/cis-1.24-microk8s/policies.yaml b/cfg/cis-1.24-microk8s/policies.yaml
new file mode 100644
index 000000000..3ab3eb4b0
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/policies.yaml
@@ -0,0 +1,269 @@
+---
+controls:
+version: "cis-1.24"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to Secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: false
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+      - id: 5.1.7
+        text: "Avoid use of system:masters group (Manual)"
+        type: "manual"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 5.1.8
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Standards"
+    checks:
+      - id: 5.2.1
+        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that either Pod Security Admission or an external policy control system is in place
+          for every namespace which contains user workloads.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of privileged containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of privileged containers.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostPID` containers.
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostIPC` containers.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostNetwork` containers.
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and setup image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+          An example is as below:
+            securityContext:
+              seccompProfile:
+                type: RuntimeDefault
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply SecurityContext to your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cfg/config.yaml b/cfg/config.yaml
index c224e53be..fc95ba56e 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -288,6 +288,7 @@ version_mapping:
   "aks-1.0": "aks-1.0"
   "ack-1.0": "ack-1.0"
   "cis-1.6-k3s": "cis-1.6-k3s"
+  "cis-1.24-microk8s": "cis-1.24-microk8s"
   "tkgi-1.2.53": "tkgi-1.2.53"
   "k3s-cis-1.7": "k3s-cis-1.7"
   "k3s-cis-1.23": "k3s-cis-1.23"
@@ -336,6 +337,12 @@ target_mapping:
     - "controlplane"
     - "etcd"
     - "policies"
+  "cis-1.24-microk8s":
+    - "master"
+    - "etcd"
+    - "node"
+    - "controlplane"
+    - "policies"
   "cis-1.7":
     - "master"
     - "node"
diff --git a/docs/architecture.md b/docs/architecture.md
index a464afb97..ba5a51c6c 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -13,24 +13,25 @@ Check the contents of the benchmark directory under `cfg` to see which targets a
 
 The following table shows the valid targets based on the CIS Benchmark version.
 
-| CIS Benchmark | Targets |
-|---------------|---------|
-| cis-1.5       | master, controlplane, node, etcd, policies |
-| cis-1.6       | master, controlplane, node, etcd, policies |
-| cis-1.20      | master, controlplane, node, etcd, policies |
-| cis-1.23      | master, controlplane, node, etcd, policies |
-| cis-1.24      | master, controlplane, node, etcd, policies |
-| cis-1.7       | master, controlplane, node, etcd, policies |
-| gke-1.0       | master, controlplane, node, etcd, policies, managedservices |
-| gke-1.2.0     | controlplane, node, policies, managedservices |
-| eks-1.0.1     | controlplane, node, policies, managedservices |
-| eks-1.1.0     | controlplane, node, policies, managedservices |
-| eks-1.2.0     | controlplane, node, policies, managedservices |
-| ack-1.0       | master, controlplane, node, etcd, policies, managedservices |
-| aks-1.0       | controlplane, node, policies, managedservices |
-| rh-0.7        | master,node|
-| rh-1.0        | master, controlplane, node, etcd, policies |
-| cis-1.6-k3s   | master, controlplane, node, etcd, policies |
+| CIS Benchmark        | Targets |
+|----------------------|---------|
+| cis-1.5              | master, controlplane, node, etcd, policies |
+| cis-1.6              | master, controlplane, node, etcd, policies |
+| cis-1.20             | master, controlplane, node, etcd, policies |
+| cis-1.23             | master, controlplane, node, etcd, policies |
+| cis-1.24             | master, controlplane, node, etcd, policies |
+| cis-1.7              | master, controlplane, node, etcd, policies |
+| gke-1.0              | master, controlplane, node, etcd, policies, managedservices |
+| gke-1.2.0            | controlplane, node, policies, managedservices |
+| eks-1.0.1            | controlplane, node, policies, managedservices |
+| eks-1.1.0            | controlplane, node, policies, managedservices |
+| eks-1.2.0            | controlplane, node, policies, managedservices |
+| ack-1.0              | master, controlplane, node, etcd, policies, managedservices |
+| aks-1.0              | controlplane, node, policies, managedservices |
+| rh-0.7               | master, node |
+| rh-1.0               | master, controlplane, node, etcd, policies |
+| cis-1.6-k3s          | master, controlplane, node, etcd, policies |
+| cis-1.24-microk8s    | master, controlplane, node, etcd, policies |
 
 The following table shows the valid DISA STIG versions