diff --git a/README.md b/README.md
index 874e4664990f19ef0c34c7938fa18dd9f5f8b7e1..5ab857b1e1d3703e9918f01e14a3cb48c2e46c20 100644
--- a/README.md
+++ b/README.md
@@ -56,9 +56,7 @@ kube-bench supports the tests for Kubernetes as defined in the [CIS Kubernetes B
 
 | CIS Kubernetes Benchmark | kube-bench config | Kubernetes versions |
 |---|---|---|
-| [1.3.0](https://workbench.cisecurity.org/benchmarks/602) | cis-1.3 | 1.11-1.12 |
-| [1.4.1](https://workbench.cisecurity.org/benchmarks/2351) | cis-1.4 | 1.13-1.14 |
-| [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 |
+| [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 |
 | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16- |
 | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE |
 | [EKS 1.0.0](https://workbench.cisecurity.org/benchmarks/5190) | eks-1.0 | EKS |
@@ -99,18 +97,18 @@ Or run kube-bench against a worker node using the tests for Kubernetes version 1
 kube-bench node --version 1.13
 ```
 
-`kube-bench` will map the `--version` to the corresponding CIS Benchmark version as indicated by the mapping table above. For example, if you specify `--version 1.13`, this is mapped to CIS Benchmark version `cis-1.14`.
+`kube-bench` will map the `--version` to the corresponding CIS Benchmark version as indicated by the mapping table above. For example, if you specify `--version 1.15`, this is mapped to CIS Benchmark version `cis-1.5`.
 
 Alternatively, you can specify `--benchmark` to run a specific CIS Benchmark version:
 
 ```
-kube-bench node --benchmark cis-1.4
+kube-bench node --benchmark cis-1.5
 ```
 
 If you want to target specific CIS Benchmark `target` (i.e master, node, etcd, etc...)
 you can use the `run --targets` subcommand.
 ```
-kube-bench --benchmark cis-1.4 run --targets master,node
+kube-bench --benchmark cis-1.5 run --targets master,node
 ```
 or
 ```
@@ -120,8 +118,6 @@ kube-bench --benchmark cis-1.5 run --targets master,node,etcd,policies
 The following table shows the valid targets based on the CIS Benchmark version.
 | CIS Benchmark | Targets |
 |---|---|
-| cis-1.3| master, node |
-| cis-1.4| master, node |
 | cis-1.5| master, controlplane, node, etcd, policies |
 | cis-1.6| master, controlplane, node, etcd, policies |
 | gke-1.0| master, controlplane, node, etcd, policies, managedservices |
@@ -130,7 +126,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
 If no targets are specified, `kube-bench` will determine the appropriate targets based on the CIS Benchmark version.
 
 `controls` for the various versions of CIS Benchmark can be found in directories
-with same name as the CIS Benchmark versions under `cfg/`, for example `cfg/cis-1.4`.
+with the same name as the CIS Benchmark versions under `cfg/`, for example `cfg/cis-1.5`.
 
 **Note:**  **`It is an error to specify both --version and --benchmark flags together`**
 
diff --git a/cfg/cis-1.3/config.yaml b/cfg/cis-1.3/config.yaml
deleted file mode 100644
index b7839455a64a067a15886474164b6d4476c49133..0000000000000000000000000000000000000000
--- a/cfg/cis-1.3/config.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/cis-1.3/master.yaml b/cfg/cis-1.3/master.yaml
deleted file mode 100644
index 2239ceed79c5cccc9bb1bf6a523d144466227b55..0000000000000000000000000000000000000000
--- a/cfg/cis-1.3/master.yaml
+++ /dev/null
@@ -1,1414 +0,0 @@
----
-controls:
-version: 1.11
-id: 1
-text: "Master Node Security Configuration"
-type: "master"
-groups:
-  - id: 1.1
-    text: "API Server"
-    checks:
-      - id: 1.1.1
-        text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--anonymous-auth"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --anonymous-auth=false
-        scored: true
-
-      - id: 1.1.2
-        text: "Ensure that the --basic-auth-file argument is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--basic-auth-file"
-              set: false
-        remediation: |
-          Follow the documentation and configure alternate mechanisms for authentication. Then,
-          edit the API server pod specification file $apiserverconf
-          on the master node and remove the --basic-auth-file=<filename>
-          parameter.
-        scored: true
-
-      - id: 1.1.3
-        text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--insecure-allow-any-token"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and remove the --insecure-allow-any-token
-          parameter.
-        scored: true
-
-      - id: 1.1.4
-        text: "Ensure that the --kubelet-https argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--kubelet-https"
-              compare:
-                op: eq
-                value: true
-              set: true
-            - flag: "--kubelet-https"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and remove the --kubelet-https parameter.
-        scored: true
-
-      - id: 1.1.5
-        text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--insecure-bind-address"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and remove the --insecure-bind-address
-          parameter.
-        scored: true
-
-      - id: 1.1.6
-        text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--insecure-port"
-              compare:
-                op: eq
-                value: 0
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          apiserver.yaml on the master node and set the below parameter.
-          --insecure-port=0
-        scored: true
-
-      - id: 1.1.7
-        text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--secure-port"
-              compare:
-                op: gt
-                value: 0
-              set: true
-            - flag: "--secure-port"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and either remove the --secure-port parameter or
-          set it to a different (non-zero) desired port.
-        scored: true
-
-      - id: 1.1.8
-        text: "Ensure that the --profiling argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--profiling"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --profiling=false
-        scored: true
-
-      - id: 1.1.9
-        text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--repair-malformed-updates"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --repair-malformed-updates=false
-        scored: true
-
-      - id: 1.1.10
-        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: nothave
-                value: AlwaysAdmit
-              set: true
-            - flag: "--enable-admission-plugins"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that does not include AlwaysAdmit.
-        scored: true
-
-      - id: 1.1.11
-        text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "AlwaysPullImages"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins to
-          include AlwaysPullImages.
-          --enable-admission-plugins=...,AlwaysPullImages,...
-        scored: true
-
-      - id: 1.1.12
-        text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "DenyEscalatingExec"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that includes DenyEscalatingExec.
-          --enable-admission-plugins=...,DenyEscalatingExec,...
-        scored: true
-
-      - id: 1.1.13
-        text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "SecurityContextDeny"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to
-          include SecurityContextDeny.
-          --enable-admission-plugins=...,SecurityContextDeny,...
-        scored: true
-
-      - id: 1.1.14
-        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--disable-admission-plugins"
-              compare:
-                op: nothave
-                value: "NamespaceLifecycle"
-              set: true
-            - flag: "--disable-admission-plugins"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --disable-admission-plugins parameter to
-          ensure it does not include NamespaceLifecycle.
-          --disable-admission-plugins=...,NamespaceLifecycle,...
-        scored: true
-
-      - id: 1.1.15
-        text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--audit-log-path"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --audit-log-path parameter to a suitable
-          path and file where you would like audit logs to be written, for example:
-          --audit-log-path=/var/log/apiserver/audit.log
-        scored: true
-
-      - id: 1.1.16
-        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--audit-log-maxage"
-              compare:
-                op: gte
-                value: 30
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --audit-log-maxage parameter to 30 or
-          as an appropriate number of days: --audit-log-maxage=30
-        scored: true
-
-      - id: 1.1.17
-        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--audit-log-maxbackup"
-              compare:
-                op: gte
-                value: 10
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --audit-log-maxbackup parameter to 10
-          or to an appropriate value.
-          --audit-log-maxbackup=10
-        scored: true
-
-      - id: 1.1.18
-        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--audit-log-maxsize"
-              compare:
-                op: gte
-                value: 100
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --audit-log-maxsize parameter to an
-          appropriate size in MB. For example, to set it as 100 MB:
-          --audit-log-maxsize=100
-        scored: true
-
-      - id: 1.1.19
-        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--authorization-mode"
-              compare:
-                op: nothave
-                value: "AlwaysAllow"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --authorization-mode parameter to
-          values other than AlwaysAllow. One such example could be as below.
-          --authorization-mode=RBAC
-        scored: true
-
-      - id: 1.1.20
-        text: "Ensure that the --token-auth-file parameter is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--token-auth-file"
-              set: false
-        remediation: |
-          Follow the documentation and configure alternate mechanisms for authentication. Then,
-          edit the API server pod specification file $apiserverconf
-          on the master node and remove the --token-auth-file=<filename>
-          parameter.
-        scored: true
-
-      - id: 1.1.21
-        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--kubelet-certificate-authority"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and setup the TLS connection between the
-          apiserver and kubelets. Then, edit the API server pod specification file
-          $apiserverconf on the master node and set the --kubelet-certificate-authority
-          parameter to the path to the cert file for the certificate authority.
-          --kubelet-certificate-authority=<ca-string>
-        scored: true
-
-      - id: 1.1.22
-        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: and
-          test_items:
-            - flag: "--kubelet-client-certificate"
-              set: true
-            - flag: "--kubelet-client-key"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the
-          apiserver and kubelets. Then, edit API server pod specification file
-          $apiserverconf on the master node and set the
-          kubelet client certificate and key parameters as below.
-          --kubelet-client-certificate=<path/to/client-certificate-file>
-          --kubelet-client-key=<path/to/client-key-file>
-        scored: true
-
-      - id: 1.1.23
-        text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--service-account-lookup"
-              compare:
-                op: eq
-                value: true
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --service-account-lookup=true
-        scored: true
-
-      - id: 1.1.24
-        text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "PodSecurityPolicy"
-              set: true
-        remediation: |
-          Follow the documentation and create Pod Security Policy objects as per your environment.
-          Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that includes PodSecurityPolicy :
-          --enable-admission-plugins=...,PodSecurityPolicy,...
-          Then restart the API Server.
-        scored: true
-
-      - id: 1.1.25
-        text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--service-account-key-file"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --service-account-key-file parameter
-          to the public key file for service accounts:
-          --service-account-key-file=<filename>
-        scored: true
-
-      - id: 1.1.26
-        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as
-          appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: and
-          test_items:
-            - flag: "--etcd-certfile"
-              set: true
-            - flag: "--etcd-keyfile"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the
-          apiserver and etcd. Then, edit the API server pod specification file
-          $apiserverconf on the master node and set the etcd
-          certificate and key file parameters.
-          --etcd-certfile=<path/to/client-certificate-file>
-          --etcd-keyfile=<path/to/client-key-file>
-        scored: true
-
-      - id: 1.1.27
-        text: "Ensure that the admission control plugin ServiceAccount is set(Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "ServiceAccount"
-              set: true
-            - flag: "--enable-admission-plugins"
-              set: false
-        remediation: |
-          Follow the documentation and create ServiceAccount objects as per your environment.
-          Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that includes ServiceAccount.
-          --enable-admission-plugins=...,ServiceAccount,...
-        scored: true
-
-      - id: 1.1.28
-        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
-        as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: and
-          test_items:
-            - flag: "--tls-cert-file"
-              set: true
-            - flag: "--tls-private-key-file"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-          Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the TLS certificate and private key file
-          parameters.
-          --tls-cert-file=<path/to/tls-certificate-file>
-          --tls-private-key-file=<path/to/tls-key-file>
-        scored: true
-
-      - id: 1.1.29
-        text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--client-ca-file"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-          Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the client certificate authority file.
-          --client-ca-file=<path/to/client-ca-file>
-        scored: true
-
-      - id: 1.1.30
-        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--tls-cipher-suites"
-              compare:
-                op: has
-                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-        scored: false
-
-      - id: 1.1.31
-        text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--etcd-cafile"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the
-          apiserver and etcd. Then, edit the API server pod specification file
-          $apiserverconf on the master node and set the etcd
-          certificate authority file parameter.
-          --etcd-cafile=<path/to/ca-file>
-        scored: true
-
-      - id: 1.1.32
-        text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--authorization-mode"
-              compare:
-                op: has
-                value: "Node"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --authorization-mode parameter to a
-          value that includes Node.
-          --authorization-mode=Node,RBAC
-        scored: true
-
-      - id: 1.1.33
-        text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "NodeRestriction"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and configure NodeRestriction plug-in on
-          kubelets. Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that includes NodeRestriction.
-          --enable-admission-plugins=...,NodeRestriction,...
-        scored: true
-
-      - id: 1.1.34
-        text: "Ensure that the --experimental-encryption-provider-config argument is
-        set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--experimental-encryption-provider-config"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and configure a EncryptionConfig file.
-          Then, edit the API server pod specification file $apiserverconf on the
-          master node and set the --experimental-encryption-provider-config parameter
-          to the path of that file:
-          --experimental-encryption-provider-config=</path/to/EncryptionConfig/File>
-        scored: true
-
-      - id: 1.1.35
-        text: "Ensure that the encryption provider is set to aescbc (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file,
-          choose aescbc as the encryption provider.
-          For example,
-          kind: EncryptionConfig
-          apiVersion: v1
-          resources:
-            - resources:
-              - secrets
-                providers:
-                - aescbc:
-                    keys:
-                    - name: key1
-                      secret: <32-byte base64-encoded secret>
-        scored: true
-
-      - id: 1.1.36
-        text: "Ensure that the admission control plugin EventRateLimit is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "EventRateLimit"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set the desired limits in a
-          configuration file. Then, edit the API server pod specification file
-          $apiserverconf and set the below parameters.
-          --enable-admission-plugins=...,EventRateLimit,...
-          --admission-control-config-file=<path/to/configuration/file>
-        scored: true
-
-      - id: 1.1.37a
-        text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--feature-gates"
-              compare:
-                op: nothave
-                value: "AdvancedAuditing=false"
-              set: true
-            - flag: "--feature-gates"
-              set: false
-        remediation: |
-          Follow the Kubernetes documentation and set the desired audit policy in the
-          /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
-          and set the below parameters.
-          --audit-policy-file=/etc/kubernetes/audit-policy.yaml
-        scored: true
-
-      - id: 1.1.37b
-        text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--audit-policy-file"
-              compare:
-                op: eq
-                value: "/etc/kubernetes/audit-policy.yaml"
-              set: true
-            - flag: "--audit-policy-file"
-              compare:
-                op: eq
-                value: "/etc/kubernetes/audit-policy.yml"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set the desired audit policy in the
-          /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
-          and set the below parameters.
-          --audit-policy-file=/etc/kubernetes/audit-policy.yaml
-        scored: true
-
-      - id: 1.1.38
-        text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--request-timeout"
-              set: false
-            - flag: "--request-timeout"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          and set the below parameter as appropriate and if needed. For example,
-          --request-timeout=300s
-        scored: true
-
-      - id: 1.1.39
-        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers ( Not Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--tls-cipher-suites"
-              compare:
-                op: eq
-                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
-              set: true
-        remediation: |
-          Edit the API server pod specification file /etc/kubernetes/manifests
-          kube-apiserver.yaml on the master node and set the below parameter.
-          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-        scored: false
-
-  - id: 1.2
-    text: "Scheduler"
-    checks:
-      - id: 1.2.1
-        text: "Ensure that the --profiling argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--profiling"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the Scheduler pod specification file $schedulerconf
-          file on the master node and set the below parameter.
-          --profiling=false
-        scored: true
-
-      - id: 1.2.2
-        text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
-        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--address"
-              compare:
-                op: eq
-                value: "127.0.0.1"
-              set: true
-            - flag: "--address"
-              set: false
-        remediation: |
-          Edit the Scheduler pod specification file $schedulerconf
-          file on the master node and ensure the correct value for the
-          --address parameter.
-        scored: true
-
-  - id: 1.3
-    text: "Controller Manager"
-    checks:
-      - id: 1.3.1
-        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--terminated-pod-gc-threshold"
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
-          --terminated-pod-gc-threshold=10
-        scored: true
-
-      - id: 1.3.2
-        text: "Ensure that the --profiling argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--profiling"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the below parameter.
-          --profiling=false
-        scored: true
-
-      - id: 1.3.3
-        text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--use-service-account-credentials"
-              compare:
-                op: noteq
-                value: false
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node to set the below parameter.
-          --use-service-account-credentials=true
-        scored: true
-
-      - id: 1.3.4
-        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--service-account-private-key-file"
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the --service-account-private-
-          key-file parameter to the private key file for service accounts.
-          --service-account-private-key-file=<filename>
-        scored: true
-
-      - id: 1.3.5
-        text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--root-ca-file"
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the --root-ca-file parameter to
-          the certificate bundle file.
-          --root-ca-file=<path/to/file>
-        scored: true
-
-      - id: 1.3.6
-        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--feature-gates"
-              compare:
-                op: eq
-                value: "RotateKubeletServerCertificate=true"
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          controller-manager.yaml on the master node and set the --feature-gates parameter to
-          include RotateKubeletServerCertificate=true.
-          --feature-gates=RotateKubeletServerCertificate=true
-        scored: true
-
-      - id: 1.3.7
-        text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--address"
-              compare:
-                op: eq
-                value: "127.0.0.1"
-              set: true
-            - flag: "--address"
-              set: false
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          controller-manager.yaml on the master node and ensure the correct value
-          for the --address parameter.
-        scored: true
-
-  - id: 1.4
-    text: "Configuration Files"
-    checks:
-      - id: 1.4.1
-        text: "Ensure that the API server pod specification file permissions are
-        set to 644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 $apiserverconf
-        scored: true
-
-      - id: 1.4.2
-        text: "Ensure that the API server pod specification file ownership is set to
-        root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root $apiserverconf
-        scored: true
-
-      - id: 1.4.3
-        text: "Ensure that the controller manager pod specification file
-        permissions are set to 644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 $controllermanagerconf
-        scored: true
-
-      - id: 1.4.4
-        text: "Ensure that the controller manager pod specification file
-        ownership is set to root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root $controllermanagerconf
-        scored: true
-
-      - id: 1.4.5
-        text: "Ensure that the scheduler pod specification file permissions are set
-        to 644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 $schedulerconf
-        scored: true
-
-      - id: 1.4.6
-        text: "Ensure that the scheduler pod specification file ownership is set to
-        root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root $schedulerconf
-        scored: true
-
-      - id: 1.4.7
-        text: "Ensure that the etcd pod specification file permissions are set to
-        644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 $etcdconf
-        scored: true
-
-      - id: 1.4.8
-        text: "Ensure that the etcd pod specification file ownership is set to
-        root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root $etcdconf
-        scored: true
-
-      - id: 1.4.9
-        text: "Ensure that the Container Network Interface file permissions are
-        set to 644 or more restrictive (Not Scored)"
-        audit: "stat -c permissions=%a <path/to/cni/files>"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 <path/to/cni/files>
-        scored: true
-
-      - id: 1.4.10
-        text: "Ensure that the Container Network Interface file ownership is set
-        to root:root (Not Scored)"
-        audit: "stat -c %U:%G <path/to/cni/files>"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root <path/to/cni/files>
-        scored: true
-
-      - id: 1.4.11
-        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
-        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "700"
-              set: true
-        remediation: |
-          On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-          from the below command:
-          ps -ef | grep $etcdbin
-          Run the below command (based on the etcd data directory found above). For example,
-          chmod 700 /var/lib/etcd
-        scored: true
-
-      - id: 1.4.12
-        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
-        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
-        tests:
-          test_items:
-            - flag: "etcd:etcd"
-              set: true
-        remediation: |
-          On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-          from the below command:
-          ps -ef | grep $etcdbin
-          Run the below command (based on the etcd data directory found above). For example,
-          chown etcd:etcd /var/lib/etcd
-        scored: true
-
-      - id: 1.4.13
-        text: "Ensure that the admin.conf file permissions are set to 644 or
-        more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 /etc/kubernetes/admin.conf
-        scored: true
-
-      - id: 1.4.14
-        text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root /etc/kubernetes/admin.conf
-        scored: true
-
-      - id: 1.4.15
-        text: "Ensure that the scheduler.conf file permissions are set to 644 or
-        more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c permissions=%a /etc/kubernetes/scheduler.conf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the
-          master node. For example, chmod 644 /etc/kubernetes/scheduler.conf
-        scored: true
-
-      - id: 1.4.16
-        text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the
-          master node. For example, chown root:root /etc/kubernetes/scheduler.conf
-        scored: true
-
-      - id: 1.4.17
-        text: "Ensure that the controller-manager.conf file permissions are set
-        to 644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c permissions=%a /etc/kubernetes/controller-manager.conf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the
-          master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf
-        scored: true
-
-      - id: 1.4.18
-        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the
-          master node. For example, chown root:root /etc/kubernetes/controller-manager.conf
-        scored: true
-
-  - id: 1.5
-    text: "etcd"
-    checks:
-      - id: 1.5.1
-        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--cert-file"
-              set: true
-            - flag: "--key-file"
-              set: true
-        remediation: |
-          Follow the etcd service documentation and configure TLS encryption.
-          Then, edit the etcd pod specification file $etcdconf on the
-          master node and set the below parameters.
-          --ca-file=</path/to/ca-file>
-          --key-file=</path/to/key-file>
-        scored: true
-
-      - id: 1.5.2
-        text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--client-cert-auth"
-              compare:
-                op: noteq
-                value: false
-              set: true
-        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and set the below parameter.
-          --client-cert-auth="true"
-        scored: true
-
-      - id: 1.5.3
-        text: "Ensure that the --auto-tls argument is not set to true (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--auto-tls"
-              set: false
-            - flag: "--auto-tls"
-              compare:
-                op: eq
-                value: false
-        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and either remove the --auto-tls parameter or set it to false.
-            --auto-tls=false
-        scored: true
-
-      - id: 1.5.4
-        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
-        set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          bin_op: and
-          test_items:
-            - flag: "--peer-cert-file"
-              set: true
-            - flag: "--peer-key-file"
-              set: true
-        remediation: |
-          Follow the etcd service documentation and configure peer TLS encryption as appropriate
-          for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
-          master node and set the below parameters.
-          --peer-client-file=</path/to/peer-cert-file>
-          --peer-key-file=</path/to/peer-key-file>
-        scored: true
-
-      - id: 1.5.5
-        text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--peer-client-cert-auth"
-              compare:
-                op: eq
-                value: true
-              set: true
-        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and set the below parameter.
-          --peer-client-cert-auth=true
-        scored: true
-
-      - id: 1.5.6
-        text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--peer-auto-tls"
-              set: false
-            - flag: "--peer-auto-tls"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and either remove the --peer-auto-tls parameter or set it to false.
-          --peer-auto-tls=false
-        scored: true
-
-      - id: 1.5.7
-        text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        type: "manual"
-        tests:
-          test_items:
-            - flag: "--trusted-ca-file"
-              set: true
-        remediation: |
-          [Manual test]
-          Follow the etcd documentation and create a dedicated certificate authority setup for the
-          etcd service.
-          Then, edit the etcd pod specification file $etcdconf on the
-          master node and set the below parameter.
-          --trusted-ca-file=</path/to/ca-file>
-        scored: false
-
-  - id: 1.6
-    text: "General Security Primitives"
-    checks:
-      - id: 1.6.1
-        text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Remove any unneeded clusterrolebindings :
-          kubectl delete clusterrolebinding [name]
-        scored: false
-
-      - id: 1.6.2
-        text: "Create administrative boundaries between resources using namespaces (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the documentation and create namespaces for objects in your deployment as you
-          need them.
-        scored: false
-
-      - id: 1.6.3
-        text: "Create network segmentation using Network Policies (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the documentation and create NetworkPolicy objects as you need them.
-        scored: false
-
-      - id: 1.6.4
-        text: "Ensure that the seccomp profile is set to docker/default in your pod
-        definitions (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
-          would need to enable alpha features in the apiserver by passing "--feature-
-          gates=AllAlpha=true" argument.
-          Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
-          parameter to "--feature-gates=AllAlpha=true"
-          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
-          Based on your system, restart the kube-apiserver service. For example:
-          systemctl restart kube-apiserver.service
-          Use annotations to enable the docker/default seccomp profile in your pod definitions. An
-          example is as below:
-          apiVersion: v1
-          kind: Pod
-          metadata:
-            name: trustworthy-pod
-            annotations:
-              seccomp.security.alpha.kubernetes.io/pod: docker/default
-          spec:
-            containers:
-              - name: trustworthy-container
-                image: sotrustworthy:latest
-        scored: false
-
-      - id: 1.6.5
-        text: "Apply Security Context to Your Pods and Containers (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and apply security contexts to your pods. For a
-          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
-          Containers.
-        scored: false
-
-      - id: 1.6.6
-        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and setup image provenance.
-        scored: false
-
-      - id: 1.6.7
-        text: "Configure Network policies as appropriate (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and setup network policies as appropriate.
-          For example, you could create a "default" isolation policy for a Namespace by creating a
-          NetworkPolicy that selects all pods but does not allow any traffic:
-          apiVersion: networking.k8s.io/v1
-          kind: NetworkPolicy
-          metadata:
-            name: default-deny
-          spec:
-            podSelector:
-        scored: false
-
-      - id: 1.6.8
-        text: "Place compensating controls in the form of PSP and RBAC for
-        privileged containers usage (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster.
-        scored: false
-
-  - id: 1.7
-    text: "PodSecurityPolicies"
-    checks:
-      - id: 1.7.1
-        text: "Do not admit privileged containers (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.2
-        text: "Do not admit containers wishing to share the host process ID namespace (Scored)"
-        type: "manual"
-        remediation: |
-         [Manual test]
-         Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.3
-        text: "Do not admit containers wishing to share the host IPC namespace (Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.4
-        text: "Do not admit containers wishing to share the host network namespace (Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.5
-        text: "Do not admit containers with allowPrivilegeEscalation (Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.6
-        text: "Do not admit root containers (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
-        scored: false
-
-      - id: 1.7.7
-        text: "Do not admit containers with dangerous capabilities (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
-        scored: false
diff --git a/cfg/cis-1.3/node.yaml b/cfg/cis-1.3/node.yaml
deleted file mode 100644
index ede4bcc4ac7b387dce27b21df8095101a3a69088..0000000000000000000000000000000000000000
--- a/cfg/cis-1.3/node.yaml
+++ /dev/null
@@ -1,500 +0,0 @@
----
-controls:
-version: "1.11"
-id: "2"
-text: Worker Node Security Configuration
-type: "node"
-groups:
-  - id: "2.1"
-    text: Kubelet
-    checks:
-      - id: 2.1.1
-        text: Ensure that the --allow-privileged argument is set to false (Scored)
-        audit: "/bin/ps -fC $kubeletbin "
-        tests:
-          test_items:
-            - flag: --allow-privileged
-              set: true
-              compare:
-                op: eq
-                value: false
-        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --allow-privileged=false
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.2
-        text: Ensure that the --anonymous-auth argument is set to false (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --anonymous-auth
-              path: '{.authentication.anonymous.enabled}'
-              set: true
-              compare:
-                op: eq
-                value: false
-        remediation: |
-          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
-          false .
-          If using executable arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --anonymous-auth=false
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.3
-        text: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --authorization-mode
-              path: '{.authorization.mode}'
-              set: true
-              compare:
-                op: nothave
-                value: AlwaysAllow
-        remediation: |
-          If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
-          If using executable arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_AUTHZ_ARGS variable.
-          --authorization-mode=Webhook
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.4
-        text: Ensure that the --client-ca-file argument is set as appropriate (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --client-ca-file
-              path: '{.authentication.x509.clientCAFile}'
-              set: true
-        remediation: |
-          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
-          the location of the client CA file.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_AUTHZ_ARGS variable.
-          --client-ca-file=<path/to/client-ca-file>
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.5
-        text: Ensure that the --read-only-port argument is set to 0 (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --read-only-port
-              path: '{.readOnlyPort}'
-              set: true
-              compare:
-                op: eq
-                value: 0
-        remediation: |
-          If using a Kubelet config file, edit the file to set readOnlyPort to 0 .
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --read-only-port=0
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.6
-        text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --streaming-connection-idle-timeout
-              path: '{.streamingConnectionIdleTimeout}'
-              set: true
-              compare:
-                op: noteq
-                value: 0
-            - flag: --streaming-connection-idle-timeout
-              path: '{.streamingConnectionIdleTimeout}'
-              set: false
-          bin_op: or
-        remediation: |
-          If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
-          value other than 0.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --streaming-connection-idle-timeout=5m
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.7
-        text: Ensure that the --protect-kernel-defaults argument is set to true (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --protect-kernel-defaults
-              path: '{.protectKernelDefaults}'
-              set: true
-              compare:
-                op: eq
-                value: true
-        remediation: |
-          If using a Kubelet config file, edit the file to set protectKernelDefaults: true .
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --protect-kernel-defaults=true
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.8
-        text: Ensure that the --make-iptables-util-chains argument is set to true (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --make-iptables-util-chains
-              path: '{.makeIPTablesUtilChains}'
-              set: true
-              compare:
-                op: eq
-                value: true
-            - flag: --make-iptables-util-chains
-              path: '{.makeIPTablesUtilChains}'
-              set: false
-          bin_op: or
-        remediation: |
-          If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true .
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          remove the --make-iptables-util-chains argument from the
-          KUBELET_SYSTEM_PODS_ARGS variable.
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.9
-        text: Ensure that the --hostname-override argument is not set (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --hostname-override
-              path: '{.hostnameOverride}'
-              set: false
-        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and remove the --hostname-override argument from the
-          KUBELET_SYSTEM_PODS_ARGS variable.
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.10
-        text: Ensure that the --event-qps argument is set to 0 (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --event-qps
-              path: '{.eventRecordQPS}'
-              set: true
-              compare:
-                op: eq
-                value: 0
-        remediation: |
-          If using a Kubelet config file, edit the file to set eventRecordQPS: 0 .
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --event-qps=0
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.11
-        text: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --tls-cert-file
-              path: '{.tlsCertFile}'
-              set: true
-            - flag: --tls-private-key-file
-              path: '{.tlsPrivateKeyFile}'
-              set: true
-          bin_op: and
-        remediation: |
-          If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
-          file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
-          corresponding private key file.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
-          --tls-cert-file=<path/to/tls-certificate-file>
-          file=<path/to/tls-key-file>
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.12
-        text: Ensure that the --cadvisor-port argument is set to 0 (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --cadvisor-port
-              path: '{.cadvisorPort}'
-              set: true
-              compare:
-                op: eq
-                value: 0
-            - flag: --cadvisor-port
-              path: '{.cadvisorPort}'
-              set: false
-          bin_op: or
-        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
-          --cadvisor-port=0
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.13
-        text: Ensure that the --rotate-certificates argument is not set to false (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --rotate-certificates
-              path: '{.rotateCertificates}'
-              set: true
-              compare:
-                op: eq
-                value: true
-            - flag: --rotate-certificates
-              path: '{.rotateCertificates}'
-              set: false
-          bin_op: or
-        remediation: |
-          If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
-          If using command line arguments, edit the kubelet service file $kubeletsvc
-          on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.14
-        text: Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: RotateKubeletServerCertificate
-              path: '{.featureGates.RotateKubeletServerCertificate}'
-              set: true
-              compare:
-                op: eq
-                value: true
-        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
-          --feature-gates=RotateKubeletServerCertificate=true
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.15
-        text: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --tls-cipher-suites
-              path: '{.tlsCipherSuites}'
-              set: true
-              compare:
-                op: valid_elements
-                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-        remediation: |
-          If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-          If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
-          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-        scored: false
-
-  - id: "2.2"
-    text: Configuration Files
-    checks:
-      - id: 2.2.1
-        text: Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the each worker
-          node. For example,
-          chmod 644 $kubeletkubeconfig
-        scored: true
-
-      - id: 2.2.2
-        text: Ensure that the kubelet.conf file ownership is set to root:root (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-              compare:
-                op: eq
-                value: root:root
-        remediation: |
-          Run the below command (based on the file location on your system) on the each worker
-          node. For example,
-          chown root:root $kubeletkubeconfig
-        scored: true
-
-      - id: 2.2.3
-        text: Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
-        tests:
-          test_items:
-            - flag: "permissions"
-              set: true
-              compare:
-                op: bitmask
-                value: "644"
-        remediation: |
-          Run the below command (based on the file location on your system) on the each worker
-          node. For example,
-          chmod 644 $kubeletsvc
-        scored: true
-
-      - id: 2.2.4
-        text: Ensure that the kubelet service file ownership is set to root:root (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the each worker
-          node. For example,
-          chown root:root $kubeletsvc
-        scored: true
-
-      - id: 2.2.5
-        text: Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)
-        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
-        tests:
-          test_items:
-            - flag: "permissions"
-              set: true
-              compare:
-                op: bitmask
-                value: "644"
-        remediation: |
-          Run the below command (based on the file location on your system) on the each worker
-          node. For example,
-          chmod 644 $proxykubeconfig
-        scored: true
-
-      - id: 2.2.6
-        text: Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)
-        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the each worker
-          node. For example,
-          chown root:root $proxykubeconfig
-        scored: true
-
-      - id: 2.2.7
-        text: Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)
-        type: manual
-        remediation: |
-          Run the following command to modify the file permissions of the --client-ca-file
-          chmod 644 <filename>
-        scored: true
-
-      - id: 2.2.8
-        text: Ensure that the client certificate authorities file ownership is set to root:root (Scored)
-        audit: |
-          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
-          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
-          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-              compare:
-                op: eq
-                value: root:root
-        remediation: |
-          Run the following command to modify the ownership of the --client-ca-file .
-          chown root:root <filename>
-        scored: true
-
-      - id: 2.2.9
-        text: Ensure that the kubelet configuration file ownership is set to root:root (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-        remediation: |
-          Run the following command (using the config file location identified in the Audit step)
-          chown root:root $kubeletconf
-        scored: true
-
-      - id: 2.2.10
-        text: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
-        tests:
-          test_items:
-            - flag: "permissions"
-              set: true
-              compare:
-                op: bitmask
-                value: "644"
-        remediation: |
-          Run the following command (using the config file location identified in the Audit step)
-          chmod 644 $kubeletconf
-        scored: true
diff --git a/cfg/cis-1.4/config.yaml b/cfg/cis-1.4/config.yaml
deleted file mode 100644
index b7839455a64a067a15886474164b6d4476c49133..0000000000000000000000000000000000000000
--- a/cfg/cis-1.4/config.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/cis-1.4/master.yaml b/cfg/cis-1.4/master.yaml
deleted file mode 100644
index 4939ec6b3551deabbd8ccecb173017f60d6d9ffd..0000000000000000000000000000000000000000
--- a/cfg/cis-1.4/master.yaml
+++ /dev/null
@@ -1,1467 +0,0 @@
----
-controls:
-version: 1.13
-id: 1
-text: "Master Node Security Configuration"
-type: "master"
-groups:
-  - id: 1.1
-    text: "API Server"
-    checks:
-      - id: 1.1.1
-        text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--anonymous-auth"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --anonymous-auth=false
-        scored: false
-
-      - id: 1.1.2
-        text: "Ensure that the --basic-auth-file argument is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--basic-auth-file"
-              set: false
-        remediation: |
-          Follow the documentation and configure alternate mechanisms for authentication. Then,
-          edit the API server pod specification file $apiserverconf
-          on the master node and remove the --basic-auth-file=<filename>
-          parameter.
-        scored: true
-
-      - id: 1.1.3
-        text: "Ensure that the --insecure-allow-any-token argument is not set (Not Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--insecure-allow-any-token"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and remove the --insecure-allow-any-token
-          parameter.
-        scored: true
-
-      - id: 1.1.4
-        text: "Ensure that the --kubelet-https argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--kubelet-https"
-              compare:
-                op: eq
-                value: true
-              set: true
-            - flag: "--kubelet-https"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and remove the --kubelet-https parameter.
-        scored: true
-
-      - id: 1.1.5
-        text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--insecure-bind-address"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and remove the --insecure-bind-address
-          parameter.
-        scored: true
-
-      - id: 1.1.6
-        text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--insecure-port"
-              compare:
-                op: eq
-                value: 0
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          apiserver.yaml on the master node and set the below parameter.
-          --insecure-port=0
-        scored: true
-
-      - id: 1.1.7
-        text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--secure-port"
-              compare:
-                op: gt
-                value: 0
-              set: true
-            - flag: "--secure-port"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and either remove the --secure-port parameter or
-          set it to a different (non-zero) desired port.
-        scored: true
-
-      - id: 1.1.8
-        text: "Ensure that the --profiling argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--profiling"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --profiling=false
-        scored: true
-
-      - id: 1.1.9
-        text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--repair-malformed-updates"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --repair-malformed-updates=false
-        scored: true
-
-      - id: 1.1.10
-        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: nothave
-                value: AlwaysAdmit
-              set: true
-            - flag: "--enable-admission-plugins"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that does not include AlwaysAdmit.
-        scored: true
-
-      - id: 1.1.11
-        text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "AlwaysPullImages"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins to
-          include AlwaysPullImages.
-          --enable-admission-plugins=...,AlwaysPullImages,...
-        scored: true
-
-      - id: 1.1.12
-        text: "[DEPRECATED] Ensure that the admission control plugin DenyEscalatingExec is set (Not Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        type: "skip"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "DenyEscalatingExec"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that includes DenyEscalatingExec.
-          --enable-admission-plugins=...,DenyEscalatingExec,...
-        scored: false
-
-      - id: 1.1.13
-        text: "Ensure that the admission control plugin SecurityContextDeny is set (Not Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "SecurityContextDeny"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to
-          include SecurityContextDeny.
-          --enable-admission-plugins=...,SecurityContextDeny,...
-        scored: false
-
-      - id: 1.1.14
-        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--disable-admission-plugins"
-              compare:
-                op: nothave
-                value: "NamespaceLifecycle"
-              set: true
-            - flag: "--disable-admission-plugins"
-              set: false
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --disable-admission-plugins parameter to
-          ensure it does not include NamespaceLifecycle.
-          --disable-admission-plugins=...,NamespaceLifecycle,...
-        scored: true
-
-      - id: 1.1.15
-        text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--audit-log-path"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --audit-log-path parameter to a suitable
-          path and file where you would like audit logs to be written, for example:
-          --audit-log-path=/var/log/apiserver/audit.log
-        scored: true
-
-      - id: 1.1.16
-        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--audit-log-maxage"
-              compare:
-                op: gte
-                value: 30
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --audit-log-maxage parameter to 30 or
-          as an appropriate number of days: --audit-log-maxage=30
-        scored: true
-
-      - id: 1.1.17
-        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--audit-log-maxbackup"
-              compare:
-                op: gte
-                value: 10
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --audit-log-maxbackup parameter to 10
-          or to an appropriate value.
-          --audit-log-maxbackup=10
-        scored: true
-
-      - id: 1.1.18
-        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--audit-log-maxsize"
-              compare:
-                op: gte
-                value: 100
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --audit-log-maxsize parameter to an
-          appropriate size in MB. For example, to set it as 100 MB:
-          --audit-log-maxsize=100
-        scored: true
-
-      - id: 1.1.19
-        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--authorization-mode"
-              compare:
-                op: nothave
-                value: "AlwaysAllow"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --authorization-mode parameter to
-          values other than AlwaysAllow. One such example could be as below.
-          --authorization-mode=RBAC
-        scored: true
-
-      - id: 1.1.20
-        text: "Ensure that the --token-auth-file parameter is not set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--token-auth-file"
-              set: false
-        remediation: |
-          Follow the documentation and configure alternate mechanisms for authentication. Then,
-          edit the API server pod specification file $apiserverconf
-          on the master node and remove the --token-auth-file=<filename>
-          parameter.
-        scored: true
-
-      - id: 1.1.21
-        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--kubelet-certificate-authority"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and setup the TLS connection between the
-          apiserver and kubelets. Then, edit the API server pod specification file
-          $apiserverconf on the master node and set the --kubelet-certificate-authority
-          parameter to the path to the cert file for the certificate authority.
-          --kubelet-certificate-authority=<ca-string>
-        scored: true
-
-      - id: 1.1.22
-        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: and
-          test_items:
-            - flag: "--kubelet-client-certificate"
-              set: true
-            - flag: "--kubelet-client-key"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the
-          apiserver and kubelets. Then, edit API server pod specification file
-          $apiserverconf on the master node and set the
-          kubelet client certificate and key parameters as below.
-          --kubelet-client-certificate=<path/to/client-certificate-file>
-          --kubelet-client-key=<path/to/client-key-file>
-        scored: true
-
-      - id: 1.1.23
-        text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--service-account-lookup"
-              set: false
-            - flag: "--service-account-lookup"
-              compare:
-                op: eq
-                value: true
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --service-account-lookup=true
-        scored: true
-
-      - id: 1.1.24
-        text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "PodSecurityPolicy"
-              set: true
-        remediation: |
-          Follow the documentation and create Pod Security Policy objects as per your environment.
-          Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that includes PodSecurityPolicy :
-          --enable-admission-plugins=...,PodSecurityPolicy,...
-          Then restart the API Server.
-        scored: true
-
-      - id: 1.1.25
-        text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--service-account-key-file"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --service-account-key-file parameter
-          to the public key file for service accounts:
-          --service-account-key-file=<filename>
-        scored: true
-
-      - id: 1.1.26
-        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as
-          appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: and
-          test_items:
-            - flag: "--etcd-certfile"
-              set: true
-            - flag: "--etcd-keyfile"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the
-          apiserver and etcd. Then, edit the API server pod specification file
-          $apiserverconf on the master node and set the etcd
-          certificate and key file parameters.
-          --etcd-certfile=<path/to/client-certificate-file>
-          --etcd-keyfile=<path/to/client-key-file>
-        scored: true
-
-      - id: 1.1.27
-        text: "Ensure that the admission control plugin ServiceAccount is set(Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--disable-admission-plugins"
-              compare:
-                op: nothave
-                value: "ServiceAccount"
-              set: true
-            - flag: "--disable-admission-plugins"
-              set: false
-        remediation: |
-          Follow the documentation and create ServiceAccount objects as per your environment.
-          Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that includes ServiceAccount.
-          --enable-admission-plugins=...,ServiceAccount,...
-        scored: true
-
-      - id: 1.1.28
-        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
-        as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: and
-          test_items:
-            - flag: "--tls-cert-file"
-              set: true
-            - flag: "--tls-private-key-file"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-          Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the TLS certificate and private key file
-          parameters.
-          --tls-cert-file=<path/to/tls-certificate-file>
-          --tls-private-key-file=<path/to/tls-key-file>
-        scored: true
-
-      - id: 1.1.29
-        text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--client-ca-file"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-          Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the client certificate authority file.
-          --client-ca-file=<path/to/client-ca-file>
-        scored: true
-
-      - id: 1.1.30
-        text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--etcd-cafile"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the
-          apiserver and etcd. Then, edit the API server pod specification file
-          $apiserverconf on the master node and set the etcd
-          certificate authority file parameter.
-          --etcd-cafile=<path/to/ca-file>
-        scored: true
-
-      - id: 1.1.31
-        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--tls-cipher-suites"
-              compare:
-                op: has
-                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the below parameter.
-          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-        scored: false
-
-      - id: 1.1.32
-        text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--authorization-mode"
-              compare:
-                op: has
-                value: "Node"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the master node and set the --authorization-mode parameter to a
-          value that includes Node.
-          --authorization-mode=Node,RBAC
-        scored: true
-
-      - id: 1.1.33
-        text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "NodeRestriction"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and configure NodeRestriction plug-in on
-          kubelets. Then, edit the API server pod specification file $apiserverconf
-          on the master node and set the --enable-admission-plugins parameter to a
-          value that includes NodeRestriction.
-          --enable-admission-plugins=...,NodeRestriction,...
-        scored: true
-
-      - id: 1.1.34
-        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--encryption-provider-config"
-              set: true
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and configure a EncryptionConfig file.
-          Then, edit the API server pod specification file $apiserverconf on the
-          master node and set the --encryption-provider-config parameter
-          to the path of that file:
-          --encryption-provider-config=</path/to/EncryptionConfig/File>
-        scored: true
-
-      - id: 1.1.35
-        text: "Ensure that the encryption provider is set to aescbc (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file,
-          choose aescbc as the encryption provider.
-          For example,
-          kind: EncryptionConfig
-          apiVersion: v1
-          resources:
-            - resources:
-              - secrets
-                providers:
-                - aescbc:
-                    keys:
-                    - name: key1
-                      secret: <32-byte base64-encoded secret>
-        scored: true
-
-      - id: 1.1.36
-        text: "Ensure that the admission control plugin EventRateLimit is set (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--enable-admission-plugins"
-              compare:
-                op: has
-                value: "EventRateLimit"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set the desired limits in a
-          configuration file. Then, edit the API server pod specification file
-          $apiserverconf and set the below parameters.
-          --enable-admission-plugins=...,EventRateLimit,...
-          --admission-control-config-file=<path/to/configuration/file>
-        scored: true
-
-      - id: 1.1.37a
-        text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--feature-gates"
-              compare:
-                op: nothave
-                value: "AdvancedAuditing=false"
-              set: true
-            - flag: "--feature-gates"
-              set: false
-        remediation: |
-          Follow the Kubernetes documentation and set the desired audit policy in the
-          /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
-          and set the below parameters.
-          --audit-policy-file=/etc/kubernetes/audit-policy.yaml
-        scored: true
-
-      - id: 1.1.37b
-        text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--audit-policy-file"
-              compare:
-                op: eq
-                value: "/etc/kubernetes/audit-policy.yaml"
-              set: true
-            - flag: "--audit-policy-file"
-              compare:
-                op: eq
-                value: "/etc/kubernetes/audit-policy.yml"
-              set: true
-        remediation: |
-          Follow the Kubernetes documentation and set the desired audit policy in the
-          /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
-          and set the below parameters.
-          --audit-policy-file=/etc/kubernetes/audit-policy.yaml
-        scored: true
-
-      - id: 1.1.38
-        text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--request-timeout"
-              set: false
-            - flag: "--request-timeout"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          and set the below parameter as appropriate and if needed. For example,
-          --request-timeout=300s
-        scored: true
-
-      - id: 1.1.39
-        text: "Ensure that the --authorization-mode argument includes RBAC (Scored)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--authorization-mode"
-              compare:
-                op: has
-                value: "RBAC"
-              set: true
-        remediation: |
-          Edit the API server pod specification file $apiserverbin on the master node and set the --authorization-mode parameter to a value that includes RBAC, for example: --authorization-mode=Node,RBAC
-        scored: true
-
-  - id: 1.2
-    text: "Scheduler"
-    checks:
-      - id: 1.2.1
-        text: "Ensure that the --profiling argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--profiling"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the Scheduler pod specification file $schedulerconf
-          file on the master node and set the below parameter.
-          --profiling=false
-        scored: true
-
-      - id: 1.2.2
-        text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
-        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--address"
-              compare:
-                op: eq
-                value: "127.0.0.1"
-              set: true
-            - flag: "--address"
-              set: false
-        remediation: |
-          Edit the Scheduler pod specification file $schedulerconf
-          file on the master node and ensure the correct value for the
-          --address parameter.
-        scored: true
-
-  - id: 1.3
-    text: "Controller Manager"
-    checks:
-      - id: 1.3.1
-        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--terminated-pod-gc-threshold"
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
-          --terminated-pod-gc-threshold=10
-        scored: true
-
-      - id: 1.3.2
-        text: "Ensure that the --profiling argument is set to false (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--profiling"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the below parameter.
-          --profiling=false
-        scored: true
-
-      - id: 1.3.3
-        text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--use-service-account-credentials"
-              compare:
-                op: noteq
-                value: false
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node to set the below parameter.
-          --use-service-account-credentials=true
-        scored: true
-
-      - id: 1.3.4
-        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--service-account-private-key-file"
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the --service-account-private-key-file
-          parameter to the private key file for service accounts.
-          --service-account-private-key-file=<filename>
-        scored: true
-
-      - id: 1.3.5
-        text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--root-ca-file"
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the --root-ca-file parameter to
-          the certificate bundle file.
-          --root-ca-file=<path/to/file>
-        scored: true
-
-      - id: 1.3.6
-        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--feature-gates"
-              compare:
-                op: eq
-                value: "RotateKubeletServerCertificate=true"
-              set: true
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and set the --feature-gates parameter to
-          include RotateKubeletServerCertificate=true.
-          --feature-gates=RotateKubeletServerCertificate=true
-        scored: true
-
-      - id: 1.3.7
-        text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--address"
-              compare:
-                op: eq
-                value: "127.0.0.1"
-              set: true
-            - flag: "--address"
-              set: false
-        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the master node and ensure the correct value
-          for the --address parameter.
-        scored: true
-
-  - id: 1.4
-    text: "Configuration Files"
-    checks:
-      - id: 1.4.1
-        text: "Ensure that the API server pod specification file permissions are
-        set to 644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 $apiserverconf
-        scored: true
-
-      - id: 1.4.2
-        text: "Ensure that the API server pod specification file ownership is set to
-        root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root $apiserverconf
-        scored: true
-
-      - id: 1.4.3
-        text: "Ensure that the controller manager pod specification file
-        permissions are set to 644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 $controllermanagerconf
-        scored: true
-
-      - id: 1.4.4
-        text: "Ensure that the controller manager pod specification file
-        ownership is set to root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root $controllermanagerconf
-        scored: true
-
-      - id: 1.4.5
-        text: "Ensure that the scheduler pod specification file permissions are set
-        to 644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 $schedulerconf
-        scored: true
-
-      - id: 1.4.6
-        text: "Ensure that the scheduler pod specification file ownership is set to
-        root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root $schedulerconf
-        scored: true
-
-      - id: 1.4.7
-        text: "Ensure that the etcd pod specification file permissions are set to
-        644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 $etcdconf
-        scored: true
-
-      - id: 1.4.8
-        text: "Ensure that the etcd pod specification file ownership is set to
-        root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root $etcdconf
-        scored: true
-
-      - id: 1.4.9
-        text: "Ensure that the Container Network Interface file permissions are
-        set to 644 or more restrictive (Not Scored)"
-        audit: "stat -c permissions=%a <path/to/cni/files>"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 <path/to/cni/files>
-        scored: false
-
-      - id: 1.4.10
-        text: "Ensure that the Container Network Interface file ownership is set
-        to root:root (Not Scored)"
-        audit: "stat -c %U:%G <path/to/cni/files>"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root <path/to/cni/files>
-        scored: false
-
-      - id: 1.4.11
-        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
-        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "700"
-              set: true
-        remediation: |
-          On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-          from the below command:
-          ps -ef | grep $etcdbin
-          Run the below command (based on the etcd data directory found above). For example,
-          chmod 700 /var/lib/etcd
-        scored: true
-
-      - id: 1.4.12
-        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
-        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
-        tests:
-          test_items:
-            - flag: "etcd:etcd"
-              set: true
-        remediation: |
-          On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-          from the below command:
-          ps -ef | grep $etcdbin
-          Run the below command (based on the etcd data directory found above). For example,
-          chown etcd:etcd /var/lib/etcd
-        scored: true
-
-      - id: 1.4.13
-        text: "Ensure that the admin.conf file permissions are set to 644 or
-        more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chmod 644 /etc/kubernetes/admin.conf
-        scored: true
-
-      - id: 1.4.14
-        text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the master node.
-          For example,
-          chown root:root /etc/kubernetes/admin.conf
-        scored: true
-
-      - id: 1.4.15
-        text: "Ensure that the scheduler.conf file permissions are set to 644 or
-        more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c permissions=%a /etc/kubernetes/scheduler.conf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the
-          master node. For example, chmod 644 /etc/kubernetes/scheduler.conf
-        scored: true
-
-      - id: 1.4.16
-        text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the
-          master node. For example, chown root:root /etc/kubernetes/scheduler.conf
-        scored: true
-
-      - id: 1.4.17
-        text: "Ensure that the controller-manager.conf file permissions are set
-        to 644 or more restrictive (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c permissions=%a /etc/kubernetes/controller-manager.conf; fi'"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the
-          master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf
-        scored: true
-
-      - id: 1.4.18
-        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
-        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
-        tests:
-          test_items:
-            - flag: "root:root"
-              compare:
-                op: eq
-                value: "root:root"
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on the
-          master node. For example, chown root:root /etc/kubernetes/controller-manager.conf
-        scored: true
-
-      - id: 1.4.19
-        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)"
-        audit: "ls -laR /etc/kubernetes/pki/"
-        type: "manual"
-        tests:
-          test_items:
-            - flag: "root root"
-              compare:
-                op: eq
-                value: "root root"
-              set: true
-        remediation: |
-          [Manual test]
-          Run the below command (based on the file location on your system) on the master node.
-          For example, chown -R root:root /etc/kubernetes/pki/
-        scored: true
-
-      - id: 1.4.20
-        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)"
-        audit: "stat -c %n\ %a /etc/kubernetes/pki/*.crt"
-        type: "manual"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          [Manual test]
-          Run the below command (based on the file location on your system) on the master node.
-          For example, chmod -R 644 /etc/kubernetes/pki/*.crt
-        scored: true
-
-      - id: 1.4.21
-        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 or more restrictive (Scored)"
-        audit: "stat -c %n\ %a /etc/kubernetes/pki/*.key"
-        type: "manual"
-        tests:
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "600"
-              set: true
-        remediation: |
-          [Manual test]
-          Run the below command (based on the file location on your system) on the master node.
-          For example, chmod -R 600 /etc/kubernetes/pki/*.key
-        scored: true
-
-  - id: 1.5
-    text: "etcd"
-    checks:
-      - id: 1.5.1
-        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--cert-file"
-              set: true
-            - flag: "--key-file"
-              set: true
-        remediation: |
-          Follow the etcd service documentation and configure TLS encryption.
-          Then, edit the etcd pod specification file $etcdconf on the
-          master node and set the below parameters.
-          --cert-file=</path/to/cert-file>
-          --key-file=</path/to/key-file>
-        scored: true
-
-      - id: 1.5.2
-        text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--client-cert-auth"
-              compare:
-                op: eq
-                value: true
-              set: true
-        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and set the below parameter.
-          --client-cert-auth="true"
-        scored: true
-
-      - id: 1.5.3
-        text: "Ensure that the --auto-tls argument is not set to true (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--auto-tls"
-              set: false
-            - flag: "--auto-tls"
-              compare:
-                op: eq
-                value: false
-        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and either remove the --auto-tls parameter or set it to false.
-            --auto-tls=false
-        scored: true
-
-      - id: 1.5.4
-        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
-        set as appropriate (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          bin_op: and
-          test_items:
-            - flag: "--peer-cert-file"
-              set: true
-            - flag: "--peer-key-file"
-              set: true
-        remediation: |
-          Follow the etcd service documentation and configure peer TLS encryption as appropriate
-          for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
-          master node and set the below parameters.
-          --peer-cert-file=</path/to/peer-cert-file>
-          --peer-key-file=</path/to/peer-key-file>
-        scored: true
-
-      - id: 1.5.5
-        text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          test_items:
-            - flag: "--peer-client-cert-auth"
-              compare:
-                op: eq
-                value: true
-              set: true
-        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and set the below parameter.
-          --peer-client-cert-auth=true
-        scored: true
-
-      - id: 1.5.6
-        text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "--peer-auto-tls"
-              set: false
-            - flag: "--peer-auto-tls"
-              compare:
-                op: eq
-                value: false
-              set: true
-        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and either remove the --peer-auto-tls parameter or set it to false.
-          --peer-auto-tls=false
-        scored: true
-
-      - id: 1.5.7
-        text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
-        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-        type: "manual"
-        tests:
-          test_items:
-            - flag: "--trusted-ca-file"
-              set: true
-        remediation: |
-          [Manual test]
-          Follow the etcd documentation and create a dedicated certificate authority setup for the
-          etcd service.
-          Then, edit the etcd pod specification file $etcdconf on the
-          master node and set the below parameter.
-          --trusted-ca-file=</path/to/ca-file>
-        scored: false
-
-  - id: 1.6
-    text: "General Security Primitives"
-    checks:
-      - id: 1.6.1
-        text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Remove any unneeded clusterrolebindings :
-          kubectl delete clusterrolebinding [name]
-        scored: false
-
-      - id: 1.6.2
-        text: "Create administrative boundaries between resources using namespaces (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the documentation and create namespaces for objects in your deployment as you
-          need them.
-        scored: false
-
-      - id: 1.6.3
-        text: "Create network segmentation using Network Policies (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the documentation and create NetworkPolicy objects as you need them.
-        scored: false
-
-      - id: 1.6.4
-        text: "Ensure that the seccomp profile is set to docker/default in your pod
-        definitions (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
-          would need to enable alpha features in the apiserver by passing the
-          "--feature-gates=AllAlpha=true" argument.
-          Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
-          parameter to "--feature-gates=AllAlpha=true"
-          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
-          Based on your system, restart the kube-apiserver service. For example:
-          systemctl restart kube-apiserver.service
-          Use annotations to enable the docker/default seccomp profile in your pod definitions. An
-          example is as below:
-          apiVersion: v1
-          kind: Pod
-          metadata:
-            name: trustworthy-pod
-            annotations:
-              seccomp.security.alpha.kubernetes.io/pod: docker/default
-          spec:
-            containers:
-              - name: trustworthy-container
-                image: sotrustworthy:latest
-        scored: false
-
-      - id: 1.6.5
-        text: "Apply Security Context to Your Pods and Containers (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and apply security contexts to your pods. For a
-          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
-          Containers.
-        scored: false
-
-      - id: 1.6.6
-        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and set up image provenance.
-        scored: false
-
-      - id: 1.6.7
-        text: "Configure Network policies as appropriate (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and set up network policies as appropriate.
-          For example, you could create a "default" isolation policy for a Namespace by creating a
-          NetworkPolicy that selects all pods but does not allow any traffic:
-          apiVersion: networking.k8s.io/v1
-          kind: NetworkPolicy
-          metadata:
-            name: default-deny
-          spec:
-            podSelector:
-        scored: false
-
-      - id: 1.6.8
-        text: "Place compensating controls in the form of PSP and RBAC for
-        privileged containers usage (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Follow the Kubernetes documentation and set up PSP and RBAC authorization for your cluster.
-        scored: false
-
-  - id: 1.7
-    text: "PodSecurityPolicies"
-    checks:
-      - id: 1.7.1
-        text: "Do not admit privileged containers (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.2
-        text: "Do not admit containers wishing to share the host process ID namespace (Scored)"
-        type: "manual"
-        remediation: |
-         [Manual test]
-         Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.3
-        text: "Do not admit containers wishing to share the host IPC namespace (Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.4
-        text: "Do not admit containers wishing to share the host network namespace (Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.5
-        text: " Do not admit containers with allowPrivilegeEscalation (Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
-        scored: false
-
-      - id: 1.7.6
-        text: "Do not admit root containers (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
-        scored: false
-
-      - id: 1.7.7
-        text: "Do not admit containers with dangerous capabilities (Not Scored)"
-        type: "manual"
-        remediation: |
-          [Manual test]
-          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
-        scored: false
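
Every check removed above follows the same controls schema used throughout kube-bench's benchmark definitions: groups of checks, each with an `audit` command, a `tests` block of `test_items` (optionally combined via `bin_op`), a `remediation` string, and a `scored` flag. A minimal sketch of that shape, using an illustrative id and flag name rather than any real benchmark entry (the `bin_op: or` semantics are inferred from the checks above, where "set correctly" and "not set at all" are both accepted):

```yaml
---
controls:
version: "1.15"                  # illustrative; the benchmark release the file belongs to
id: "1"
text: "Example Security Configuration"
type: "master"
groups:
  - id: "1.1"
    text: "Example group"
    checks:
      - id: 1.1.1
        text: "Ensure that the --example-flag argument is set to false (Scored)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"   # command whose output the tests inspect
        tests:
          bin_op: or                   # the check passes if either test_item passes
          test_items:
            - flag: "--example-flag"
              compare:
                op: eq
                value: false
              set: true                # flag present with the compared value
            - flag: "--example-flag"
              set: false               # ...or flag absent entirely
        remediation: |
          Edit the relevant component manifest and set --example-flag=false.
        scored: true
```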
diff --git a/cfg/cis-1.4/node.yaml b/cfg/cis-1.4/node.yaml
deleted file mode 100644
index cad30b7225995af5e41e36ffc8331932f2e30757..0000000000000000000000000000000000000000
--- a/cfg/cis-1.4/node.yaml
+++ /dev/null
@@ -1,491 +0,0 @@
----
-controls:
-version: "1.13"
-id: "2"
-text: Worker Node Security Configuration
-type: "node"
-groups:
-  - id: "2.1"
-    text: Kubelet
-    checks:
-      - id: 2.1.1
-        text: Ensure that the --anonymous-auth argument is set to false (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: "--anonymous-auth"
-              path: '{.authentication.anonymous.enabled}'
-              set: true
-              compare:
-                op: eq
-                value: false
-        remediation: |
-          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
-          false .
-          If using executable arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --anonymous-auth=false
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.2
-        text: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --authorization-mode
-              path: '{.authorization.mode}'
-              set: true
-              compare:
-                op: nothave
-                value: AlwaysAllow
-        remediation: |
-          If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
-          If using executable arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_AUTHZ_ARGS variable.
-          --authorization-mode=Webhook
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.3
-        text: Ensure that the --client-ca-file argument is set as appropriate (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --client-ca-file
-              path: '{.authentication.x509.clientCAFile}'
-              set: true
-        remediation: |
-          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
-          the location of the client CA file.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_AUTHZ_ARGS variable.
-          --client-ca-file=<path/to/client-ca-file>
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.4
-        text: Ensure that the --read-only-port argument is set to 0 (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: "--read-only-port"
-              path: '{.readOnlyPort}'
-              set: true
-              compare:
-                op: eq
-                value: 0
-        remediation: |
-          If using a Kubelet config file, edit the file to set readOnlyPort to 0 .
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --read-only-port=0
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.5
-        text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --streaming-connection-idle-timeout
-              path: '{.streamingConnectionIdleTimeout}'
-              set: true
-              compare:
-                op: noteq
-                value: 0
-            - flag: --streaming-connection-idle-timeout
-              path: '{.streamingConnectionIdleTimeout}'
-              set: false
-          bin_op: or
-        remediation: |
-          If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
-          value other than 0.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --streaming-connection-idle-timeout=5m
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.6
-        text: Ensure that the --protect-kernel-defaults argument is set to true (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --protect-kernel-defaults
-              path: '{.protectKernelDefaults}'
-              set: true
-              compare:
-                op: eq
-                value: true
-        remediation: |
-          If using a Kubelet config file, edit the file to set protectKernelDefaults: true .
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --protect-kernel-defaults=true
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.7
-        text: Ensure that the --make-iptables-util-chains argument is set to true (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --make-iptables-util-chains
-              path: '{.makeIPTablesUtilChains}'
-              set: true
-              compare:
-                op: eq
-                value: true
-            - flag: --make-iptables-util-chains
-              path: '{.makeIPTablesUtilChains}'
-              set: false
-          bin_op: or
-        remediation: |
-          If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true .
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          remove the --make-iptables-util-chains argument from the
-          KUBELET_SYSTEM_PODS_ARGS variable.
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.8
-        text: Ensure that the --hostname-override argument is not set (Scored)
-        # This is one of those properties that can only be set as a command line argument.
-        # To check if the property is set as expected, we need to parse the kubelet command
-        # instead of reading the Kubelet Configuration file.
-        audit: "/bin/ps -fC $kubeletbin "
-        tests:
-          test_items:
-            - flag: --hostname-override
-              set: false
-        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and remove the --hostname-override argument from the
-          KUBELET_SYSTEM_PODS_ARGS variable.
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.9
-        text: Ensure that the --event-qps argument is set to 0 (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --event-qps
-              path: '{.eventRecordQPS}'
-              set: true
-              compare:
-                op: eq
-                value: 0
-        remediation: |
-          If using a Kubelet config file, edit the file to set eventRecordQPS: 0 .
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          --event-qps=0
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.10
-        text: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --tls-cert-file
-              path: '{.tlsCertFile}'
-              set: true
-            - flag: --tls-private-key-file
-              path: '{.tlsPrivateKeyFile}'
-              set: true
-          bin_op: and
-        remediation: |
-          If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
-          file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
-          corresponding private key file.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
-          --tls-cert-file=<path/to/tls-certificate-file>
-          --tls-private-key-file=<path/to/tls-key-file>
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.11
-        text: '[DEPRECATED] Ensure that the --cadvisor-port argument is set to 0'
-        # This is one of those properties that can only be set as a command line argument.
-        # To check if the property is set as expected, we need to parse the kubelet command
-        # instead of reading the Kubelet Configuration file.
-        audit: "/bin/ps -fC $kubeletbin "
-        type: skip
-        tests:
-          test_items:
-            - flag: --cadvisor-port
-              set: true
-              compare:
-                op: eq
-                value: 0
-            - flag: --cadvisor-port
-              set: false
-          bin_op: or
-        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
-          --cadvisor-port=0
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: false
-
-      - id: 2.1.12
-        text: Ensure that the --rotate-certificates argument is not set to false (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --rotate-certificates
-              path: '{.rotateCertificates}'
-              set: true
-              compare:
-                op: eq
-                value: true
-            - flag: --rotate-certificates
-              path: '{.rotateCertificates}'
-              set: false
-          bin_op: or
-        remediation: |
-          If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
-          If using command line arguments, edit the kubelet service file $kubeletsvc
-          on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.13
-        text: Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: RotateKubeletServerCertificate
-              path: '{.featureGates.RotateKubeletServerCertificate}'
-              set: true
-              compare:
-                op: eq
-                value: true
-        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
-          --feature-gates=RotateKubeletServerCertificate=true
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: true
-
-      - id: 2.1.14
-        text: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
-        tests:
-          test_items:
-            - flag: --tls-cipher-suites
-              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
-              set: true
-              compare:
-                op: valid_elements
-                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-        remediation: |
-          If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-          If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
-          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-        scored: false
-
-  - id: "2.2"
-    text: Configuration Files
-    checks:
-      - id: 2.2.1
-        text: Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
-        tests:
-          test_items:
-            - flag: "permissions"
-              set: true
-              compare:
-                op: bitmask
-                value: "644"
-        remediation: |
-          Run the below command (based on the file location on your system) on each worker
-          node. For example,
-          chmod 644 $kubeletkubeconfig
-        scored: true
-
-      - id: 2.2.2
-        text: Ensure that the kubelet.conf file ownership is set to root:root (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-              compare:
-                op: eq
-                value: root:root
-        remediation: |
-          Run the below command (based on the file location on your system) on each worker
-          node. For example,
-          chown root:root $kubeletkubeconfig
-        scored: true
-
-      - id: 2.2.3
-        text: Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
-        tests:
-          test_items:
-            - flag: "permissions"
-              set: true
-              compare:
-                op: bitmask
-                value: "644"
-        remediation: |
-          Run the below command (based on the file location on your system) on each worker
-          node. For example,
-          chmod 644 $kubeletsvc
-        scored: true
-
-      - id: 2.2.4
-        text: Ensure that the kubelet service file ownership is set to root:root (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on each worker
-          node. For example,
-          chown root:root $kubeletsvc
-        scored: true
-
-      - id: 2.2.5
-        text: Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)
-        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
-        tests:
-          test_items:
-            - flag: "permissions"
-              set: true
-              compare:
-                op: bitmask
-                value: "644"
-        remediation: |
-          Run the below command (based on the file location on your system) on each worker
-          node. For example,
-          chmod 644 $proxykubeconfig
-        scored: true
-
-      - id: 2.2.6
-        text: Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)
-        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-        remediation: |
-          Run the below command (based on the file location on your system) on each worker
-          node. For example,
-          chown root:root $proxykubeconfig
-        scored: true
-
-      - id: 2.2.7
-        text: Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)
-        audit: "/bin/sh -c 'if test -e $kubeletcafile; then stat -c permissions=%a $kubeletcafile; fi'"
-        tests:
-          bin_op: or
-          test_items:
-            - flag: "permissions"
-              compare:
-                op: bitmask
-                value: "644"
-              set: true
-        remediation: |
-          Run the following command to modify the file permissions of the --client-ca-file
-          chmod 644 <filename>
-        scored: true
-
-      - id: 2.2.8
-        text: Ensure that the client certificate authorities file ownership is set to root:root (Scored)
-        audit: |
-          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
-          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
-          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-              compare:
-                op: eq
-                value: root:root
-        remediation: |
-          Run the following command to modify the ownership of the --client-ca-file .
-          chown root:root <filename>
-        scored: true
-
-      - id: 2.2.9
-        text: Ensure that the kubelet configuration file ownership is set to root:root (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
-        tests:
-          test_items:
-            - flag: root:root
-              set: true
-        remediation: |
-          Run the following command (using the config file location identified in the Audit step)
-          chown root:root $kubeletconf
-        scored: true
-
-      - id: 2.2.10
-        text: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)
-        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
-        tests:
-          test_items:
-            - flag: "permissions"
-              set: true
-              compare:
-                op: bitmask
-                value: "644"
-        remediation: |
-          Run the following command (using the config file location identified in the Audit step)
-          chmod 644 $kubeletconf
-        scored: true
diff --git a/cfg/config.yaml b/cfg/config.yaml
index e88a2c108caa597e169a309900185477c088f0eb..7170877c6d352f2ccf9c3be603f49e6bf00ab76d 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -189,10 +189,6 @@ managedservices:
   components: []
 
 version_mapping:
-  "1.11": "cis-1.3"
-  "1.12": "cis-1.3"
-  "1.13": "cis-1.4"
-  "1.14": "cis-1.4"
   "1.15": "cis-1.5"
   "1.16": "cis-1.6"
   "1.17": "cis-1.6"
@@ -204,12 +200,6 @@ version_mapping:
   "ocp-3.11": "rh-0.7"
 
 target_mapping:
-  "cis-1.3":
-    - "master"
-    - "node"
-  "cis-1.4":
-    - "master"
-    - "node"
   "cis-1.5":
     - "master"
     - "node"
diff --git a/cmd/common_test.go b/cmd/common_test.go
index 8355b100599d35eb9044d7ea8a4cbd2ab0c17015..15f4164b6e13e7d968db0e20eb067d6e17d15d55 100644
--- a/cmd/common_test.go
+++ b/cmd/common_test.go
@@ -206,10 +206,10 @@ func TestMapToCISVersion(t *testing.T) {
 		expErr      string
 	}{
 		{kubeVersion: "1.9", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.9"},
-		{kubeVersion: "1.11", succeed: true, exp: "cis-1.3"},
-		{kubeVersion: "1.12", succeed: true, exp: "cis-1.3"},
-		{kubeVersion: "1.13", succeed: true, exp: "cis-1.4"},
-		{kubeVersion: "1.14", succeed: true, exp: "cis-1.4"},
+		{kubeVersion: "1.11", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.11"},
+		{kubeVersion: "1.12", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.12"},
+		{kubeVersion: "1.13", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.13"},
+		{kubeVersion: "1.14", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.14"},
 		{kubeVersion: "1.15", succeed: true, exp: "cis-1.5"},
 		{kubeVersion: "1.16", succeed: true, exp: "cis-1.6"},
 		{kubeVersion: "1.17", succeed: true, exp: "cis-1.6"},
@@ -303,7 +303,7 @@ func TestGetBenchmarkVersion(t *testing.T) {
 
 	withFakeKubectl := func(kubeVersion, benchmarkVersion string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
 		execCode := `#!/bin/sh
-		echo "Server Version: v1.13.10"
+		echo "Server Version: v1.15.10"
 		`
 		restore, err := fakeExecutableInPath("kubectl", execCode)
 		if err != nil {
@@ -336,8 +336,8 @@ func TestGetBenchmarkVersion(t *testing.T) {
 	}{
 		{n: "both versions", kubeVersion: "1.11", benchmarkVersion: "cis-1.3", exp: "cis-1.3", callFn: withNoPath, v: viper.New(), succeed: false},
 		{n: "no version-missing-kubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "", callFn: withNoPath, succeed: false},
-		{n: "no version-fakeKubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "cis-1.4", callFn: withFakeKubectl, succeed: true},
-		{n: "kubeVersion", kubeVersion: "1.11", benchmarkVersion: "", v: viperWithData, exp: "cis-1.3", callFn: withNoPath, succeed: true},
+		{n: "no version-fakeKubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "cis-1.5", callFn: withFakeKubectl, succeed: true},
+		{n: "kubeVersion", kubeVersion: "1.15", benchmarkVersion: "", v: viperWithData, exp: "cis-1.5", callFn: withNoPath, succeed: true},
 		{n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
 		{n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
 		{n: "gke10", kubeVersion: "gke-1.0", benchmarkVersion: "", v: viperWithData, exp: "gke-1.0", callFn: withNoPath, succeed: true},
@@ -375,18 +375,6 @@ func TestValidTargets(t *testing.T) {
 		targets   []string
 		expected  bool
 	}{
-		{
-			name:      "cis-1.3 no etcd",
-			benchmark: "cis-1.3",
-			targets:   []string{"master", "etcd"},
-			expected:  false,
-		},
-		{
-			name:      "cis-1.4 valid",
-			benchmark: "cis-1.4",
-			targets:   []string{"master", "node"},
-			expected:  true,
-		},
 		{
 			name:      "cis-1.5 no dummy",
 			benchmark: "cis-1.5",
diff --git a/hack/node_only.yaml b/hack/node_only.yaml
index 0425bdba73177d6c39964645697187f03340d258..93e2c83a6fc4a9191c24242db9faf6dee84f9f13 100644
--- a/hack/node_only.yaml
+++ b/hack/node_only.yaml
@@ -64,10 +64,6 @@ node:
     defaultkubeconfig: "/etc/kubernetes/proxy.conf"
 
 version_mapping:
-  "1.11": "cis-1.3"
-  "1.12": "cis-1.3"
-  "1.13": "cis-1.4"
-  "1.14": "cis-1.4"
   "1.15": "cis-1.5"
   "1.16": "cis-1.6"
   "1.17": "cis-1.6"
diff --git a/integration/integration_test.go b/integration/integration_test.go
index 157938c9224e28ec7dd06cbb13b911ef3ce997e7..fb5782269a6b1c86f10d4bb27057ce7334dceae4 100644
--- a/integration/integration_test.go
+++ b/integration/integration_test.go
@@ -80,14 +80,6 @@ func testCheckCISWithKind(t *testing.T, testdataDir string) {
 	}
 }
 
-func TestCheckCIS13WithKind(t *testing.T) {
-	testCheckCISWithKind(t, "cis-1.3")
-}
-
-func TestCheckCIS14WithKind(t *testing.T) {
-	testCheckCISWithKind(t, "cis-1.4")
-}
-
 func TestCheckCIS15WithKind(t *testing.T) {
 	testCheckCISWithKind(t, "cis-1.5")
 }