From 5f34058dc789e481ccc3b49248ebb02763bdce40 Mon Sep 17 00:00:00 2001
From: James Ward <james@notjam.es>
Date: Mon, 6 Jan 2020 04:18:25 -0500
Subject: [PATCH] Support Linting YAML as part of Travis CI build (#554)

* add yamllint command to travis CI

installs and runs a linter across the YAML in the
project to ensure consistency in the written YAML.

this uses yamllint and the default yamllint config with
"truthy" and "line-length" disabled.

* run dos2unix on CRLF files

* YAMLLint: remove trailing spaces

* YAMLLint: add YAML document start

* YAMLLint: too many spaces around bracket

* YAMLLint: fix indentation

* YAMLLint: remove duplicate key

* YAMLLint: newline at end of file

* YAMLLint: too few spaces after comma

* YAMLLint: too many spaces after colon
---
 .goreleaser.yml                               |    5 +-
 .travis.yml                                   |    8 +-
 .yamllint.yaml                                |    6 +
 cfg/cis-1.3/config.yaml                       |    2 +-
 cfg/cis-1.3/master.yaml                       | 2956 ++++++++--------
 cfg/cis-1.3/node.yaml                         | 1024 +++---
 cfg/cis-1.4/config.yaml                       |    4 +-
 cfg/cis-1.4/master.yaml                       | 3098 ++++++++---------
 cfg/cis-1.4/node.yaml                         | 1024 +++---
 cfg/cis-1.5/config.yaml                       |    4 +-
 cfg/cis-1.5/controlplane.yaml                 |   52 +-
 cfg/cis-1.5/etcd.yaml                         |  246 +-
 cfg/cis-1.5/master.yaml                       | 2214 ++++++------
 cfg/cis-1.5/node.yaml                         |  952 ++---
 cfg/cis-1.5/policies.yaml                     |  464 +--
 cfg/config.yaml                               |    4 +-
 cfg/node_only.yaml                            |    2 +-
 cfg/rh-0.7/config.yaml                        |    2 +-
 cfg/rh-0.7/master.yaml                        | 2927 ++++++++--------
 cfg/rh-0.7/node.yaml                          |  752 ++--
 hack/debug.yaml                               |   65 +-
 hack/kind.yaml                                |   65 +-
 integration/testdata/add-tls-kind-k8s114.yaml |   22 +-
 job-eks.yaml                                  |   40 +-
 job-iks.yaml                                  |   43 +-
 job-master.yaml                               |   51 +-
 job-node.yaml                                 |   49 +-
 job.yaml                                      |   57 +-
 28 files changed, 8076 insertions(+), 8062 deletions(-)
 create mode 100644 .yamllint.yaml

diff --git a/.goreleaser.yml b/.goreleaser.yml
index f838562..1e89fe7 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,3 +1,4 @@
+---
 env:
   - GO111MODULE=on
   - KUBEBENCH_CFG=/etc/kube-bench/cfg
@@ -9,8 +10,8 @@ builds:
     goarch:
       - amd64
     ldflags:
-     - "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion={{.Version}}"
-     - "-X github.com/aquasecurity/kube-bench/cmd.cfgDir={{.Env.KUBEBENCH_CFG}}"
+      - "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion={{.Version}}"
+      - "-X github.com/aquasecurity/kube-bench/cmd.cfgDir={{.Env.KUBEBENCH_CFG}}"
 # Archive customization
 archive:
   format: tar.gz
diff --git a/.travis.yml b/.travis.yml
index e089304..423cfa2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,7 +2,7 @@
 language: go
 
 services:
-  - docker 
+  - docker
 
 notifications:
   email: false
@@ -10,18 +10,20 @@ notifications:
 before_install:
   - sudo apt-get -qq update
   - sudo apt-get install -y rpm
+  - pip install --user yamllint==1.18.0
   - gem install --no-ri --no-rdoc fpm
   - go get -t -v ./...
 
 script:
+  - yamllint -c ./.yamllint.yaml .
   - GO111MODULE=on go test ./...
   - IMAGE_NAME=kube-bench make build-docker
-  - docker run -v `pwd`:/host kube-bench install 
+  - docker run -v `pwd`:/host kube-bench install
   - test -d cfg
   - test -f kube-bench
   - make tests
   - make integration-tests
-  
+
 after_success:
   - bash <(curl -s https://codecov.io/bash)
 deploy:
diff --git a/.yamllint.yaml b/.yamllint.yaml
new file mode 100644
index 0000000..34c9366
--- /dev/null
+++ b/.yamllint.yaml
@@ -0,0 +1,6 @@
+---
+extends: default
+
+rules:
+  line-length: disable
+  truthy: disable
diff --git a/cfg/cis-1.3/config.yaml b/cfg/cis-1.3/config.yaml
index 4cbf4cf..b783945 100644
--- a/cfg/cis-1.3/config.yaml
+++ b/cfg/cis-1.3/config.yaml
@@ -1,2 +1,2 @@
 ---
-## Version-specific settings that override the values in cfg/config.yaml
\ No newline at end of file
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/cis-1.3/master.yaml b/cfg/cis-1.3/master.yaml
index c9b0ae3..8dbbe0f 100644
--- a/cfg/cis-1.3/master.yaml
+++ b/cfg/cis-1.3/master.yaml
@@ -5,1481 +5,1481 @@ id: 1
 text: "Master Node Security Configuration"
 type: "master"
 groups:
-- id: 1.1
-  text: "API Server"
-  checks:
-  - id: 1.1.1
-    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--anonymous-auth"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --anonymous-auth=false
-    scored: true
-
-  - id: 1.1.2
-    text: "Ensure that the --basic-auth-file argument is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--basic-auth-file"
-        set: false
-    remediation: |
-      Follow the documentation and configure alternate mechanisms for authentication. Then,
-      edit the API server pod specification file $apiserverconf
-      on the master node and remove the --basic-auth-file=<filename>
-      parameter.
-    scored: true
-
-  - id: 1.1.3
-    text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag:  "--insecure-allow-any-token"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and remove the --insecure-allow-any-token
-      parameter.
-    scored: true
-
-  - id: 1.1.4
-    text: "Ensure that the --kubelet-https argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--kubelet-https"
-        compare:
-          op: eq
-          value: true
-        set: true
-      - flag: "--kubelet-https"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and remove the --kubelet-https parameter.
-    scored: true
-
-  - id: 1.1.5
-    text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--insecure-bind-address"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and remove the --insecure-bind-address
-      parameter.
-    scored: true
-
-  - id: 1.1.6
-    text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--insecure-port"
-        compare:
-          op: eq
-          value: 0
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      apiserver.yaml on the master node and set the below parameter.
-      --insecure-port=0
-    scored: true
-
-  - id: 1.1.7
-    text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-        - flag:  "--secure-port"
-          compare:
-            op: gt
-            value: 0
-          set: true
-        - flag: "--secure-port"
-          set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and either remove the --secure-port parameter or
-      set it to a different (non-zero) desired port.
-    scored: true
-
-  - id: 1.1.8
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --profiling=false
-    scored: true
-
-  - id: 1.1.9
-    text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--repair-malformed-updates"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --repair-malformed-updates=false
-    scored: true
-
-  - id: 1.1.10
-    text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: nothave
-          value: AlwaysAdmit
-        set: true
-      - flag: "--enable-admission-plugins"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that does not include AlwaysAdmit.
-    scored: true
-
-  - id: 1.1.11
-    text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "AlwaysPullImages"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins to
-      include AlwaysPullImages.
-      --enable-admission-plugins=...,AlwaysPullImages,...
-    scored: true
-
-  - id: 1.1.12
-    text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "DenyEscalatingExec"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that includes DenyEscalatingExec.
-      --enable-admission-plugins=...,DenyEscalatingExec,...
-    scored: true
-
-  - id: 1.1.13
-    text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "SecurityContextDeny"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to
-      include SecurityContextDeny.
-      --enable-admission-plugins=...,SecurityContextDeny,...
-    scored: true
-
-  - id: 1.1.14
-    text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--disable-admission-plugins"
-        compare:
-          op: nothave
-          value: "NamespaceLifecycle"
-        set: true
-      - flag: "--disable-admission-plugins"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --disable-admission-plugins parameter to
-      ensure it does not include NamespaceLifecycle.
-      --disable-admission-plugins=...,NamespaceLifecycle,...
-    scored: true
-
-  - id: 1.1.15
-    text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-path"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --audit-log-path parameter to a suitable
-      path and file where you would like audit logs to be written, for example:
-      --audit-log-path=/var/log/apiserver/audit.log
-    scored: true
-
-  - id: 1.1.16
-    text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxage"
-        compare:
-          op: gte
-          value: 30
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --audit-log-maxage parameter to 30 or
-      as an appropriate number of days: --audit-log-maxage=30
-    scored: true
-
-  - id: 1.1.17
-    text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxbackup"
-        compare:
-          op: gte
-          value: 10
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --audit-log-maxbackup parameter to 10
-      or to an appropriate value.
-      --audit-log-maxbackup=10
-    scored: true
-
-  - id: 1.1.18
-    text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxsize"
-        compare:
-          op: gte
-          value: 100
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --audit-log-maxsize parameter to an
-      appropriate size in MB. For example, to set it as 100 MB:
-      --audit-log-maxsize=100
-    scored: true
-
-  - id: 1.1.19
-    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--authorization-mode"
-        compare:
-          op: nothave
-          value: "AlwaysAllow"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --authorization-mode parameter to
-      values other than AlwaysAllow. One such example could be as below.
-      --authorization-mode=RBAC
-    scored: true
-
-  - id: 1.1.20
-    text: "Ensure that the --token-auth-file parameter is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--token-auth-file"
-        set: false
-    remediation: |
-      Follow the documentation and configure alternate mechanisms for authentication. Then,
-      edit the API server pod specification file $apiserverconf
-      on the master node and remove the --token-auth-file=<filename>
-      parameter.
-    scored: true
-
-  - id: 1.1.21
-    text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--kubelet-certificate-authority"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and setup the TLS connection between the
-      apiserver and kubelets. Then, edit the API server pod specification file
-      $apiserverconf on the master node and set the --kubelet-certificate-authority
-      parameter to the path to the cert file for the certificate authority.
-      --kubelet-certificate-authority=<ca-string>
-    scored: true
-
-  - id: 1.1.22
-    text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--kubelet-client-certificate"
-        set: true
-      - flag: "--kubelet-client-key"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the
-      apiserver and kubelets. Then, edit API server pod specification file
-      $apiserverconf on the master node and set the
-      kubelet client certificate and key parameters as below.
-      --kubelet-client-certificate=<path/to/client-certificate-file>
-      --kubelet-client-key=<path/to/client-key-file>
-    scored: true
-
-  - id: 1.1.23
-    text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--service-account-lookup"
-        compare:
-          op: eq
-          value: true
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --service-account-lookup=true
-    scored: true
-
-  - id: 1.1.24
-    text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "PodSecurityPolicy"
-        set: true
-    remediation: |
-      Follow the documentation and create Pod Security Policy objects as per your environment.
-      Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that includes PodSecurityPolicy :
-      --enable-admission-plugins=...,PodSecurityPolicy,...
-      Then restart the API Server.
-    scored: true
-
-  - id: 1.1.25
-    text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--service-account-key-file"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --service-account-key-file parameter
-      to the public key file for service accounts:
-      --service-account-key-file=<filename>
-    scored: true
-
-  - id: 1.1.26
-    text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as
-      appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--etcd-certfile"
-        set: true
-      - flag: "--etcd-keyfile"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the
-      apiserver and etcd. Then, edit the API server pod specification file
-      $apiserverconf on the master node and set the etcd
-      certificate and key file parameters.
-      --etcd-certfile=<path/to/client-certificate-file>
-      --etcd-keyfile=<path/to/client-key-file>
-    scored: true
-
-  - id: 1.1.27
-    text: "Ensure that the admission control plugin ServiceAccount is set(Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "ServiceAccount"
-        set: true
-      - flag: "--enable-admission-plugins"
-        set: false
-    remediation: |
-      Follow the documentation and create ServiceAccount objects as per your environment.
-      Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that includes ServiceAccount.
-      --enable-admission-plugins=...,ServiceAccount,...
-    scored: true
-
-  - id: 1.1.28
-    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
-    as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--tls-cert-file"
-        set: true
-      - flag: "--tls-private-key-file"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-      Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the TLS certificate and private key file
-      parameters.
-      --tls-cert-file=<path/to/tls-certificate-file>
-      --tls-private-key-file=<path/to/tls-key-file>
-    scored: true
-
-  - id: 1.1.29
-    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--client-ca-file"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-      Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the client certificate authority file.
-      --client-ca-file=<path/to/client-ca-file>
-    scored: true
-
-  - id: 1.1.30
-    text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--tls-cipher-suites"
-        compare:
-          op: has
-          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-    scored: false
-
-  - id: 1.1.31
-    text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--etcd-cafile"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the
-      apiserver and etcd. Then, edit the API server pod specification file
-      $apiserverconf on the master node and set the etcd
-      certificate authority file parameter.
-      --etcd-cafile=<path/to/ca-file>
-    scored: true
-
-  - id: 1.1.32
-    text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--authorization-mode"
-        compare:
-          op: has
-          value: "Node"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --authorization-mode parameter to a
-      value that includes Node.
-      --authorization-mode=Node,RBAC
-    scored: true
-
-  - id: 1.1.33
-    text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "NodeRestriction"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and configure NodeRestriction plug-in on
-      kubelets. Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that includes NodeRestriction.
-      --enable-admission-plugins=...,NodeRestriction,...
-    scored: true
-
-  - id: 1.1.34
-    text: "Ensure that the --experimental-encryption-provider-config argument is
-    set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--experimental-encryption-provider-config"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and configure a EncryptionConfig file.
-      Then, edit the API server pod specification file $apiserverconf on the
-      master node and set the --experimental-encryption-provider-config parameter
-      to the path of that file:
-      --experimental-encryption-provider-config=</path/to/EncryptionConfig/File>
-    scored: true
-
-  - id: 1.1.35
-    text: "Ensure that the encryption provider is set to aescbc (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file,
-      choose aescbc as the encryption provider.
-      For example,
-      kind: EncryptionConfig
-      apiVersion: v1
-      resources:
-        - resources:
-          - secrets
-            providers:
-            - aescbc:
-                keys:
-                - name: key1
-                  secret: <32-byte base64-encoded secret>
-    scored: true
-
-  - id: 1.1.36
-    text: "Ensure that the admission control plugin EventRateLimit is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "EventRateLimit"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set the desired limits in a
-      configuration file. Then, edit the API server pod specification file
-      $apiserverconf and set the below parameters.
-      --enable-admission-plugins=...,EventRateLimit,...
-      --admission-control-config-file=<path/to/configuration/file>
-    scored: true
-
-  - id: 1.1.37a
-    text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--feature-gates"
-        compare:
-          op: nothave
-          value: "AdvancedAuditing=false"
-        set: true
-      - flag: "--feature-gates"
-        set: false
-    remediation: |
-      Follow the Kubernetes documentation and set the desired audit policy in the
-      /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
-      and set the below parameters.
-      --audit-policy-file=/etc/kubernetes/audit-policy.yaml
-    scored: true
-
-  - id: 1.1.37b
-    text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-policy-file"
-        compare:
-          op: eq
-          value: "/etc/kubernetes/audit-policy.yaml"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set the desired audit policy in the
-      /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
-      and set the below parameters.
-      --audit-policy-file=/etc/kubernetes/audit-policy.yaml
-    scored: true
-
-  - id: 1.1.38
-    text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--request-timeout"
-        set: false
-      - flag: "--request-timeout"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      and set the below parameter as appropriate and if needed. For example,
-      --request-timeout=300s
-    scored: true
-
-  - id: 1.1.39
-    text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers ( Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--tls-cipher-suites"
-        compare:
-          op: eq
-          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
-        set: true
-    remediation: |
-      Edit the API server pod specification file /etc/kubernetes/manifests
-      kube-apiserver.yaml on the master node and set the below parameter.
-      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-    scored: false
-
-- id: 1.2
-  text: "Scheduler"
-  checks:
-  - id: 1.2.1
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the Scheduler pod specification file $schedulerconf
-      file on the master node and set the below parameter.
-      --profiling=false
-    scored: true
-
-  - id: 1.2.2
-    text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
-    audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--address"
-        compare:
-          op: eq
-          value: "127.0.0.1"
-        set: true
-      - flag: "--address"
-        set: false
-    remediation: |
-      Edit the Scheduler pod specification file $schedulerconf
-      file on the master node and ensure the correct value for the
-      --address parameter.
-    scored: true
-
-- id: 1.3
-  text: "Controller Manager"
-  checks:
-  - id: 1.3.1
-    text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--terminated-pod-gc-threshold"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
-      --terminated-pod-gc-threshold=10
-    scored: true
-
-  - id: 1.3.2
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node and set the below parameter.
-      --profiling=false
-    scored: true
-
-  - id: 1.3.3
-    text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--use-service-account-credentials"
-        compare:
-          op: noteq
-          value: false
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node to set the below parameter.
-      --use-service-account-credentials=true
-    scored: true
-
-  - id: 1.3.4
-    text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--service-account-private-key-file"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node and set the --service-account-private-
-      key-file parameter to the private key file for service accounts.
-      --service-account-private-key-file=<filename>
-    scored: true
-
-  - id: 1.3.5
-    text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--root-ca-file"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node and set the --root-ca-file parameter to
-      the certificate bundle file.
-      --root-ca-file=<path/to/file>
-    scored: true
-
-  - id: 1.3.6
-    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--feature-gates"
-        compare:
-          op: eq
-          value: "RotateKubeletServerCertificate=true"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      controller-manager.yaml on the master node and set the --feature-gates parameter to
-      include RotateKubeletServerCertificate=true.
-      --feature-gates=RotateKubeletServerCertificate=true
-    scored: true
-
-  - id: 1.3.7
-    text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--address"
-        compare:
-          op: eq
-          value: "127.0.0.1"
-        set: true
-      - flag: "--address"
-        set: false
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      controller-manager.yaml on the master node and ensure the correct value
-      for the --address parameter.
-    scored: true
-
-- id: 1.4
-  text: "Configuration Files"
-  checks:
-  - id: 1.4.1
-    text: "Ensure that the API server pod specification file permissions are
-    set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 $apiserverconf
-    scored: true
-
-  - id: 1.4.2
-    text: "Ensure that the API server pod specification file ownership is set to
-    root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root $apiserverconf
-    scored: true
-
-  - id: 1.4.3
-    text: "Ensure that the controller manager pod specification file
-    permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 $controllermanagerconf
-    scored: true
-
-  - id: 1.4.4
-    text: "Ensure that the controller manager pod specification file
-    ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root $controllermanagerconf
-    scored: true
-
-  - id: 1.4.5
-    text: "Ensure that the scheduler pod specification file permissions are set
-    to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 $schedulerconf
-    scored: true
-
-  - id: 1.4.6
-    text: "Ensure that the scheduler pod specification file ownership is set to
-    root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
-    tests:
-      test_items:
-        - flag: "root:root"
-          compare:
-            op: eq
-            value: "root:root"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root $schedulerconf
-    scored: true
-
-  - id: 1.4.7
-    text: "Ensure that the etcd pod specification file permissions are set to
-    644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 $etcdconf
-    scored: true
-
-  - id: 1.4.8
-    text: "Ensure that the etcd pod specification file ownership is set to
-    root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root $etcdconf
-    scored: true
-
-  - id: 1.4.9
-    text: "Ensure that the Container Network Interface file permissions are
-    set to 644 or more restrictive (Not Scored)"
-    audit: "stat -c %a <path/to/cni/files>"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 <path/to/cni/files>
-    scored: true
-
-  - id: 1.4.10
-    text: "Ensure that the Container Network Interface file ownership is set
-    to root:root (Not Scored)"
-    audit: "stat -c %U:%G <path/to/cni/files>"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root <path/to/cni/files>
-    scored: true
-
-  - id: 1.4.11
-    text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
-    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
-    tests:
-      test_items:
-      - flag: "700"
-        compare:
-          op: eq
-          value: "700"
-        set: true
-    remediation: |
-      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-      from the below command:
-      ps -ef | grep $etcdbin
-      Run the below command (based on the etcd data directory found above). For example,
-      chmod 700 /var/lib/etcd
-    scored: true
-
-  - id: 1.4.12
-    text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
-    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
-    tests:
-      test_items:
-      - flag: "etcd:etcd"
-        set: true
-    remediation: |
-      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-      from the below command:
-      ps -ef | grep $etcdbin
-      Run the below command (based on the etcd data directory found above). For example,
-      chown etcd:etcd /var/lib/etcd
-    scored: true
-
-  - id: 1.4.13
-    text: "Ensure that the admin.conf file permissions are set to 644 or
-    more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 /etc/kubernetes/admin.conf
-    scored: true
-
-  - id: 1.4.14
-    text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root /etc/kubernetes/admin.conf
-    scored: true
-
-  - id: 1.4.15
-    text: "Ensure that the scheduler.conf file permissions are set to 644 or
-    more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node. For example, chmod 644 /etc/kubernetes/scheduler.conf
-    scored: true
-
-  - id: 1.4.16
-    text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node. For example, chown root:root /etc/kubernetes/scheduler.conf
-    scored: true
-
-  - id: 1.4.17
-    text: "Ensure that the controller-manager.conf file permissions are set
-    to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf
-    scored: true
-
-  - id: 1.4.18
-    text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node. For example, chown root:root /etc/kubernetes/controller-manager.conf
-    scored: true
-
-- id: 1.5
-  text: "etcd"
-  checks:
-  - id: 1.5.1
-    text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--cert-file"
-        set: true
-      - flag:  "--key-file"
-        set: true
-    remediation: |
-      Follow the etcd service documentation and configure TLS encryption.
-      Then, edit the etcd pod specification file $etcdconf on the
-      master node and set the below parameters.
-      --ca-file=</path/to/ca-file>
-      --key-file=</path/to/key-file>
-    scored: true
-
-  - id: 1.5.2
-    text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--client-cert-auth"
-        compare:
-          op: noteq
-          value: false
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and set the below parameter.
-      --client-cert-auth="true"
-    scored: true
-
-  - id: 1.5.3
-    text: "Ensure that the --auto-tls argument is not set to true (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--auto-tls"
-        set: false
-      - flag: "--auto-tls"
-        compare:
-          op: eq
-          value: false
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and either remove the --auto-tls parameter or set it to false.
-        --auto-tls=false
-    scored: true
-
-  - id: 1.5.4
-    text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
-    set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--peer-cert-file"
-        set: true
-      - flag: "--peer-key-file"
-        set: true
-    remediation: |
-      Follow the etcd service documentation and configure peer TLS encryption as appropriate
-      for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
-      master node and set the below parameters.
-      --peer-client-file=</path/to/peer-cert-file>
-      --peer-key-file=</path/to/peer-key-file>
-    scored: true
-
-  - id: 1.5.5
-    text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--peer-client-cert-auth"
-        compare:
-          op: eq
-          value: true
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and set the below parameter.
-      --peer-client-cert-auth=true
-    scored: true
-
-  - id: 1.5.6
-    text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--peer-auto-tls"
-        set: false
-      - flag: "--peer-auto-tls"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and either remove the --peer-auto-tls parameter or set it to false.
-      --peer-auto-tls=false
-    scored: true
-
-  - id: 1.5.7
-    text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    type: "manual"
-    tests:
-      test_items:
-      - flag: "--trusted-ca-file"
-        set: true
-    remediation: |
-      [Manual test]
-      Follow the etcd documentation and create a dedicated certificate authority setup for the
-      etcd service.
-      Then, edit the etcd pod specification file $etcdconf on the
-      master node and set the below parameter.
-      --trusted-ca-file=</path/to/ca-file>
-    scored: false
-
-- id: 1.6
-  text: "General Security Primitives"
-  checks:
-  - id: 1.6.1
-    text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Remove any unneeded clusterrolebindings :
-      kubectl delete clusterrolebinding [name]
-    scored: false
-
-  - id: 1.6.2
-    text: "Create administrative boundaries between resources using namespaces (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the documentation and create namespaces for objects in your deployment as you
-      need them.
-    scored: false
-
-  - id: 1.6.3
-    text: "Create network segmentation using Network Policies (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the documentation and create NetworkPolicy objects as you need them.
-    scored: false
-
-  - id: 1.6.4
-    text: "Ensure that the seccomp profile is set to docker/default in your pod
-    definitions (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
-      would need to enable alpha features in the apiserver by passing "--feature-
-      gates=AllAlpha=true" argument.
-      Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
-      parameter to "--feature-gates=AllAlpha=true"
-      KUBE_API_ARGS="--feature-gates=AllAlpha=true"
-      Based on your system, restart the kube-apiserver service. For example:
-      systemctl restart kube-apiserver.service
-      Use annotations to enable the docker/default seccomp profile in your pod definitions. An
-      example is as below:
-      apiVersion: v1
-      kind: Pod
-      metadata:
-        name: trustworthy-pod
-        annotations:
-          seccomp.security.alpha.kubernetes.io/pod: docker/default
-      spec:
-        containers:
-          - name: trustworthy-container
-            image: sotrustworthy:latest
-    scored: false
-
-  - id: 1.6.5
-    text: "Apply Security Context to Your Pods and Containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and apply security contexts to your pods. For a
-      suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
-      Containers.
-    scored: false
-
-  - id: 1.6.6
-    text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and setup image provenance.
-    scored: false
-
-  - id: 1.6.7
-    text: "Configure Network policies as appropriate (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and setup network policies as appropriate.
-      For example, you could create a "default" isolation policy for a Namespace by creating a
-      NetworkPolicy that selects all pods but does not allow any traffic:
-      apiVersion: networking.k8s.io/v1
-      kind: NetworkPolicy
-      metadata:
-        name: default-deny
-      spec:
-        podSelector:
-    scored: false
-
-  - id: 1.6.8
-    text: "Place compensating controls in the form of PSP and RBAC for
-    privileged containers usage (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster.
-    scored: false
-
-- id: 1.7
-  text: "PodSecurityPolicies"
-  checks:
-  - id: 1.7.1
-    text: "Do not admit privileged containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.2
-    text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-     [Manual test]
-     Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.3
-    text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.4
-    text: "Do not admit containers wishing to share the host network namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.5
-    text: "Do not admit containers with allowPrivilegeEscalation (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.6
-    text: "Do not admit root containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
-    scored: false
-
-  - id: 1.7.7
-    text: "Do not admit containers with dangerous capabilities (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
-    scored: false
+  - id: 1.1
+    text: "API Server"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --anonymous-auth=false
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the --basic-auth-file argument is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--basic-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the master node and remove the --basic-auth-file=<filename>
+          parameter.
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--insecure-allow-any-token"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and remove the --insecure-allow-any-token
+          parameter.
+        scored: true
+
+      - id: 1.1.4
+        text: "Ensure that the --kubelet-https argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--kubelet-https"
+              compare:
+                op: eq
+                value: true
+              set: true
+            - flag: "--kubelet-https"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and remove the --kubelet-https parameter.
+        scored: true
+
+      - id: 1.1.5
+        text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--insecure-bind-address"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and remove the --insecure-bind-address
+          parameter.
+        scored: true
+
+      - id: 1.1.6
+        text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--insecure-port"
+              compare:
+                op: eq
+                value: 0
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          apiserver.yaml on the master node and set the below parameter.
+          --insecure-port=0
+        scored: true
+
+      - id: 1.1.7
+        text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+              set: true
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and either remove the --secure-port parameter or
+          set it to a different (non-zero) desired port.
+        scored: true
+
+      - id: 1.1.8
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.1.9
+        text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--repair-malformed-updates"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --repair-malformed-updates=false
+        scored: true
+
+      - id: 1.1.10
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: AlwaysAdmit
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that does not include AlwaysAdmit.
+        scored: true
+
+      - id: 1.1.11
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "AlwaysPullImages"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins to
+          include AlwaysPullImages.
+          --enable-admission-plugins=...,AlwaysPullImages,...
+        scored: true
+
+      - id: 1.1.12
+        text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "DenyEscalatingExec"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes DenyEscalatingExec.
+          --enable-admission-plugins=...,DenyEscalatingExec,...
+        scored: true
+
+      - id: 1.1.13
+        text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to
+          include SecurityContextDeny.
+          --enable-admission-plugins=...,SecurityContextDeny,...
+        scored: true
+
+      - id: 1.1.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+              set: true
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+          --disable-admission-plugins=...,NamespaceLifecycle,...
+        scored: true
+
+      - id: 1.1.15
+        text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-path"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-path parameter to a suitable
+          path and file where you would like audit logs to be written, for example:
+          --audit-log-path=/var/log/apiserver/audit.log
+        scored: true
+
+      - id: 1.1.16
+        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxage"
+              compare:
+                op: gte
+                value: 30
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxage parameter to 30 or
+          as an appropriate number of days: --audit-log-maxage=30
+        scored: true
+
+      - id: 1.1.17
+        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxbackup"
+              compare:
+                op: gte
+                value: 10
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxbackup parameter to 10
+          or to an appropriate value.
+          --audit-log-maxbackup=10
+        scored: true
+
+      - id: 1.1.18
+        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxsize"
+              compare:
+                op: gte
+                value: 100
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxsize parameter to an
+          appropriate size in MB. For example, to set it as 100 MB:
+          --audit-log-maxsize=100
+        scored: true
+
+      - id: 1.1.19
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --authorization-mode parameter to
+          values other than AlwaysAllow. One such example could be as below.
+          --authorization-mode=RBAC
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the --token-auth-file parameter is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the master node and remove the --token-auth-file=<filename>
+          parameter.
+        scored: true
+
+      - id: 1.1.21
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and setup the TLS connection between the
+          apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the master node and set the --kubelet-certificate-authority
+          parameter to the path to the cert file for the certificate authority.
+          --kubelet-certificate-authority=<ca-string>
+        scored: true
+
+      - id: 1.1.22
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+              set: true
+            - flag: "--kubelet-client-key"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and kubelets. Then, edit API server pod specification file
+          $apiserverconf on the master node and set the
+          kubelet client certificate and key parameters as below.
+          --kubelet-client-certificate=<path/to/client-certificate-file>
+          --kubelet-client-key=<path/to/client-key-file>
+        scored: true
+
+      - id: 1.1.23
+        text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --service-account-lookup=true
+        scored: true
+
+      - id: 1.1.24
+        text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+              set: true
+        remediation: |
+          Follow the documentation and create Pod Security Policy objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes PodSecurityPolicy:
+          --enable-admission-plugins=...,PodSecurityPolicy,...
+          Then restart the API Server.
+        scored: true
+
+      - id: 1.1.25
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --service-account-key-file parameter
+          to the public key file for service accounts:
+          --service-account-key-file=<filename>
+        scored: true
+
+      - id: 1.1.26
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as
+          appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+              set: true
+            - flag: "--etcd-keyfile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and etcd. Then, edit the API server pod specification file
+          $apiserverconf on the master node and set the etcd
+          certificate and key file parameters.
+          --etcd-certfile=<path/to/client-certificate-file>
+          --etcd-keyfile=<path/to/client-key-file>
+        scored: true
+
+      - id: 1.1.27
+        text: "Ensure that the admission control plugin ServiceAccount is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "ServiceAccount"
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes ServiceAccount.
+          --enable-admission-plugins=...,ServiceAccount,...
+        scored: true
+
+      - id: 1.1.28
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
+          as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+              set: true
+            - flag: "--tls-private-key-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the TLS certificate and private key file
+          parameters.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+        scored: true
+
+      - id: 1.1.29
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the client certificate authority file.
+          --client-ca-file=<path/to/client-ca-file>
+        scored: true
+
+      - id: 1.1.30
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: has
+                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        scored: false
+
+      - id: 1.1.31
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and etcd. Then, edit the API server pod specification file
+          $apiserverconf on the master node and set the etcd
+          certificate authority file parameter.
+          --etcd-cafile=<path/to/ca-file>
+        scored: true
+
+      - id: 1.1.32
+        text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --authorization-mode parameter to a
+          value that includes Node.
+          --authorization-mode=Node,RBAC
+        scored: true
+
+      - id: 1.1.33
+        text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and configure NodeRestriction plug-in on
+          kubelets. Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes NodeRestriction.
+          --enable-admission-plugins=...,NodeRestriction,...
+        scored: true
+
+      - id: 1.1.34
+        text: "Ensure that the --experimental-encryption-provider-config argument is
+          set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--experimental-encryption-provider-config"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and configure a EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf on the
+          master node and set the --experimental-encryption-provider-config parameter
+          to the path of that file:
+          --experimental-encryption-provider-config=</path/to/EncryptionConfig/File>
+        scored: true
+
+      - id: 1.1.35
+        text: "Ensure that the encryption provider is set to aescbc (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file,
+          choose aescbc as the encryption provider.
+          For example,
+          kind: EncryptionConfig
+          apiVersion: v1
+          resources:
+            - resources:
+              - secrets
+              providers:
+                - aescbc:
+                    keys:
+                    - name: key1
+                      secret: <32-byte base64-encoded secret>
+        scored: true
+
+      - id: 1.1.36
+        text: "Ensure that the admission control plugin EventRateLimit is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "EventRateLimit"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set the desired limits in a
+          configuration file. Then, edit the API server pod specification file
+          $apiserverconf and set the below parameters.
+          --enable-admission-plugins=...,EventRateLimit,...
+          --admission-control-config-file=<path/to/configuration/file>
+        scored: true
+
+      - id: 1.1.37a
+        text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "AdvancedAuditing=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Follow the Kubernetes documentation and set the desired audit policy in the
+          /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
+          and set the below parameters.
+          --audit-policy-file=/etc/kubernetes/audit-policy.yaml
+        scored: true
+
+      - id: 1.1.37b
+        text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-policy-file"
+              compare:
+                op: eq
+                value: "/etc/kubernetes/audit-policy.yaml"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set the desired audit policy in the
+          /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
+          and set the below parameters.
+          --audit-policy-file=/etc/kubernetes/audit-policy.yaml
+        scored: true
+
+      - id: 1.1.38
+        text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--request-timeout"
+              set: false
+            - flag: "--request-timeout"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          and set the below parameter as appropriate and if needed. For example,
+          --request-timeout=300s
+        scored: true
+
+      - id: 1.1.39
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: eq
+                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+              set: true
+        remediation: |
+          Edit the API server pod specification file /etc/kubernetes/manifests
+          kube-apiserver.yaml on the master node and set the below parameter.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        scored: false
+
+  - id: 1.2
+    text: "Scheduler"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          file on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.2.2
+        text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          file on the master node and ensure the correct value for the
+          --address parameter.
+        scored: true
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
+          --terminated-pod-gc-threshold=10
+        scored: true
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --service-account-private-
+          key-file parameter to the private key file for service accounts.
+          --service-account-private-key-file=<filename>
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --root-ca-file parameter to
+          the certificate bundle file.
+          --root-ca-file=<path/to/file>
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: eq
+                value: "RotateKubeletServerCertificate=true"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          controller-manager.yaml on the master node and set the --feature-gates parameter to
+          include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          controller-manager.yaml on the master node and ensure the correct value
+          for the --address parameter.
+        scored: true
+
+  - id: 1.4
+    text: "Configuration Files"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the API server pod specification file permissions are
+          set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $apiserverconf
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the API server pod specification file ownership is set to
+          root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $apiserverconf
+        scored: true
+
+      - id: 1.4.3
+        text: "Ensure that the controller manager pod specification file
+          permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $controllermanagerconf
+        scored: true
+
+      - id: 1.4.4
+        text: "Ensure that the controller manager pod specification file
+          ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $controllermanagerconf
+        scored: true
+
+      - id: 1.4.5
+        text: "Ensure that the scheduler pod specification file permissions are set
+          to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $schedulerconf
+        scored: true
+
+      - id: 1.4.6
+        text: "Ensure that the scheduler pod specification file ownership is set to
+          root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $schedulerconf
+        scored: true
+
+      - id: 1.4.7
+        text: "Ensure that the etcd pod specification file permissions are set to
+        644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $etcdconf
+        scored: true
+
+      - id: 1.4.8
+        text: "Ensure that the etcd pod specification file ownership is set to
+        root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $etcdconf
+        scored: true
+
+      - id: 1.4.9
+        text: "Ensure that the Container Network Interface file permissions are
+        set to 644 or more restrictive (Not Scored)"
+        audit: "stat -c %a <path/to/cni/files>"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 <path/to/cni/files>
+        scored: false
+
+      - id: 1.4.10
+        text: "Ensure that the Container Network Interface file ownership is set
+        to root:root (Not Scored)"
+        audit: "stat -c %U:%G <path/to/cni/files>"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root <path/to/cni/files>
+        scored: false
+
+      - id: 1.4.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
+        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
+        tests:
+          test_items:
+            - flag: "700"
+              compare:
+                op: eq
+                value: "700"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
+          from the below command:
+          ps -ef | grep $etcdbin
+          Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 /var/lib/etcd
+        scored: true
+
+      - id: 1.4.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
+        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
+        tests:
+          test_items:
+            - flag: "etcd:etcd"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
+          from the below command:
+          ps -ef | grep $etcdbin
+          Run the below command (based on the etcd data directory found above). For example,
+          chown etcd:etcd /var/lib/etcd
+        scored: true
+
+      - id: 1.4.13
+        text: "Ensure that the admin.conf file permissions are set to 644 or
+        more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 /etc/kubernetes/admin.conf
+        scored: true
+
+      - id: 1.4.14
+        text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root /etc/kubernetes/admin.conf
+        scored: true
+
+      - id: 1.4.15
+        text: "Ensure that the scheduler.conf file permissions are set to 644 or
+        more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node. For example, chmod 644 /etc/kubernetes/scheduler.conf
+        scored: true
+
+      - id: 1.4.16
+        text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node. For example, chown root:root /etc/kubernetes/scheduler.conf
+        scored: true
+
+      - id: 1.4.17
+        text: "Ensure that the controller-manager.conf file permissions are set
+        to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf
+        scored: true
+
+      - id: 1.4.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node. For example, chown root:root /etc/kubernetes/controller-manager.conf
+        scored: true
+
+  - id: 1.5
+    text: "etcd"
+    checks:
+      - id: 1.5.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--cert-file"
+              set: true
+            - flag: "--key-file"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure TLS encryption.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --cert-file=</path/to/cert-file>
+          --key-file=</path/to/key-file>
+        scored: true
+
+      - id: 1.5.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              compare:
+                op: noteq
+                value: false
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --client-cert-auth="true"
+        scored: true
+
+      - id: 1.5.3
+        text: "Ensure that the --auto-tls argument is not set to true (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              set: false
+            - flag: "--auto-tls"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          --auto-tls=false
+        scored: true
+
+      - id: 1.5.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+        set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              set: true
+            - flag: "--peer-key-file"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-cert-file=</path/to/peer-cert-file>
+          --peer-key-file=</path/to/peer-key-file>
+        scored: true
+
+      - id: 1.5.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --peer-client-cert-auth=true
+        scored: true
+
+      - id: 1.5.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              set: false
+            - flag: "--peer-auto-tls"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+          --peer-auto-tls=false
+        scored: true
+
+      - id: 1.5.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        type: "manual"
+        tests:
+          test_items:
+            - flag: "--trusted-ca-file"
+              set: true
+        remediation: |
+          [Manual test]
+          Follow the etcd documentation and create a dedicated certificate authority setup for the
+          etcd service.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameter.
+          --trusted-ca-file=</path/to/ca-file>
+        scored: false
+
+  - id: 1.6
+    text: "General Security Primitives"
+    checks:
+      - id: 1.6.1
+        text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Remove any unneeded clusterrolebindings :
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 1.6.2
+        text: "Create administrative boundaries between resources using namespaces (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the documentation and create namespaces for objects in your deployment as you
+          need them.
+        scored: false
+
+      - id: 1.6.3
+        text: "Create network segmentation using Network Policies (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+      - id: 1.6.4
+        text: "Ensure that the seccomp profile is set to docker/default in your pod
+        definitions (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+          would need to enable alpha features in the apiserver by passing "--feature-
+          gates=AllAlpha=true" argument.
+          Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
+          parameter to "--feature-gates=AllAlpha=true"
+          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+          Based on your system, restart the kube-apiserver service. For example:
+          systemctl restart kube-apiserver.service
+          Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+          example is as below:
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            name: trustworthy-pod
+            annotations:
+              seccomp.security.alpha.kubernetes.io/pod: docker/default
+          spec:
+            containers:
+              - name: trustworthy-container
+                image: sotrustworthy:latest
+        scored: false
+
+      - id: 1.6.5
+        text: "Apply Security Context to Your Pods and Containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 1.6.6
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and setup image provenance.
+        scored: false
+
+      - id: 1.6.7
+        text: "Configure Network policies as appropriate (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and setup network policies as appropriate.
+          For example, you could create a "default" isolation policy for a Namespace by creating a
+          NetworkPolicy that selects all pods but does not allow any traffic:
+          apiVersion: networking.k8s.io/v1
+          kind: NetworkPolicy
+          metadata:
+            name: default-deny
+          spec:
+            podSelector:
+        scored: false
+
+      - id: 1.6.8
+        text: "Place compensating controls in the form of PSP and RBAC for
+        privileged containers usage (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster.
+        scored: false
+
+  - id: 1.7
+    text: "PodSecurityPolicies"
+    checks:
+      - id: 1.7.1
+        text: "Do not admit privileged containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.2
+        text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.3
+        text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.4
+        text: "Do not admit containers wishing to share the host network namespace (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.5
+        text: "Do not admit containers with allowPrivilegeEscalation (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.6
+        text: "Do not admit root containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
+        scored: false
+
+      - id: 1.7.7
+        text: "Do not admit containers with dangerous capabilities (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+        scored: false
diff --git a/cfg/cis-1.3/node.yaml b/cfg/cis-1.3/node.yaml
index be93267..813e86f 100644
--- a/cfg/cis-1.3/node.yaml
+++ b/cfg/cis-1.3/node.yaml
@@ -1,541 +1,541 @@
 ---
 controls:
-version: "1.11"  
+version: "1.11"
 id: "2"
 text: Worker Node Security Configuration
 type: "node"
 groups:
-- id: "2.1"
-  text: Kubelet
-  checks:
-  - id: 2.1.1
-    text: Ensure that the --allow-privileged argument is set to false (Scored)
-    audit: "/bin/ps -fC $kubeletbin "
-    tests:
-      test_items:
-      - flag: --allow-privileged
-        set: true
-        compare:
-          op: eq
-          value: false
-    remediation: |
-      Edit the kubelet service file $kubeletsvc
-      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --allow-privileged=false
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+  - id: "2.1"
+    text: Kubelet
+    checks:
+      - id: 2.1.1
+        text: Ensure that the --allow-privileged argument is set to false (Scored)
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --allow-privileged
+              set: true
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --allow-privileged=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.2
-    text: Ensure that the --anonymous-auth argument is set to false (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --anonymous-auth
-        path: '{.authentication.anonymous.enabled}'
-        set: true
-        compare:
-          op: eq
-          value: false
-    remediation: |
-      If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
-      false .
-      If using executable arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --anonymous-auth=false
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.2
+        text: Ensure that the --anonymous-auth argument is set to false (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --anonymous-auth
+              path: '{.authentication.anonymous.enabled}'
+              set: true
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false .
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.3
-    text: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --authorization-mode
-        path: '{.authorization.mode}'
-        set: true
-        compare:
-          op: nothave
-          value: AlwaysAllow
-    remediation: |
-      If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
-      If using executable arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_AUTHZ_ARGS variable.
-      --authorization-mode=Webhook
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.3
+        text: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              set: true
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.4
-    text: Ensure that the --client-ca-file argument is set as appropriate (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --client-ca-file
-        path: '{.authentication.x509.clientCAFile}'
-        set: true
-    remediation: |
-      If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
-      the location of the client CA file.
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_AUTHZ_ARGS variable.
-      --client-ca-file=<path/to/client-ca-file>
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.4
+        text: Ensure that the --client-ca-file argument is set as appropriate (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=<path/to/client-ca-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.5
-    text: Ensure that the --read-only-port argument is set to 0 (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --read-only-port
-        path: '{.readOnlyPort}'
-        set: true
-        compare:
-          op: eq
-          value: 0
-    remediation: |
-      If using a Kubelet config file, edit the file to set readOnlyPort to 0 .
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --read-only-port=0
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.5
+        text: Ensure that the --read-only-port argument is set to 0 (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --read-only-port
+              path: '{.readOnlyPort}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set readOnlyPort to 0 .
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.6
-    text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --streaming-connection-idle-timeout
-        path: '{.streamingConnectionIdleTimeout}'
-        set: true
-        compare:
-          op: noteq
-          value: 0
-      - flag: --streaming-connection-idle-timeout
-        path: '{.streamingConnectionIdleTimeout}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
-      value other than 0.
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --streaming-connection-idle-timeout=5m
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.6
+        text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: true
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.7
-    text: Ensure that the --protect-kernel-defaults argument is set to true (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --protect-kernel-defaults
-        path: '{.protectKernelDefaults}'
-        set: true
-        compare:
-          op: eq
-          value: true
-    remediation: |
-      If using a Kubelet config file, edit the file to set protectKernelDefaults: true .
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --protect-kernel-defaults=true
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.7
+        text: Ensure that the --protect-kernel-defaults argument is set to true (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.8
-    text: Ensure that the --make-iptables-util-chains argument is set to true (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --make-iptables-util-chains
-        path: '{.makeIPTablesUtilChains}'
-        set: true
-        compare:
-          op: eq
-          value: true
-      - flag: --make-iptables-util-chains
-        path: '{.makeIPTablesUtilChains}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true .
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      remove the --make-iptables-util-chains argument from the
-      KUBELET_SYSTEM_PODS_ARGS variable.
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.8
+        text: Ensure that the --make-iptables-util-chains argument is set to true (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.9
-    text: Ensure that the --hostname-override argument is not set (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --hostname-override
-        path: '{.hostnameOverride}'
-        set: false
-    remediation: |
-      Edit the kubelet service file $kubeletsvc
-      on each worker node and remove the --hostname-override argument from the
-      KUBELET_SYSTEM_PODS_ARGS variable.
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.9
+        text: Ensure that the --hostname-override argument is not set (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --hostname-override
+              path: '{.hostnameOverride}'
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.10
-    text: Ensure that the --event-qps argument is set to 0 (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --event-qps
-        path: '{.eventRecordQPS}'
-        set: true
-        compare:
-          op: eq
-          value: 0
-    remediation: |
-      If using a Kubelet config file, edit the file to set eventRecordQPS: 0 .
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --event-qps=0
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.10
+        text: Ensure that the --event-qps argument is set to 0 (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS: 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --event-qps=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.11
-    text: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --tls-cert-file
-        path: '{.tlsCertFile}'
-        set: true
-      - flag: --tls-private-key-file
-        path: '{.tlsPrivateKeyFile}'
-        set: true
-      bin_op: and
-    remediation: |
-      If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
-      file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
-      corresponding private key file.
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
-      --tls-cert-file=<path/to/tls-certificate-file>
-      file=<path/to/tls-key-file>
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.11
+        text: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+              set: true
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+              set: true
+          bin_op: and
+        remediation: |
+          If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
+          file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
+          corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.12
-    text: Ensure that the --cadvisor-port argument is set to 0 (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --cadvisor-port
-        path: '{.cadvisorPort}'
-        set: true
-        compare:
-          op: eq
-          value: 0
-      - flag: --cadvisor-port
-        path: '{.cadvisorPort}'
-        set: false
-      bin_op: or
-    remediation: |
-      Edit the kubelet service file $kubeletsvc
-      on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
-      --cadvisor-port=0
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.12
+        text: Ensure that the --cadvisor-port argument is set to 0 (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --cadvisor-port
+              path: '{.cadvisorPort}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+            - flag: --cadvisor-port
+              path: '{.cadvisorPort}'
+              set: false
+          bin_op: or
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
+          --cadvisor-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.13
-    text: Ensure that the --rotate-certificates argument is not set to false (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --rotate-certificates
-        path: '{.rotateCertificates}'
-        set: true
-        compare:
-          op: eq
-          value: true
-      - flag: --rotate-certificates
-        path: '{.rotateCertificates}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
-      If using command line arguments, edit the kubelet service file $kubeletsvc
-      on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.13
+        text: Ensure that the --rotate-certificates argument is not set to false (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
+          If using command line arguments, edit the kubelet service file $kubeletsvc
+          on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.14
-    text: Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: RotateKubeletServerCertificate
-        path: '{.featureGates.RotateKubeletServerCertificate}'
-        set: true
-        compare:
-          op: eq
-          value: true
-    remediation: |
-      Edit the kubelet service file $kubeletsvc
-      on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
-      --feature-gates=RotateKubeletServerCertificate=true
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.14
+        text: Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.15
-    text: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --tls-cipher-suites
-        path: '{.tlsCipherSuites}'
-        set: true
-        compare:
-          op: valid_elements
-          value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-    remediation: |
-      If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-      If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
-      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-    scored: false
+      - id: 2.1.15
+        text: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{.tlsCipherSuites}'
+              set: true
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        scored: false
 
-- id: "2.2"
-  text: Configuration Files
-  checks:
-  - id: 2.2.1
-    text: Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chmod 644 $kubeletkubeconfig
-    scored: true
+  - id: "2.2"
+    text: Configuration Files
+    checks:
+      - id: 2.2.1
+        text: Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chmod 644 $kubeletkubeconfig
+        scored: true
 
-  - id: 2.2.2
-    text: Ensure that the kubelet.conf file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-        compare:
-          op: eq
-          value: root:root
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chown root:root $kubeletkubeconfig
-    scored: true
+      - id: 2.2.2
+        text: Ensure that the kubelet.conf file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
 
-  - id: 2.2.3
-    text: Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chmod 644 $kubeletsvc
-    scored: true
+      - id: 2.2.3
+        text: Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chmod 644 $kubeletsvc
+        scored: true
 
-  - id: 2.2.4
-    text: Ensure that the kubelet service file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chown root:root $kubeletsvc
-    scored: true
+      - id: 2.2.4
+        text: Ensure that the kubelet service file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chown root:root $kubeletsvc
+        scored: true
 
-  - id: 2.2.5
-    text: Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)
-    audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chmod 644 $proxykubeconfig
-    scored: true
+      - id: 2.2.5
+        text: Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chmod 644 $proxykubeconfig
+        scored: true
 
-  - id: 2.2.6
-    text: Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chown root:root $proxykubeconfig
-    scored: true
+      - id: 2.2.6
+        text: Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chown root:root $proxykubeconfig
+        scored: true
 
-  - id: 2.2.7
-    text: Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)
-    type: manual
-    remediation: |
-      Run the following command to modify the file permissions of the --client-ca-file
-      chmod 644 <filename>
-    scored: true
+      - id: 2.2.7
+        text: Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)
+        type: manual
+        remediation: |
+          Run the following command to modify the file permissions of the --client-ca-file
+          chmod 644 <filename>
+        scored: true
 
-  - id: 2.2.8
-    text: Ensure that the client certificate authorities file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c %U:%G $kubeletcafile; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-        compare:
-          op: eq
-          value: root:root
-    remediation: |
-      Run the following command to modify the ownership of the --client-ca-file .
-      chown root:root <filename>
-    scored: true
+      - id: 2.2.8
+        text: Ensure that the client certificate authorities file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c %U:%G $kubeletcafile; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root <filename>
+        scored: true
 
-  - id: 2.2.9
-    text: Ensure that the kubelet configuration file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the following command (using the config file location identied in the Audit step)
-      chown root:root $kubeletconf
-    scored: true
-    
-  - id: 2.2.10
-    text: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the following command (using the config file location identied in the Audit step)
-      chmod 644 $kubeletconf
-    scored: true
+      - id: 2.2.9
+        text: Ensure that the kubelet configuration file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: true
+
+      - id: 2.2.10
+        text: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: true
diff --git a/cfg/cis-1.4/config.yaml b/cfg/cis-1.4/config.yaml
index 4d9b1b8..b783945 100644
--- a/cfg/cis-1.4/config.yaml
+++ b/cfg/cis-1.4/config.yaml
@@ -1,2 +1,2 @@
----
-## Version-specific settings that override the values in cfg/config.yaml
\ No newline at end of file
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/cis-1.4/master.yaml b/cfg/cis-1.4/master.yaml
index dd3389d..af9f954 100644
--- a/cfg/cis-1.4/master.yaml
+++ b/cfg/cis-1.4/master.yaml
@@ -1,1549 +1,1549 @@
----
-controls:
-version: 1.13
-id: 1
-text: "Master Node Security Configuration"
-type: "master"
-groups:
-- id: 1.1
-  text: "API Server"
-  checks:
-  - id: 1.1.1
-    text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--anonymous-auth"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --anonymous-auth=false
-    scored: false
-
-  - id: 1.1.2
-    text: "Ensure that the --basic-auth-file argument is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--basic-auth-file"
-        set: false
-    remediation: |
-      Follow the documentation and configure alternate mechanisms for authentication. Then,
-      edit the API server pod specification file $apiserverconf
-      on the master node and remove the --basic-auth-file=<filename>
-      parameter.
-    scored: true
-
-  - id: 1.1.3
-    text: "Ensure that the --insecure-allow-any-token argument is not set (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag:  "--insecure-allow-any-token"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and remove the --insecure-allow-any-token
-      parameter.
-    scored: true
-
-  - id: 1.1.4
-    text: "Ensure that the --kubelet-https argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--kubelet-https"
-        compare:
-          op: eq
-          value: true
-        set: true
-      - flag: "--kubelet-https"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and remove the --kubelet-https parameter.
-    scored: true
-
-  - id: 1.1.5
-    text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--insecure-bind-address"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and remove the --insecure-bind-address
-      parameter.
-    scored: true
-
-  - id: 1.1.6
-    text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--insecure-port"
-        compare:
-          op: eq
-          value: 0
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      apiserver.yaml on the master node and set the below parameter.
-      --insecure-port=0
-    scored: true
-
-  - id: 1.1.7
-    text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-        - flag:  "--secure-port"
-          compare:
-            op: gt
-            value: 0
-          set: true
-        - flag: "--secure-port"
-          set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and either remove the --secure-port parameter or
-      set it to a different (non-zero) desired port.
-    scored: true
-
-  - id: 1.1.8
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --profiling=false
-    scored: true
-
-  - id: 1.1.9
-    text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--repair-malformed-updates"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --repair-malformed-updates=false
-    scored: true
-
-  - id: 1.1.10
-    text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: nothave
-          value: AlwaysAdmit
-        set: true
-      - flag: "--enable-admission-plugins"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that does not include AlwaysAdmit.
-    scored: true
-
-  - id: 1.1.11
-    text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "AlwaysPullImages"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins to
-      include AlwaysPullImages.
-      --enable-admission-plugins=...,AlwaysPullImages,...
-    scored: true
-
-  - id: 1.1.12
-    text: "[DEPRECATED] Ensure that the admission control plugin DenyEscalatingExec is set (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    type: "skip"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "DenyEscalatingExec"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that includes DenyEscalatingExec.
-      --enable-admission-plugins=...,DenyEscalatingExec,...
-    scored: false
-
-  - id: 1.1.13
-    text: "Ensure that the admission control plugin SecurityContextDeny is set (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "SecurityContextDeny"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to
-      include SecurityContextDeny.
-      --enable-admission-plugins=...,SecurityContextDeny,...
-    scored: false
-
-  - id: 1.1.14
-    text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--disable-admission-plugins"
-        compare:
-          op: nothave
-          value: "NamespaceLifecycle"
-        set: true
-      - flag: "--disable-admission-plugins"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --disable-admission-plugins parameter to
-      ensure it does not include NamespaceLifecycle.
-      --disable-admission-plugins=...,NamespaceLifecycle,...
-    scored: true
-
-  - id: 1.1.15
-    text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-path"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --audit-log-path parameter to a suitable
-      path and file where you would like audit logs to be written, for example:
-      --audit-log-path=/var/log/apiserver/audit.log
-    scored: true
-
-  - id: 1.1.16
-    text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxage"
-        compare:
-          op: gte
-          value: 30
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --audit-log-maxage parameter to 30 or
-      as an appropriate number of days: --audit-log-maxage=30
-    scored: true
-
-  - id: 1.1.17
-    text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxbackup"
-        compare:
-          op: gte
-          value: 10
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --audit-log-maxbackup parameter to 10
-      or to an appropriate value.
-      --audit-log-maxbackup=10
-    scored: true
-
-  - id: 1.1.18
-    text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxsize"
-        compare:
-          op: gte
-          value: 100
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --audit-log-maxsize parameter to an
-      appropriate size in MB. For example, to set it as 100 MB:
-      --audit-log-maxsize=100
-    scored: true
-
-  - id: 1.1.19
-    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--authorization-mode"
-        compare:
-          op: nothave
-          value: "AlwaysAllow"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --authorization-mode parameter to
-      values other than AlwaysAllow. One such example could be as below.
-      --authorization-mode=RBAC
-    scored: true
-
-  - id: 1.1.20
-    text: "Ensure that the --token-auth-file parameter is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--token-auth-file"
-        set: false
-    remediation: |
-      Follow the documentation and configure alternate mechanisms for authentication. Then,
-      edit the API server pod specification file $apiserverconf
-      on the master node and remove the --token-auth-file=<filename>
-      parameter.
-    scored: true
-
-  - id: 1.1.21
-    text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--kubelet-certificate-authority"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and setup the TLS connection between the
-      apiserver and kubelets. Then, edit the API server pod specification file
-      $apiserverconf on the master node and set the --kubelet-certificate-authority
-      parameter to the path to the cert file for the certificate authority.
-      --kubelet-certificate-authority=<ca-string>
-    scored: true
-
-  - id: 1.1.22
-    text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--kubelet-client-certificate"
-        set: true
-      - flag: "--kubelet-client-key"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the
-      apiserver and kubelets. Then, edit API server pod specification file
-      $apiserverconf on the master node and set the
-      kubelet client certificate and key parameters as below.
-      --kubelet-client-certificate=<path/to/client-certificate-file>
-      --kubelet-client-key=<path/to/client-key-file>
-    scored: true
-
-  - id: 1.1.23
-    text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--service-account-lookup"
-        set: false
-      - flag: "--service-account-lookup"
-        compare:
-          op: eq
-          value: true
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --service-account-lookup=true
-    scored: true
-
-  - id: 1.1.24
-    text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "PodSecurityPolicy"
-        set: true
-    remediation: |
-      Follow the documentation and create Pod Security Policy objects as per your environment.
-      Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that includes PodSecurityPolicy :
-      --enable-admission-plugins=...,PodSecurityPolicy,...
-      Then restart the API Server.
-    scored: true
-
-  - id: 1.1.25
-    text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--service-account-key-file"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --service-account-key-file parameter
-      to the public key file for service accounts:
-      --service-account-key-file=<filename>
-    scored: true
-
-  - id: 1.1.26
-    text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as
-      appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--etcd-certfile"
-        set: true
-      - flag: "--etcd-keyfile"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the
-      apiserver and etcd. Then, edit the API server pod specification file
-      $apiserverconf on the master node and set the etcd
-      certificate and key file parameters.
-      --etcd-certfile=<path/to/client-certificate-file>
-      --etcd-keyfile=<path/to/client-key-file>
-    scored: true
-
-  - id: 1.1.27
-    text: "Ensure that the admission control plugin ServiceAccount is set(Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "ServiceAccount"
-        set: true
-      - flag: "--enable-admission-plugins"
-        set: false
-    remediation: |
-      Follow the documentation and create ServiceAccount objects as per your environment.
-      Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that includes ServiceAccount.
-      --enable-admission-plugins=...,ServiceAccount,...
-    scored: true
-
-  - id: 1.1.28
-    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
-    as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--tls-cert-file"
-        set: true
-      - flag: "--tls-private-key-file"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-      Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the TLS certificate and private key file
-      parameters.
-      --tls-cert-file=<path/to/tls-certificate-file>
-      --tls-private-key-file=<path/to/tls-key-file>
-    scored: true
-
-  - id: 1.1.29
-    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--client-ca-file"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-      Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the client certificate authority file.
-      --client-ca-file=<path/to/client-ca-file>
-    scored: true
-
-  - id: 1.1.30
-    text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--tls-cipher-suites"
-        compare:
-          op: has
-          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the below parameter.
-      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-    scored: false
-
-  - id: 1.1.31
-    text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--etcd-cafile"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the
-      apiserver and etcd. Then, edit the API server pod specification file
-      $apiserverconf on the master node and set the etcd
-      certificate authority file parameter.
-      --etcd-cafile=<path/to/ca-file>
-    scored: true
-
-  - id: 1.1.32
-    text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--authorization-mode"
-        compare:
-          op: has
-          value: "Node"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      on the master node and set the --authorization-mode parameter to a
-      value that includes Node.
-      --authorization-mode=Node,RBAC
-    scored: true
-
-  - id: 1.1.33
-    text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "NodeRestriction"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and configure NodeRestriction plug-in on
-      kubelets. Then, edit the API server pod specification file $apiserverconf
-      on the master node and set the --enable-admission-plugins parameter to a
-      value that includes NodeRestriction.
-      --enable-admission-plugins=...,NodeRestriction,...
-    scored: true
-
-  - id: 1.1.34
-    text: "Ensure that the --encryption-provider-config argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--encryption-provider-config"
-        set: true
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and configure a EncryptionConfig file.
-      Then, edit the API server pod specification file $apiserverconf on the
-      master node and set the --encryption-provider-config parameter
-      to the path of that file:
-      --encryption-provider-config=</path/to/EncryptionConfig/File>
-    scored: true
-
-  - id: 1.1.35
-    text: "Ensure that the encryption provider is set to aescbc (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file,
-      choose aescbc as the encryption provider.
-      For example,
-      kind: EncryptionConfig
-      apiVersion: v1
-      resources:
-        - resources:
-          - secrets
-            providers:
-            - aescbc:
-                keys:
-                - name: key1
-                  secret: <32-byte base64-encoded secret>
-    scored: true
-
-  - id: 1.1.36
-    text: "Ensure that the admission control plugin EventRateLimit is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "EventRateLimit"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set the desired limits in a
-      configuration file. Then, edit the API server pod specification file
-      $apiserverconf and set the below parameters.
-      --enable-admission-plugins=...,EventRateLimit,...
-      --admission-control-config-file=<path/to/configuration/file>
-    scored: true
-
-  - id: 1.1.37a
-    text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--feature-gates"
-        compare:
-          op: nothave
-          value: "AdvancedAuditing=false"
-        set: true
-      - flag: "--feature-gates"
-        set: false
-    remediation: |
-      Follow the Kubernetes documentation and set the desired audit policy in the
-      /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
-      and set the below parameters.
-      --audit-policy-file=/etc/kubernetes/audit-policy.yaml
-    scored: true
-
-  - id: 1.1.37b
-    text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-policy-file"
-        compare:
-          op: eq
-          value: "/etc/kubernetes/audit-policy.yaml"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set the desired audit policy in the
-      /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
-      and set the below parameters.
-      --audit-policy-file=/etc/kubernetes/audit-policy.yaml
-    scored: true
-
-  - id: 1.1.38
-    text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--request-timeout"
-        set: false
-      - flag: "--request-timeout"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf
-      and set the below parameter as appropriate and if needed. For example,
-      --request-timeout=300s
-    scored: true
-
-  - id: 1.1.39
-    text: "Ensure that the --authorization-mode argument includes RBAC (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--authorization-mode"
-        compare:
-          op: has
-          value: "RBAC"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverbin on the master node and set the --authorization-mode parameter to a value that includes RBAC, for example: --authorization-mode=Node,RBAC
-    scored: true
-
-- id: 1.2
-  text: "Scheduler"
-  checks:
-  - id: 1.2.1
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the Scheduler pod specification file $schedulerconf
-      file on the master node and set the below parameter.
-      --profiling=false
-    scored: true
-
-  - id: 1.2.2
-    text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
-    audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--address"
-        compare:
-          op: eq
-          value: "127.0.0.1"
-        set: true
-      - flag: "--address"
-        set: false
-    remediation: |
-      Edit the Scheduler pod specification file $schedulerconf
-      file on the master node and ensure the correct value for the
-      --address parameter.
-    scored: true
-
-- id: 1.3
-  text: "Controller Manager"
-  checks:
-  - id: 1.3.1
-    text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--terminated-pod-gc-threshold"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
-      --terminated-pod-gc-threshold=10
-    scored: true
-
-  - id: 1.3.2
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node and set the below parameter.
-      --profiling=false
-    scored: true
-
-  - id: 1.3.3
-    text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--use-service-account-credentials"
-        compare:
-          op: noteq
-          value: false
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node to set the below parameter.
-      --use-service-account-credentials=true
-    scored: true
-
-  - id: 1.3.4
-    text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--service-account-private-key-file"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node and set the --service-account-private-
-      key-file parameter to the private key file for service accounts.
-      --service-account-private-key-file=<filename>
-    scored: true
-
-  - id: 1.3.5
-    text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--root-ca-file"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      on the master node and set the --root-ca-file parameter to
-      the certificate bundle file.
-      --root-ca-file=<path/to/file>
-    scored: true
-
-  - id: 1.3.6
-    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--feature-gates"
-        compare:
-          op: eq
-          value: "RotateKubeletServerCertificate=true"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      controller-manager.yaml on the master node and set the --feature-gates parameter to
-      include RotateKubeletServerCertificate=true.
-      --feature-gates=RotateKubeletServerCertificate=true
-    scored: true
-
-  - id: 1.3.7
-    text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--address"
-        compare:
-          op: eq
-          value: "127.0.0.1"
-        set: true
-      - flag: "--address"
-        set: false
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf
-      controller-manager.yaml on the master node and ensure the correct value
-      for the --address parameter.
-    scored: true
-
-- id: 1.4
-  text: "Configuration Files"
-  checks:
-  - id: 1.4.1
-    text: "Ensure that the API server pod specification file permissions are
-    set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 $apiserverconf
-    scored: true
-
-  - id: 1.4.2
-    text: "Ensure that the API server pod specification file ownership is set to
-    root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root $apiserverconf
-    scored: true
-
-  - id: 1.4.3
-    text: "Ensure that the controller manager pod specification file
-    permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 $controllermanagerconf
-    scored: true
-
-  - id: 1.4.4
-    text: "Ensure that the controller manager pod specification file
-    ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root $controllermanagerconf
-    scored: true
-
-  - id: 1.4.5
-    text: "Ensure that the scheduler pod specification file permissions are set
-    to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 $schedulerconf
-    scored: true
-
-  - id: 1.4.6
-    text: "Ensure that the scheduler pod specification file ownership is set to
-    root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
-    tests:
-      test_items:
-        - flag: "root:root"
-          compare:
-            op: eq
-            value: "root:root"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root $schedulerconf
-    scored: true
-
-  - id: 1.4.7
-    text: "Ensure that the etcd pod specification file permissions are set to
-    644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 $etcdconf
-    scored: true
-
-  - id: 1.4.8
-    text: "Ensure that the etcd pod specification file ownership is set to
-    root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root $etcdconf
-    scored: true
-
-  - id: 1.4.9
-    text: "Ensure that the Container Network Interface file permissions are
-    set to 644 or more restrictive (Not Scored)"
-    audit: "stat -c %a <path/to/cni/files>"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 <path/to/cni/files>
-    scored: true
-
-  - id: 1.4.10
-    text: "Ensure that the Container Network Interface file ownership is set
-    to root:root (Not Scored)"
-    audit: "stat -c %U:%G <path/to/cni/files>"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root <path/to/cni/files>
-    scored: true
-
-  - id: 1.4.11
-    text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
-    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
-    tests:
-      test_items:
-      - flag: "700"
-        compare:
-          op: eq
-          value: "700"
-        set: true
-    remediation: |
-      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-      from the below command:
-      ps -ef | grep $etcdbin
-      Run the below command (based on the etcd data directory found above). For example,
-      chmod 700 /var/lib/etcd
-    scored: true
-
-  - id: 1.4.12
-    text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
-    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
-    tests:
-      test_items:
-      - flag: "etcd:etcd"
-        set: true
-    remediation: |
-      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-      from the below command:
-      ps -ef | grep $etcdbin
-      Run the below command (based on the etcd data directory found above). For example,
-      chown etcd:etcd /var/lib/etcd
-    scored: true
-
-  - id: 1.4.13
-    text: "Ensure that the admin.conf file permissions are set to 644 or
-    more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chmod 644 /etc/kubernetes/admin.conf
-    scored: true
-
-  - id: 1.4.14
-    text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example,
-      chown root:root /etc/kubernetes/admin.conf
-    scored: true
-
-  - id: 1.4.15
-    text: "Ensure that the scheduler.conf file permissions are set to 644 or
-    more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node. For example, chmod 644 /etc/kubernetes/scheduler.conf
-    scored: true
-
-  - id: 1.4.16
-    text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node. For example, chown root:root /etc/kubernetes/scheduler.conf
-    scored: true
-
-  - id: 1.4.17
-    text: "Ensure that the controller-manager.conf file permissions are set
-    to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf
-    scored: true
-
-  - id: 1.4.18
-    text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node. For example, chown root:root /etc/kubernetes/controller-manager.conf
-    scored: true
-  
-  - id: 1.4.19
-    text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)"
-    audit: "ls -laR /etc/kubernetes/pki/"
-    type: "manual"
-    tests:
-      test_items:
-      - flag: "root root"
-        compare:
-          op: eq
-          value: "root root"
-        set: true
-    remediation: |
-      [Manual test]
-      Run the below command (based on the file location on your system) on the master node.
-      For example, chown -R root:root /etc/kubernetes/pki/
-    scored: true
-    
-  - id: 1.4.20
-    text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)"
-    audit: "stat -c %n\ %a /etc/kubernetes/pki/*.crt"
-    type: "manual"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      [Manual test]
-      Run the below command (based on the file location on your system) on the master node.
-      For example, chmod -R 644 /etc/kubernetes/pki/*.crt
-    scored: true
-    
-  - id: 1.4.21
-    text: "Ensure that the Kubernetes PKI key file permissions are set to 600 or more restrictive (Scored)"
-    audit: "stat -c %n\ %a /etc/kubernetes/pki/*.key"
-    type: "manual"
-    tests:
-      test_items:
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      [Manual test]
-      Run the below command (based on the file location on your system) on the master node.
-      For example, chmod -R 600 /etc/kubernetes/pki/*.key
-    scored: true
-
-- id: 1.5
-  text: "etcd"
-  checks:
-  - id: 1.5.1
-    text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--cert-file"
-        set: true
-      - flag:  "--key-file"
-        set: true
-    remediation: |
-      Follow the etcd service documentation and configure TLS encryption.
-      Then, edit the etcd pod specification file $etcdconf on the
-      master node and set the below parameters.
-      --ca-file=</path/to/ca-file>
-      --key-file=</path/to/key-file>
-    scored: true
-
-  - id: 1.5.2
-    text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--client-cert-auth"
-        compare:
-          op: eq
-          value: true
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and set the below parameter.
-      --client-cert-auth="true"
-    scored: true
-
-  - id: 1.5.3
-    text: "Ensure that the --auto-tls argument is not set to true (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--auto-tls"
-        set: false
-      - flag: "--auto-tls"
-        compare:
-          op: eq
-          value: false
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and either remove the --auto-tls parameter or set it to false.
-        --auto-tls=false
-    scored: true
-
-  - id: 1.5.4
-    text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
-    set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--peer-cert-file"
-        set: true
-      - flag: "--peer-key-file"
-        set: true
-    remediation: |
-      Follow the etcd service documentation and configure peer TLS encryption as appropriate
-      for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
-      master node and set the below parameters.
-      --peer-client-file=</path/to/peer-cert-file>
-      --peer-key-file=</path/to/peer-key-file>
-    scored: true
-
-  - id: 1.5.5
-    text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--peer-client-cert-auth"
-        compare:
-          op: eq
-          value: true
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and set the below parameter.
-      --peer-client-cert-auth=true
-    scored: true
-
-  - id: 1.5.6
-    text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--peer-auto-tls"
-        set: false
-      - flag: "--peer-auto-tls"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and either remove the --peer-auto-tls parameter or set it to false.
-      --peer-auto-tls=false
-    scored: true
-
-  - id: 1.5.7
-    text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
-    audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
-    type: "manual"
-    tests:
-      test_items:
-      - flag: "--trusted-ca-file"
-        set: true
-    remediation: |
-      [Manual test]
-      Follow the etcd documentation and create a dedicated certificate authority setup for the
-      etcd service.
-      Then, edit the etcd pod specification file $etcdconf on the
-      master node and set the below parameter.
-      --trusted-ca-file=</path/to/ca-file>
-    scored: false
-
-- id: 1.6
-  text: "General Security Primitives"
-  checks:
-  - id: 1.6.1
-    text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Remove any unneeded clusterrolebindings :
-      kubectl delete clusterrolebinding [name]
-    scored: false
-
-  - id: 1.6.2
-    text: "Create administrative boundaries between resources using namespaces (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the documentation and create namespaces for objects in your deployment as you
-      need them.
-    scored: false
-
-  - id: 1.6.3
-    text: "Create network segmentation using Network Policies (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the documentation and create NetworkPolicy objects as you need them.
-    scored: false
-
-  - id: 1.6.4
-    text: "Ensure that the seccomp profile is set to docker/default in your pod
-    definitions (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
-      would need to enable alpha features in the apiserver by passing "--feature-
-      gates=AllAlpha=true" argument.
-      Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
-      parameter to "--feature-gates=AllAlpha=true"
-      KUBE_API_ARGS="--feature-gates=AllAlpha=true"
-      Based on your system, restart the kube-apiserver service. For example:
-      systemctl restart kube-apiserver.service
-      Use annotations to enable the docker/default seccomp profile in your pod definitions. An
-      example is as below:
-      apiVersion: v1
-      kind: Pod
-      metadata:
-        name: trustworthy-pod
-        annotations:
-          seccomp.security.alpha.kubernetes.io/pod: docker/default
-      spec:
-        containers:
-          - name: trustworthy-container
-            image: sotrustworthy:latest
-    scored: false
-
-  - id: 1.6.5
-    text: "Apply Security Context to Your Pods and Containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and apply security contexts to your pods. For a
-      suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
-      Containers.
-    scored: false
-
-  - id: 1.6.6
-    text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and setup image provenance.
-    scored: false
-
-  - id: 1.6.7
-    text: "Configure Network policies as appropriate (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow the Kubernetes documentation and setup network policies as appropriate.
-      For example, you could create a "default" isolation policy for a Namespace by creating a
-      NetworkPolicy that selects all pods but does not allow any traffic:
-      apiVersion: networking.k8s.io/v1
-      kind: NetworkPolicy
-      metadata:
-        name: default-deny
-      spec:
-        podSelector:
-    scored: false
-
-  - id: 1.6.8
-    text: "Place compensating controls in the form of PSP and RBAC for
-    privileged containers usage (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster.
-    scored: false
-
-- id: 1.7
-  text: "PodSecurityPolicies"
-  checks:
-  - id: 1.7.1
-    text: "Do not admit privileged containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.2
-    text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-     [Manual test]
-     Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.3
-    text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.4
-    text: "Do not admit containers wishing to share the host network namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.5
-    text: " Do not admit containers with allowPrivilegeEscalation (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.6
-    text: "Do not admit root containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
-    scored: false
-
-  - id: 1.7.7
-    text: "Do not admit containers with dangerous capabilities (Not Scored)"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
-    scored: false
+---
+controls:
+version: 1.13
+id: 1
+text: "Master Node Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "API Server"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --anonymous-auth=false
+        scored: false
+
+      - id: 1.1.2
+        text: "Ensure that the --basic-auth-file argument is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--basic-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the master node and remove the --basic-auth-file=<filename>
+          parameter.
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the --insecure-allow-any-token argument is not set (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--insecure-allow-any-token"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and remove the --insecure-allow-any-token
+          parameter.
+        scored: true
+
+      - id: 1.1.4
+        text: "Ensure that the --kubelet-https argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--kubelet-https"
+              compare:
+                op: eq
+                value: true
+              set: true
+            - flag: "--kubelet-https"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and remove the --kubelet-https parameter.
+        scored: true
+
+      - id: 1.1.5
+        text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--insecure-bind-address"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and remove the --insecure-bind-address
+          parameter.
+        scored: true
+
+      - id: 1.1.6
+        text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--insecure-port"
+              compare:
+                op: eq
+                value: 0
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          apiserver.yaml on the master node and set the below parameter.
+          --insecure-port=0
+        scored: true
+
+      - id: 1.1.7
+        text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+              set: true
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and either remove the --secure-port parameter or
+          set it to a different (non-zero) desired port.
+        scored: true
+
+      - id: 1.1.8
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.1.9
+        text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--repair-malformed-updates"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --repair-malformed-updates=false
+        scored: true
+
+      - id: 1.1.10
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: AlwaysAdmit
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that does not include AlwaysAdmit.
+        scored: true
+
+      - id: 1.1.11
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "AlwaysPullImages"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins to
+          include AlwaysPullImages.
+          --enable-admission-plugins=...,AlwaysPullImages,...
+        scored: true
+
+      - id: 1.1.12
+        text: "[DEPRECATED] Ensure that the admission control plugin DenyEscalatingExec is set (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "DenyEscalatingExec"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes DenyEscalatingExec.
+          --enable-admission-plugins=...,DenyEscalatingExec,...
+        scored: false
+
+      - id: 1.1.13
+        text: "Ensure that the admission control plugin SecurityContextDeny is set (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to
+          include SecurityContextDeny.
+          --enable-admission-plugins=...,SecurityContextDeny,...
+        scored: false
+
+      - id: 1.1.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+              set: true
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+          --disable-admission-plugins=...,NamespaceLifecycle,...
+        scored: true
+
+      - id: 1.1.15
+        text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-path"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-path parameter to a suitable
+          path and file where you would like audit logs to be written, for example:
+          --audit-log-path=/var/log/apiserver/audit.log
+        scored: true
+
+      - id: 1.1.16
+        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxage"
+              compare:
+                op: gte
+                value: 30
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxage parameter to 30 or
+          as an appropriate number of days: --audit-log-maxage=30
+        scored: true
+
+      - id: 1.1.17
+        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxbackup"
+              compare:
+                op: gte
+                value: 10
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxbackup parameter to 10
+          or to an appropriate value.
+          --audit-log-maxbackup=10
+        scored: true
+
+      - id: 1.1.18
+        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxsize"
+              compare:
+                op: gte
+                value: 100
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxsize parameter to an
+          appropriate size in MB. For example, to set it as 100 MB:
+          --audit-log-maxsize=100
+        scored: true
+
+      - id: 1.1.19
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --authorization-mode parameter to
+          values other than AlwaysAllow. One such example could be as below.
+          --authorization-mode=RBAC
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the --token-auth-file parameter is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the master node and remove the --token-auth-file=<filename>
+          parameter.
+        scored: true
+
+      - id: 1.1.21
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and setup the TLS connection between the
+          apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the master node and set the --kubelet-certificate-authority
+          parameter to the path to the cert file for the certificate authority.
+          --kubelet-certificate-authority=<ca-string>
+        scored: true
+
+      - id: 1.1.22
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+              set: true
+            - flag: "--kubelet-client-key"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and kubelets. Then, edit API server pod specification file
+          $apiserverconf on the master node and set the
+          kubelet client certificate and key parameters as below.
+          --kubelet-client-certificate=<path/to/client-certificate-file>
+          --kubelet-client-key=<path/to/client-key-file>
+        scored: true
+
+      - id: 1.1.23
+        text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --service-account-lookup=true
+        scored: true
+
+      - id: 1.1.24
+        text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+              set: true
+        remediation: |
+          Follow the documentation and create Pod Security Policy objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes PodSecurityPolicy:
+          --enable-admission-plugins=...,PodSecurityPolicy,...
+          Then restart the API Server.
+        scored: true
+
+      - id: 1.1.25
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --service-account-key-file parameter
+          to the public key file for service accounts:
+          --service-account-key-file=<filename>
+        scored: true
+
+      - id: 1.1.26
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as
+          appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+              set: true
+            - flag: "--etcd-keyfile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and etcd. Then, edit the API server pod specification file
+          $apiserverconf on the master node and set the etcd
+          certificate and key file parameters.
+          --etcd-certfile=<path/to/client-certificate-file>
+          --etcd-keyfile=<path/to/client-key-file>
+        scored: true
+
+      - id: 1.1.27
+        text: "Ensure that the admission control plugin ServiceAccount is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "ServiceAccount"
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes ServiceAccount.
+          --enable-admission-plugins=...,ServiceAccount,...
+        scored: true
+
+      - id: 1.1.28
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
+        as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+              set: true
+            - flag: "--tls-private-key-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the TLS certificate and private key file
+          parameters.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+        scored: true
+
+      - id: 1.1.29
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the client certificate authority file.
+          --client-ca-file=<path/to/client-ca-file>
+        scored: true
+
+      - id: 1.1.30
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: has
+                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        scored: false
+
+      - id: 1.1.31
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and etcd. Then, edit the API server pod specification file
+          $apiserverconf on the master node and set the etcd
+          certificate authority file parameter.
+          --etcd-cafile=<path/to/ca-file>
+        scored: true
+
+      - id: 1.1.32
+        text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --authorization-mode parameter to a
+          value that includes Node.
+          --authorization-mode=Node,RBAC
+        scored: true
+
+      - id: 1.1.33
+        text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and configure NodeRestriction plug-in on
+          kubelets. Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes NodeRestriction.
+          --enable-admission-plugins=...,NodeRestriction,...
+        scored: true
+
+      - id: 1.1.34
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+              set: true
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf on the
+          master node and set the --encryption-provider-config parameter
+          to the path of that file:
+          --encryption-provider-config=</path/to/EncryptionConfig/File>
+        scored: true
+
+      - id: 1.1.35
+        text: "Ensure that the encryption provider is set to aescbc (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and configure an EncryptionConfig file. In this file,
+          choose aescbc as the encryption provider.
+          For example,
+          kind: EncryptionConfig
+          apiVersion: v1
+          resources:
+            - resources:
+                - secrets
+              providers:
+                - aescbc:
+                    keys:
+                      - name: key1
+                        secret: <32-byte base64-encoded secret>
+        scored: true
+
+      - id: 1.1.36
+        text: "Ensure that the admission control plugin EventRateLimit is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "EventRateLimit"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set the desired limits in a
+          configuration file. Then, edit the API server pod specification file
+          $apiserverconf and set the below parameters.
+          --enable-admission-plugins=...,EventRateLimit,...
+          --admission-control-config-file=<path/to/configuration/file>
+        scored: true
+
+      - id: 1.1.37a
+        text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "AdvancedAuditing=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Follow the Kubernetes documentation and set the desired audit policy in the
+          /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
+          and set the below parameters.
+          --audit-policy-file=/etc/kubernetes/audit-policy.yaml
+        scored: true
+
+      - id: 1.1.37b
+        text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-policy-file"
+              compare:
+                op: eq
+                value: "/etc/kubernetes/audit-policy.yaml"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set the desired audit policy in the
+          /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
+          and set the below parameters.
+          --audit-policy-file=/etc/kubernetes/audit-policy.yaml
+        scored: true
+
+      - id: 1.1.38
+        text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--request-timeout"
+              set: false
+            - flag: "--request-timeout"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          and set the below parameter as appropriate and if needed. For example,
+          --request-timeout=300s
+        scored: true
+
+      - id: 1.1.39
+        text: "Ensure that the --authorization-mode argument includes RBAC (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "RBAC"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverbin on the master node and set the --authorization-mode parameter to a value that includes RBAC, for example: --authorization-mode=Node,RBAC
+        scored: true
+
+  - id: 1.2
+    text: "Scheduler"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          file on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.2.2
+        text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          file on the master node and ensure the correct value for the
+          --address parameter.
+        scored: true
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
+          --terminated-pod-gc-threshold=10
+        scored: true
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --service-account-private-key-file
+          parameter to the private key file for service accounts.
+          --service-account-private-key-file=<filename>
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --root-ca-file parameter to
+          the certificate bundle file.
+          --root-ca-file=<path/to/file>
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: eq
+                value: "RotateKubeletServerCertificate=true"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          controller-manager.yaml on the master node and set the --feature-gates parameter to
+          include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          controller-manager.yaml on the master node and ensure the correct value
+          for the --address parameter.
+        scored: true
+
+  - id: 1.4
+    text: "Configuration Files"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the API server pod specification file permissions are
+        set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $apiserverconf
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the API server pod specification file ownership is set to
+        root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $apiserverconf
+        scored: true
+
+      - id: 1.4.3
+        text: "Ensure that the controller manager pod specification file
+        permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $controllermanagerconf
+        scored: true
+
+      - id: 1.4.4
+        text: "Ensure that the controller manager pod specification file
+        ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $controllermanagerconf
+        scored: true
+
+      - id: 1.4.5
+        text: "Ensure that the scheduler pod specification file permissions are set
+        to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $schedulerconf
+        scored: true
+
+      - id: 1.4.6
+        text: "Ensure that the scheduler pod specification file ownership is set to
+        root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $schedulerconf
+        scored: true
+
+      - id: 1.4.7
+        text: "Ensure that the etcd pod specification file permissions are set to
+        644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $etcdconf
+        scored: true
+
+      - id: 1.4.8
+        text: "Ensure that the etcd pod specification file ownership is set to
+        root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $etcdconf
+        scored: true
+
+      - id: 1.4.9
+        text: "Ensure that the Container Network Interface file permissions are
+        set to 644 or more restrictive (Not Scored)"
+        audit: "stat -c %a <path/to/cni/files>"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 <path/to/cni/files>
+        scored: true  # NOTE(review): check text says "(Not Scored)" — confirm whether this should be false
+
+      - id: 1.4.10
+        text: "Ensure that the Container Network Interface file ownership is set
+        to root:root (Not Scored)"
+        audit: "stat -c %U:%G <path/to/cni/files>"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root <path/to/cni/files>
+        scored: true  # NOTE(review): check text says "(Not Scored)" — confirm whether this should be false
+
+      - id: 1.4.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
+        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
+        tests:
+          test_items:
+            - flag: "700"
+              compare:
+                op: eq
+                value: "700"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the below command:
+          ps -ef | grep $etcdbin
+          Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 /var/lib/etcd
+        scored: true
+
+      - id: 1.4.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
+        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
+        tests:
+          test_items:
+            - flag: "etcd:etcd"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
+          from the below command:
+          ps -ef | grep $etcdbin
+          Run the below command (based on the etcd data directory found above). For example,
+          chown etcd:etcd /var/lib/etcd
+        scored: true
+
+      - id: 1.4.13
+        text: "Ensure that the admin.conf file permissions are set to 644 or
+        more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 /etc/kubernetes/admin.conf
+        scored: true
+
+      - id: 1.4.14
+        text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root /etc/kubernetes/admin.conf
+        scored: true
+
+      - id: 1.4.15
+        text: "Ensure that the scheduler.conf file permissions are set to 644 or
+        more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node. For example, chmod 644 /etc/kubernetes/scheduler.conf
+        scored: true
+
+      - id: 1.4.16
+        text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node. For example, chown root:root /etc/kubernetes/scheduler.conf
+        scored: true
+
+      - id: 1.4.17
+        text: "Ensure that the controller-manager.conf file permissions are set
+        to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf
+        scored: true
+
+      - id: 1.4.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node. For example, chown root:root /etc/kubernetes/controller-manager.conf
+        scored: true
+
+      - id: 1.4.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)"
+        audit: "ls -laR /etc/kubernetes/pki/"
+        type: "manual"
+        tests:
+          test_items:
+            - flag: "root root"
+              compare:
+                op: eq
+                value: "root root"
+              set: true
+        remediation: |
+          [Manual test]
+          Run the below command (based on the file location on your system) on the master node.
+          For example, chown -R root:root /etc/kubernetes/pki/
+        scored: true
+
+      - id: 1.4.20
+        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)"
+        audit: "stat -c %n\ %a /etc/kubernetes/pki/*.crt"
+        type: "manual"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          [Manual test]
+          Run the below command (based on the file location on your system) on the master node.
+          For example, chmod -R 644 /etc/kubernetes/pki/*.crt
+        scored: true
+
+      - id: 1.4.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 or more restrictive (Scored)"
+        audit: "stat -c %n\ %a /etc/kubernetes/pki/*.key"
+        type: "manual"
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          [Manual test]
+          Run the below command (based on the file location on your system) on the master node.
+          For example, chmod -R 600 /etc/kubernetes/pki/*.key
+        scored: true
+
+  - id: 1.5
+    text: "etcd"
+    checks:
+      - id: 1.5.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--cert-file"
+              set: true
+            - flag: "--key-file"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure TLS encryption.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --ca-file=</path/to/ca-file>
+          --key-file=</path/to/key-file>
+        scored: true
+
+      - id: 1.5.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --client-cert-auth="true"
+        scored: true
+
+      - id: 1.5.3
+        text: "Ensure that the --auto-tls argument is not set to true (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              set: false
+            - flag: "--auto-tls"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          --auto-tls=false
+        scored: true
+
+      - id: 1.5.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+        set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              set: true
+            - flag: "--peer-key-file"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-client-file=</path/to/peer-cert-file>
+          --peer-key-file=</path/to/peer-key-file>
+        scored: true
+
+      - id: 1.5.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --peer-client-cert-auth=true
+        scored: true
+
+      - id: 1.5.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              set: false
+            - flag: "--peer-auto-tls"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+          --peer-auto-tls=false
+        scored: true
+
+      - id: 1.5.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
+        audit: "/bin/ps -ef | grep $etcdbin | grep -v grep"
+        type: "manual"
+        tests:
+          test_items:
+            - flag: "--trusted-ca-file"
+              set: true
+        remediation: |
+          [Manual test]
+          Follow the etcd documentation and create a dedicated certificate authority setup for the
+          etcd service.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameter.
+          --trusted-ca-file=</path/to/ca-file>
+        scored: false
+
+  - id: 1.6
+    text: "General Security Primitives"
+    checks:
+      - id: 1.6.1
+        text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Remove any unneeded clusterrolebindings :
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 1.6.2
+        text: "Create administrative boundaries between resources using namespaces (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the documentation and create namespaces for objects in your deployment as you
+          need them.
+        scored: false
+
+      - id: 1.6.3
+        text: "Create network segmentation using Network Policies (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+      - id: 1.6.4
+        text: "Ensure that the seccomp profile is set to docker/default in your pod
+        definitions (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+          would need to enable alpha features in the apiserver by passing "--feature-
+          gates=AllAlpha=true" argument.
+          Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
+          parameter to "--feature-gates=AllAlpha=true"
+          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+          Based on your system, restart the kube-apiserver service. For example:
+          systemctl restart kube-apiserver.service
+          Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+          example is as below:
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            name: trustworthy-pod
+            annotations:
+              seccomp.security.alpha.kubernetes.io/pod: docker/default
+          spec:
+            containers:
+              - name: trustworthy-container
+                image: sotrustworthy:latest
+        scored: false
+
+      - id: 1.6.5
+        text: "Apply Security Context to Your Pods and Containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 1.6.6
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and setup image provenance.
+        scored: false
+
+      - id: 1.6.7
+        text: "Configure Network policies as appropriate (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow the Kubernetes documentation and setup network policies as appropriate.
+          For example, you could create a "default" isolation policy for a Namespace by creating a
+          NetworkPolicy that selects all pods but does not allow any traffic:
+          apiVersion: networking.k8s.io/v1
+          kind: NetworkPolicy
+          metadata:
+            name: default-deny
+          spec:
+            podSelector:
+        scored: false
+
+      - id: 1.6.8
+        text: "Place compensating controls in the form of PSP and RBAC for
+        privileged containers usage (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster.
+        scored: false
+
+  - id: 1.7
+    text: "PodSecurityPolicies"
+    checks:
+      - id: 1.7.1
+        text: "Do not admit privileged containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.2
+        text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.3
+        text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.4
+        text: "Do not admit containers wishing to share the host network namespace (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.5
+        text: "Do not admit containers with allowPrivilegeEscalation (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
+        scored: false
+
+      - id: 1.7.6
+        text: "Do not admit root containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
+        scored: false
+
+      - id: 1.7.7
+        text: "Do not admit containers with dangerous capabilities (Not Scored)"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+        scored: false
diff --git a/cfg/cis-1.4/node.yaml b/cfg/cis-1.4/node.yaml
index 7ef93b3..f600a99 100644
--- a/cfg/cis-1.4/node.yaml
+++ b/cfg/cis-1.4/node.yaml
@@ -5,538 +5,538 @@ id: "2"
 text: Worker Node Security Configuration
 type: "node"
 groups:
-- id: "2.1"
-  text: Kubelet
-  checks:
-  - id: 2.1.1
-    text: Ensure that the --anonymous-auth argument is set to false (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: "--anonymous-auth"
-        path: '{.authentication.anonymous.enabled}'
-        set: true
-        compare:
-          op: eq
-          value: false
-    remediation: |
-      If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
-      false .
-      If using executable arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --anonymous-auth=false
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+  - id: "2.1"
+    text: Kubelet
+    checks:
+      - id: 2.1.1
+        text: Ensure that the --anonymous-auth argument is set to false (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              set: true
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false .
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.2
-    text: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --authorization-mode
-        path: '{.authorization.mode}'
-        set: true
-        compare:
-          op: nothave
-          value: AlwaysAllow
-    remediation: |
-      If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
-      If using executable arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_AUTHZ_ARGS variable.
-      --authorization-mode=Webhook
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.2
+        text: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              set: true
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.3
-    text: Ensure that the --client-ca-file argument is set as appropriate (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --client-ca-file
-        path: '{.authentication.x509.clientCAFile}'
-        set: true
-    remediation: |
-      If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
-      the location of the client CA file.
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_AUTHZ_ARGS variable.
-      --client-ca-file=<path/to/client-ca-file>
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.3
+        text: Ensure that the --client-ca-file argument is set as appropriate (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=<path/to/client-ca-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.4
-    text: Ensure that the --read-only-port argument is set to 0 (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: "--read-only-port"
-        path: '{.readOnlyPort}'
-        set: true
-        compare:
-          op: eq
-          value: 0
-    remediation: |
-      If using a Kubelet config file, edit the file to set readOnlyPort to 0 .
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --read-only-port=0
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.4
+        text: Ensure that the --read-only-port argument is set to 0 (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set readOnlyPort to 0 .
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.5
-    text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --streaming-connection-idle-timeout
-        path: '{.streamingConnectionIdleTimeout}'
-        set: true
-        compare:
-          op: noteq
-          value: 0
-      - flag: --streaming-connection-idle-timeout
-        path: '{.streamingConnectionIdleTimeout}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
-      value other than 0.
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --streaming-connection-idle-timeout=5m
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.5
+        text: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: true
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.6
-    text: Ensure that the --protect-kernel-defaults argument is set to true (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --protect-kernel-defaults
-        path: '{.protectKernelDefaults}'
-        set: true
-        compare:
-          op: eq
-          value: true
-    remediation: |
-      If using a Kubelet config file, edit the file to set protectKernelDefaults: true .
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --protect-kernel-defaults=true
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.6
+        text: Ensure that the --protect-kernel-defaults argument is set to true (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set protectKernelDefaults: true .
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.7
-    text: Ensure that the --make-iptables-util-chains argument is set to true (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --make-iptables-util-chains
-        path: '{.makeIPTablesUtilChains}'
-        set: true
-        compare:
-          op: eq
-          value: true
-      - flag: --make-iptables-util-chains
-        path: '{.makeIPTablesUtilChains}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true .
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      remove the --make-iptables-util-chains argument from the
-      KUBELET_SYSTEM_PODS_ARGS variable.
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.7
+        text: Ensure that the --make-iptables-util-chains argument is set to true (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true .
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.8
-    text: Ensure that the --hostname-override argument is not set (Scored)
-    # This is one of those properties that can only be set as a command line argument. 
-    # To check if the property is set as expected, we need to parse the kubelet command 
-    # instead reading the Kubelet Configuration file.
-    audit: "/bin/ps -fC $kubeletbin "
-    tests:
-      test_items:
-      - flag: --hostname-override
-        set: false
-    remediation: |
-      Edit the kubelet service file $kubeletsvc
-      on each worker node and remove the --hostname-override argument from the
-      KUBELET_SYSTEM_PODS_ARGS variable.
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.8
+        text: Ensure that the --hostname-override argument is not set (Scored)
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.9
-    text: Ensure that the --event-qps argument is set to 0 (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --event-qps
-        path: '{.eventRecordQPS}'
-        set: true
-        compare:
-          op: eq
-          value: 0
-    remediation: |
-      If using a Kubelet config file, edit the file to set eventRecordQPS: 0 .
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-      --event-qps=0
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.9
+        text: Ensure that the --event-qps argument is set to 0 (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS: 0 .
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --event-qps=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.10
-    text: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --tls-cert-file
-        path: '{.tlsCertFile}'
-        set: true
-      - flag: --tls-private-key-file
-        path: '{.tlsPrivateKeyFile}'
-        set: true
-      bin_op: and
-    remediation: |
-      If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
-      file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
-      corresponding private key file.
-      If using command line arguments, edit the kubelet service file
-      $kubeletsvc on each worker node and
-      set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
-      --tls-cert-file=<path/to/tls-certificate-file>
-      file=<path/to/tls-key-file>
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.10
+        text: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+              set: true
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+              set: true
+          bin_op: and
+        remediation: |
+          If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
+          file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
+          corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.11
-    text: '[DEPRECATED] Ensure that the --cadvisor-port argument is set to 0'
-    # This is one of those properties that can only be set as a command line argument. 
-    # To check if the property is set as expected, we need to parse the kubelet command 
-    # instead reading the Kubelet Configuration file.
-    audit: "/bin/ps -fC $kubeletbin "
-    type: skip
-    tests:
-      test_items:
-      - flag: --cadvisor-port
-        set: true
-        compare:
-          op: eq
-          value: 0
-      - flag: --cadvisor-port
-        set: false
-      bin_op: or
-    remediation: |
-      Edit the kubelet service file $kubeletsvc
-      on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
-      --cadvisor-port=0
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: false
+      - id: 2.1.11
+        text: '[DEPRECATED] Ensure that the --cadvisor-port argument is set to 0'
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        type: skip
+        tests:
+          test_items:
+            - flag: --cadvisor-port
+              set: true
+              compare:
+                op: eq
+                value: 0
+            - flag: --cadvisor-port
+              set: false
+          bin_op: or
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
+          --cadvisor-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
 
-  - id: 2.1.12
-    text: Ensure that the --rotate-certificates argument is not set to false (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --rotate-certificates
-        path: '{.rotateCertificates}'
-        set: true
-        compare:
-          op: eq
-          value: true
-      - flag: --rotate-certificates
-        path: '{.rotateCertificates}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
-      If using command line arguments, edit the kubelet service file $kubeletsvc
-      on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.12
+        text: Ensure that the --rotate-certificates argument is not set to false (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
+          If using command line arguments, edit the kubelet service file $kubeletsvc
+          on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.13
-    text: Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: RotateKubeletServerCertificate
-        path: '{.featureGates.RotateKubeletServerCertificate}'
-        set: true
-        compare:
-          op: eq
-          value: true
-    remediation: |
-      Edit the kubelet service file $kubeletsvc
-      on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
-      --feature-gates=RotateKubeletServerCertificate=true
-      Based on your system, restart the kubelet service. For example:
-      systemctl daemon-reload
-      systemctl restart kubelet.service
-    scored: true
+      - id: 2.1.13
+        text: Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 2.1.14
-    text: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --tls-cipher-suites
-        path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
-        set: true
-        compare:
-          op: valid_elements
-          value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-    remediation: |
-      If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-      If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
-      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-    scored: false
+      - id: 2.1.14
+        text: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+              set: true
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        scored: false
 
-- id: "2.2"
-  text: Configuration Files
-  checks:
-  - id: 2.2.1
-    text: Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chmod 644 $kubeletkubeconfig
-    scored: true
+  - id: "2.2"
+    text: Configuration Files
+    checks:
+      - id: 2.2.1
+        text: Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chmod 644 $kubeletkubeconfig
+        scored: true
 
-  - id: 2.2.2
-    text: Ensure that the kubelet.conf file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-        compare:
-          op: eq
-          value: root:root
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chown root:root $kubeletkubeconfig
-    scored: true
+      - id: 2.2.2
+        text: Ensure that the kubelet.conf file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
 
-  - id: 2.2.3
-    text: Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chmod 644 $kubeletsvc
-    scored: true
+      - id: 2.2.3
+        text: Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chmod 644 $kubeletsvc
+        scored: true
 
-  - id: 2.2.4
-    text: Ensure that the kubelet service file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chown root:root $kubeletsvc
-    scored: true
+      - id: 2.2.4
+        text: Ensure that the kubelet service file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chown root:root $kubeletsvc
+        scored: true
 
-  - id: 2.2.5
-    text: Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)
-    audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chmod 644 $proxykubeconfig
-    scored: true
+      - id: 2.2.5
+        text: Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chmod 644 $proxykubeconfig
+        scored: true
 
-  - id: 2.2.6
-    text: Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker
-      node. For example,
-      chown root:root $proxykubeconfig
-    scored: true
+      - id: 2.2.6
+        text: Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker
+          node. For example,
+          chown root:root $proxykubeconfig
+        scored: true
 
-  - id: 2.2.7
-    text: Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)
-    audit: "/bin/sh -c 'if test -e $kubeletcafile; then stat -c %a $kubeletcafile; fi'"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the following command to modify the file permissions of the --client-ca-file
-      chmod 644 <filename>
-    scored: true
+      - id: 2.2.7
+        text: Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)
+        audit: "/bin/sh -c 'if test -e $kubeletcafile; then stat -c %a $kubeletcafile; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the following command to modify the file permissions of the --client-ca-file
+          chmod 644 <filename>
+        scored: true
 
-  - id: 2.2.8
-    text: Ensure that the client certificate authorities file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c %U:%G $kubeletcafile; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-        compare:
-          op: eq
-          value: root:root
-    remediation: |
-      Run the following command to modify the ownership of the --client-ca-file .
-      chown root:root <filename>
-    scored: true
+      - id: 2.2.8
+        text: Ensure that the client certificate authorities file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c %U:%G $kubeletcafile; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file .
+          chown root:root <filename>
+        scored: true
 
-  - id: 2.2.9
-    text: Ensure that the kubelet configuration file ownership is set to root:root (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the following command (using the config file location identied in the Audit step)
-      chown root:root $kubeletconf
-    scored: true
+      - id: 2.2.9
+        text: Ensure that the kubelet configuration file ownership is set to root:root (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: true
 
-  - id: 2.2.10
-    text: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)
-    audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the following command (using the config file location identied in the Audit step)
-      chmod 644 $kubeletconf
-    scored: true
+      - id: 2.2.10
+        text: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: true
diff --git a/cfg/cis-1.5/config.yaml b/cfg/cis-1.5/config.yaml
index 4d9b1b8..b783945 100644
--- a/cfg/cis-1.5/config.yaml
+++ b/cfg/cis-1.5/config.yaml
@@ -1,2 +1,2 @@
----
-## Version-specific settings that override the values in cfg/config.yaml
\ No newline at end of file
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/cis-1.5/controlplane.yaml b/cfg/cis-1.5/controlplane.yaml
index e13a7ad..2e2600b 100644
--- a/cfg/cis-1.5/controlplane.yaml
+++ b/cfg/cis-1.5/controlplane.yaml
@@ -5,31 +5,31 @@ id: 3
 text: "Control Plane Configuration"
 type: "controlplane"
 groups:
-- id: 3.1
-  text: "Authentication and Authorization"
-  checks:
-  - id: 3.1.1
-    text: "Client certificate authentication should not be used for users (Not Scored) "
-    type: "manual"
-    remediation: |
-      Alternative mechanisms provided by Kubernetes such as the use of OIDC should be 
-      implemented in place of client certificates. 
-    scored: false
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Not Scored) "
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+          implemented in place of client certificates.
+        scored: false
 
-- id: 3.2
-  text: "Logging"
-  checks:
-  - id: 3.2.1
-    text: "Ensure that a minimal audit policy is created (Scored) "
-    type: "manual"
-    remediation: |
-      Create an audit policy file for your cluster. 
-    scored: true
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Scored) "
+        type: "manual"
+        remediation: |
+          Create an audit policy file for your cluster.
+        scored: true
 
-  - id: 3.2.2
-    text: "Ensure that the audit policy covers key security concerns (Not Scored) "
-    type: "manual"
-    remediation: |
-      Consider modification of the audit policy in use on the cluster to include these items, at a 
-      minimum. 
-    scored: false
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Not Scored) "
+        type: "manual"
+        remediation: |
+          Consider modification of the audit policy in use on the cluster to include these items, at a
+          minimum.
+        scored: false
diff --git a/cfg/cis-1.5/etcd.yaml b/cfg/cis-1.5/etcd.yaml
index c813da3..5108ac0 100644
--- a/cfg/cis-1.5/etcd.yaml
+++ b/cfg/cis-1.5/etcd.yaml
@@ -5,127 +5,127 @@ id: 2
 text: "Etcd Node Configuration"
 type: "etcd"
 groups:
-- id: 2
-  text: "Etcd Node Configuration Files"
-  checks:
-  - id: 2.1
-    text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
-    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--cert-file"
-        set: true
-      - flag:  "--key-file"
-        set: true
-    remediation: |
-      Follow the etcd service documentation and configure TLS encryption.
-      Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml 
-      on the master node and set the below parameters.
-      --cert-file=</path/to/ca-file>
-      --key-file=</path/to/key-file>
-    scored: true
-    
-  - id: 2.2
-    text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
-    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
-    tests:
-      test_items:
-      - flag: "--client-cert-auth"
-        compare:
-          op: eq
-          value: true
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and set the below parameter.
-      --client-cert-auth="true"
-    scored: true
+  - id: 2
+    text: "Etcd Node Configuration Files"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--cert-file"
+              set: true
+            - flag: "--key-file"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure TLS encryption.
+          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
+          on the master node and set the below parameters.
+          --cert-file=</path/to/ca-file>
+          --key-file=</path/to/key-file>
+        scored: true
 
-  - id: 2.3
-    text: "Ensure that the --auto-tls argument is not set to true (Scored)"
-    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--auto-tls"
-        set: false
-      - flag: "--auto-tls"
-        compare:
-          op: eq
-          value: false
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and either remove the --auto-tls parameter or set it to false.
-        --auto-tls=false
-    scored: true
-    
-  - id: 2.4
-    text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
-    set as appropriate (Scored)"
-    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--peer-cert-file"
-        set: true
-      - flag: "--peer-key-file"
-        set: true
-    remediation: |
-      Follow the etcd service documentation and configure peer TLS encryption as appropriate
-      for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
-      master node and set the below parameters.
-      --peer-client-file=</path/to/peer-cert-file>
-      --peer-key-file=</path/to/peer-key-file>
-    scored: true
-    
-  - id: 2.5
-    text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
-    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
-    tests:
-      test_items:
-      - flag: "--peer-client-cert-auth"
-        compare:
-          op: eq
-          value: true
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and set the below parameter.
-      --peer-client-cert-auth=true
-    scored: true
-    
-  - id: 2.6
-    text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
-    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--peer-auto-tls"
-        set: false
-      - flag: "--peer-auto-tls"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the etcd pod specification file $etcdconf on the master
-      node and either remove the --peer-auto-tls parameter or set it to false.
-      --peer-auto-tls=false
-    scored: true
-    
-  - id: 2.7
-    text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
-    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
-    tests:
-      test_items:
-      - flag: "--trusted-ca-file"
-        set: true
-    remediation: |
-      [Manual test]
-      Follow the etcd documentation and create a dedicated certificate authority setup for the
-      etcd service.
-      Then, edit the etcd pod specification file $etcdconf on the
-      master node and set the below parameter.
-      --trusted-ca-file=</path/to/ca-file>
-    scored: false
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --client-cert-auth="true"
+        scored: true
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Scored)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              set: false
+            - flag: "--auto-tls"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          --auto-tls=false
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+        set as appropriate (Scored)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              set: true
+            - flag: "--peer-key-file"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-client-file=</path/to/peer-cert-file>
+          --peer-key-file=</path/to/peer-key-file>
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --peer-client-cert-auth=true
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              set: false
+            - flag: "--peer-auto-tls"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+          --peer-auto-tls=false
+        scored: true
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--trusted-ca-file"
+              set: true
+        remediation: |
+          [Manual test]
+          Follow the etcd documentation and create a dedicated certificate authority setup for the
+          etcd service.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameter.
+          --trusted-ca-file=</path/to/ca-file>
+        scored: false
diff --git a/cfg/cis-1.5/master.yaml b/cfg/cis-1.5/master.yaml
index 83315dd..fc12468 100644
--- a/cfg/cis-1.5/master.yaml
+++ b/cfg/cis-1.5/master.yaml
@@ -5,1110 +5,1110 @@ id: 1
 text: "Master Node Security Configuration"
 type: "master"
 groups:
-- id: 1.1
-  text: "Master Node Configuration Files "
-  checks:
-  - id: 1.1.1
-    text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the
-      master node.
-      For example, chmod 644 $apiserverconf 
-    scored: true
-
-  - id: 1.1.2
-    text: "Ensure that the API server pod specification file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example, 
-      chown root:root $apiserverconf 
-    scored: true
-
-  - id: 1.1.3
-    text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node.
-      For example, 
-      chmod 644 $controllermanagerconf 
-    scored: true
-
-  - id: 1.1.4
-    text: "Ensure that the controller manager pod specification file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chown root:root $controllermanagerconf 
-    scored: true
-
-  - id: 1.1.5
-    text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chmod 644 $schedulerconf 
-    scored: true
-
-  - id: 1.1.6
-    text: "Ensure that the scheduler pod specification file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
-    tests:
-      test_items:
-        - flag: "root:root"
-          compare:
-            op: eq
-            value: "root:root"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chown root:root $schedulerconf 
-    scored: true
-
-  - id: 1.1.7
-    text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chmod 644 $etcdconf 
-    scored: true
-
-  - id: 1.1.8
-    text: "Ensure that the etcd pod specification file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chown root:root $etcdconf 
-    scored: true
-
-  - id: 1.1.9
-    text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Not Scored)"
-    audit: "stat -c %a <path/to/cni/files>"
-    type: "manual"
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chmod 644 <path/to/cni/files> 
-    scored: false
-
-  - id: 1.1.10
-    text: "Ensure that the Container Network Interface file ownership is set to root:root (Not Scored)"
-    audit: "stat -c %U:%G <path/to/cni/files>"
-    type: "manual"
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chown root:root <path/to/cni/files> 
-    scored: false
-
-  - id: 1.1.11
-    text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
-    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
-    tests:
-      test_items:
-      - flag: "700"
-        compare:
-          op: eq
-          value: "700"
-        set: true
-    remediation: |
-      On the etcd server node, get the etcd data directory, passed as an argument --data-dir, 
-      from the below command: 
-      ps -ef | grep etcd Run the below command (based on the etcd data directory found above). For example, 
-      chmod 700 /var/lib/etcd 
-    scored: true
-
-  - id: 1.1.12
-    text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
-    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
-    tests:
-      test_items:
-      - flag: "etcd:etcd"
-        set: true
-    remediation: |
-      On the etcd server node, get the etcd data directory, passed as an argument --data-dir, 
-      from the below command: 
-      ps -ef | grep etcd 
-      Run the below command (based on the etcd data directory found above). 
-      For example, chown etcd:etcd /var/lib/etcd 
-    scored: true
-
-  - id: 1.1.13
-    text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chmod 644 /etc/kubernetes/admin.conf 
-    scored: true
-
-  - id: 1.1.14
-    text: "Ensure that the admin.conf file ownership is set to root:root (Scored) "
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chown root:root /etc/kubernetes/admin.conf 
-    scored: true
-
-  - id: 1.1.15
-    text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chmod 644 /etc/kubernetes/scheduler.conf 
-    scored: true
-
-  - id: 1.1.16
-    text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chown root:root /etc/kubernetes/scheduler.conf 
-    scored: true
-
-  - id: 1.1.17
-    text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chmod 644 /etc/kubernetes/controller-manager.conf 
-    scored: true
-
-  - id: 1.1.18
-    text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
-    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chown root:root /etc/kubernetes/controller-manager.conf 
-    scored: true
-
-  - id: 1.1.19
-    text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)"
-    audit: "ls -laR /etc/kubernetes/pki/"
-    type: "manual"
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chown -R root:root /etc/kubernetes/pki/ 
-    scored: true
-
-  - id: 1.1.20
-    text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored) "
-    audit: "stat -c %n\ %a /etc/kubernetes/pki/*.crt"
-    type: "manual"
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chmod -R 644 /etc/kubernetes/pki/*.crt 
-    scored: true
-
-  - id: 1.1.21
-    text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored)"
-    audit: "stat -c %n\ %a /etc/kubernetes/pki/*.key"
-    type: "manual"
-    remediation: |
-      Run the below command (based on the file location on your system) on the master node. 
-      For example, 
-      chmod -R 600 /etc/kubernetes/pki/*.key 
-    scored: true
-
-- id: 1.2
-  text: "API Server"
-  checks:
-  - id: 1.2.1
-    text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--anonymous-auth"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the below parameter. 
-      --anonymous-auth=false 
-    scored: false
-
-  - id: 1.2.2
-    text: "Ensure that the --basic-auth-file argument is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--basic-auth-file"
-        set: false
-    remediation: |
-      Follow the documentation and configure alternate mechanisms for authentication. Then, 
-      edit the API server pod specification file $apiserverconf 
-      on the master node and remove the --basic-auth-file=<filename> parameter. 
-    scored: true
-
-  - id: 1.2.3
-    text: "Ensure that the --token-auth-file parameter is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--token-auth-file"
-        set: false
-    remediation: |
-      Follow the documentation and configure alternate mechanisms for authentication. Then, 
-      edit the API server pod specification file $apiserverconf 
-      on the master node and remove the --token-auth-file=<filename> parameter. 
-    scored: true
-
-  - id: 1.2.4
-    text: "Ensure that the --kubelet-https argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--kubelet-https"
-        compare:
-          op: eq
-          value: true
-        set: true
-      - flag: "--kubelet-https"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and remove the --kubelet-https parameter. 
-    scored: true
-
-  - id: 1.2.5
-    text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--kubelet-client-certificate"
-        set: true
-      - flag: "--kubelet-client-key"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the 
-      apiserver and kubelets. Then, edit API server pod specification file 
-      $apiserverconf on the master node and set the 
-      kubelet client certificate and key parameters as below. 
-      --kubelet-client-certificate=<path/to/client-certificate-file> 
-      --kubelet-client-key=<path/to/client-key-file> 
-    scored: true
-
-  - id: 1.2.6
-    text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--kubelet-certificate-authority"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and setup the TLS connection between 
-      the apiserver and kubelets. Then, edit the API server pod specification file 
-      $apiserverconf on the master node and set the 
-      --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. 
-      --kubelet-certificate-authority=<ca-string> 
-    scored: true
-
-  - id: 1.2.7
-    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--authorization-mode"
-        compare:
-          op: nothave
-          value: "AlwaysAllow"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. 
-      One such example could be as below. 
-      --authorization-mode=RBAC 
-    scored: true
-
-  - id: 1.2.8
-    text: "Ensure that the --authorization-mode argument includes Node (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--authorization-mode"
-        compare:
-          op: has
-          value: "Node"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --authorization-mode parameter to a value that includes Node. 
-      --authorization-mode=Node,RBAC 
-    scored: true
-
-  - id: 1.2.9
-    text: "Ensure that the --authorization-mode argument includes RBAC (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--authorization-mode"
-        compare:
-          op: has
-          value: "RBAC"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --authorization-mode parameter to a value that includes RBAC, 
-      for example: 
-      --authorization-mode=Node,RBAC 
-    scored: true
-
-  - id: 1.2.10
-    text: "Ensure that the admission control plugin EventRateLimit is set (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "EventRateLimit"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set the desired limits in a configuration file. 
-      Then, edit the API server pod specification file $apiserverconf 
-      and set the below parameters. 
-      --enable-admission-plugins=...,EventRateLimit,... 
-      --admission-control-config-file=<path/to/configuration/file> 
-    scored: false
-
-  - id: 1.2.11
-    text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: nothave
-          value: AlwaysAdmit
-        set: true
-      - flag: "--enable-admission-plugins"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and either remove the --enable-admission-plugins parameter, or set it to a 
-      value that does not include AlwaysAdmit. 
-    scored: true
-
-  - id: 1.2.12
-    text: "Ensure that the admission control plugin AlwaysPullImages is set (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "AlwaysPullImages"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --enable-admission-plugins parameter to include 
-      AlwaysPullImages. 
-      --enable-admission-plugins=...,AlwaysPullImages,... 
-    scored: false
-
-  - id: 1.2.13
-    text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "SecurityContextDeny"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --enable-admission-plugins parameter to include 
-      SecurityContextDeny, unless PodSecurityPolicy is already in place. 
-      --enable-admission-plugins=...,SecurityContextDeny,... 
-    scored: false
-
-  - id: 1.2.14
-    text: "Ensure that the admission control plugin ServiceAccount is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "ServiceAccount"
-        set: true
-      - flag: "--enable-admission-plugins"
-        set: false
-    remediation: |
-      Follow the documentation and create ServiceAccount objects as per your environment. 
-      Then, edit the API server pod specification file $apiserverconf 
-      on the master node and ensure that the --disable-admission-plugins parameter is set to a 
-      value that does not include ServiceAccount.
-    scored: true
-
-  - id: 1.2.15
-    text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--disable-admission-plugins"
-        compare:
-          op: nothave
-          value: "NamespaceLifecycle"
-        set: true
-      - flag: "--disable-admission-plugins"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --disable-admission-plugins parameter to 
-      ensure it does not include NamespaceLifecycle. 
-    scored: true
-
-  - id: 1.2.16
-    text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "PodSecurityPolicy"
-        set: true
-    remediation: |
-      Follow the documentation and create Pod Security Policy objects as per your environment. 
-      Then, edit the API server pod specification file $apiserverconf 
-      on the master node and set the --enable-admission-plugins parameter to a 
-      value that includes PodSecurityPolicy: 
-      --enable-admission-plugins=...,PodSecurityPolicy,... 
-      Then restart the API Server.
-    scored: true
-
-  - id: 1.2.17
-    text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--enable-admission-plugins"
-        compare:
-          op: has
-          value: "NodeRestriction"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. 
-      Then, edit the API server pod specification file $apiserverconf 
-      on the master node and set the --enable-admission-plugins parameter to a 
-      value that includes NodeRestriction. 
-      --enable-admission-plugins=...,NodeRestriction,... 
-    scored: true
-
-  - id: 1.2.18
-    text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--insecure-bind-address"
-        set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and remove the --insecure-bind-address parameter. 
-    scored: true
-
-  - id: 1.2.19
-    text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--insecure-port"
-        compare:
-          op: eq
-          value: 0
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the below parameter. 
-      --insecure-port=0 
-    scored: true
-
-  - id: 1.2.20
-    text: "Ensure that the --secure-port argument is not set to 0 (Scored) "
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-        - flag:  "--secure-port"
-          compare:
-            op: gt
-            value: 0
-          set: true
-        - flag: "--secure-port"
-          set: false
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and either remove the --secure-port parameter or 
-      set it to a different (non-zero) desired port. 
-    scored: true
-
-  - id: 1.2.21
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $apiserver | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the below parameter. 
-      --profiling=false 
-    scored: true
-
-  - id: 1.2.22
-    text: "Ensure that the --audit-log-path argument is set (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-path"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --audit-log-path parameter to a suitable path and 
-      file where you would like audit logs to be written, for example: 
-      --audit-log-path=/var/log/apiserver/audit.log 
-    scored: true
-
-  - id: 1.2.23
-    text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxage"
-        compare:
-          op: gte
-          value: 30
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days:
-      --audit-log-maxage=30 
-    scored: true
-
-  - id: 1.2.24
-    text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxbackup"
-        compare:
-          op: gte
-          value: 10
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate 
-      value. 
-      --audit-log-maxbackup=10 
-    scored: true
-
-  - id: 1.2.25
-    text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--audit-log-maxsize"
-        compare:
-          op: gte
-          value: 100
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. 
-      For example, to set it as 100 MB: 
-      --audit-log-maxsize=100 
-    scored: true
-
-  - id: 1.2.26
-    text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--request-timeout"
-        set: false
-      - flag: "--request-timeout"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      and set the below parameter as appropriate and if needed. 
-      For example, 
-      --request-timeout=300s 
-    scored: true
-
-  - id: 1.2.27
-    text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--service-account-lookup"
-        set: false
-      - flag: "--service-account-lookup"
-        compare:
-          op: eq
-          value: true
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the below parameter. 
-      --service-account-lookup=true 
-      Alternatively, you can delete the --service-account-lookup parameter from this file so 
-      that the default takes effect. 
-    scored: true
-
-  - id: 1.2.28
-    text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--service-account-key-file"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the --service-account-key-file parameter 
-      to the public key file for service accounts: 
-      --service-account-key-file=<filename> 
-    scored: true
-
-  - id: 1.2.29
-    text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Scored) "
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--etcd-certfile"
-        set: true
-      - flag: "--etcd-keyfile"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
-      Then, edit the API server pod specification file $apiserverconf 
-      on the master node and set the etcd certificate and key file parameters. 
-      --etcd-certfile=<path/to/client-certificate-file>  
-      --etcd-keyfile=<path/to/client-key-file> 
-    scored: true
-
-  - id: 1.2.30
-    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "--tls-cert-file"
-        set: true
-      - flag: "--tls-private-key-file"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
-      Then, edit the API server pod specification file $apiserverconf 
-      on the master node and set the TLS certificate and private key file parameters. 
-      --tls-cert-file=<path/to/tls-certificate-file>  
-      --tls-private-key-file=<path/to/tls-key-file> 
-    scored: true
-
-  - id: 1.2.31
-    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--client-ca-file"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
-      Then, edit the API server pod specification file $apiserverconf 
-      on the master node and set the client certificate authority file. 
-      --client-ca-file=<path/to/client-ca-file> 
-    scored: true
-
-  - id: 1.2.32
-    text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--etcd-cafile"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
-      Then, edit the API server pod specification file $apiserverconf 
-      on the master node and set the etcd certificate authority file parameter. 
-      --etcd-cafile=<path/to/ca-file> 
-    scored: true
-
-  - id: 1.2.33
-    text: "Ensure that the --encryption-provider-config argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--encryption-provider-config"
-        set: true
-    remediation: |
-      Follow the Kubernetes documentation and configure a EncryptionConfig file. 
-      Then, edit the API server pod specification file $apiserverconf 
-      on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config=</path/to/EncryptionConfig/File> 
-    scored: true
-
-  - id: 1.2.34
-    text: "Ensure that encryption providers are appropriately configured (Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    type: "manual"
-    remediation: |
-      Follow the Kubernetes documentation and configure a EncryptionConfig file. 
-      In this file, choose aescbc, kms or secretbox as the encryption provider. 
-    scored: true
-
-  - id: 1.2.35
-    text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
-    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--tls-cipher-suites"
-        compare:
-          op: has
-          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
-        set: true
-    remediation: |
-      Edit the API server pod specification file $apiserverconf 
-      on the master node and set the below parameter. 
-      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 
-    scored: false
-
-- id: 1.3
-  text: "Controller Manager"
-  checks:
-  - id: 1.3.1
-    text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--terminated-pod-gc-threshold"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf 
-      on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, 
-      for example: 
-      --terminated-pod-gc-threshold=10 
-    scored: true
-
-  - id: 1.3.2
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf 
-      on the master node and set the below parameter. 
-      --profiling=false 
-    scored: true
-
-  - id: 1.3.3
-    text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--use-service-account-credentials"
-        compare:
-          op: noteq
-          value: false
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf 
-      on the master node to set the below parameter. 
-      --use-service-account-credentials=true 
-    scored: true
-
-  - id: 1.3.4
-    text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--service-account-private-key-file"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf 
-      on the master node and set the --service-account-private-key-file parameter 
-      to the private key file for service accounts. 
-      --service-account-private-key-file=<filename> 
-    scored: true
-
-  - id: 1.3.5
-    text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--root-ca-file"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf 
-      on the master node and set the --root-ca-file parameter to the certificate bundle file`. 
-      --root-ca-file=<path/to/file> 
-    scored: true
-
-  - id: 1.3.6
-    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--feature-gates"
-        compare:
-          op: eq
-          value: "RotateKubeletServerCertificate=true"
-        set: true
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf 
-      on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. 
-      --feature-gates=RotateKubeletServerCertificate=true 
-    scored: true
-
-  - id: 1.3.7
-    text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Scored)"
-    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--bind-address"
-        compare:
-          op: eq
-          value: "127.0.0.1"
-        set: true
-      - flag: "--bind-address"
-        set: false
-    remediation: |
-      Edit the Controller Manager pod specification file $controllermanagerconf 
-      on the master node and ensure the correct value for the --bind-address parameter 
-    scored: true
-
-- id: 1.4
-  text: "Scheduler"
-  checks:
-  - id: 1.4.1
-    text: "Ensure that the --profiling argument is set to false (Scored)"
-    audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-    tests:
-      test_items:
-      - flag: "--profiling"
-        compare:
-          op: eq
-          value: false
-        set: true
-    remediation: |
-      Edit the Scheduler pod specification file $schedulerconf file 
-      on the master node and set the below parameter. 
-      --profiling=false 
-    scored: true
-
-  - id: 1.4.2
-    text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Scored) "
-    audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "--bind-address"
-        compare:
-          op: eq
-          value: "127.0.0.1"
-        set: true
-      - flag: "--bind-address"
-        set: false
-    remediation: |
-      Edit the Scheduler pod specification file $schedulerconf 
-      on the master node and ensure the correct value for the --bind-address parameter 
-    scored: true
+  - id: 1.1
+    text: "Master Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          master node.
+          For example, chmod 644 $apiserverconf
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $apiserverconf
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $controllermanagerconf
+        scored: true
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $controllermanagerconf
+        scored: true
+
+      - id: 1.1.5
+        text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $schedulerconf
+        scored: true
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $schedulerconf
+        scored: true
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $etcdconf
+        scored: true
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $etcdconf
+        scored: true
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Not Scored)"
+        audit: "stat -c %a <path/to/cni/files>"
+        type: "manual"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 <path/to/cni/files>
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Not Scored)"
+        audit: "stat -c %U:%G <path/to/cni/files>"
+        type: "manual"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root <path/to/cni/files>
+        scored: false
+
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
+        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
+        tests:
+          test_items:
+            - flag: "700"
+              compare:
+                op: eq
+                value: "700"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the below command:
+          ps -ef | grep etcd Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 /var/lib/etcd
+        scored: true
+
+      - id: 1.1.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
+        audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
+        tests:
+          test_items:
+            - flag: "etcd:etcd"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the below command:
+          ps -ef | grep etcd
+          Run the below command (based on the etcd data directory found above).
+          For example, chown etcd:etcd /var/lib/etcd
+        scored: true
+
+      - id: 1.1.13
+        text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 /etc/kubernetes/admin.conf
+        scored: true
+
+      - id: 1.1.14
+        text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root /etc/kubernetes/admin.conf
+        scored: true
+
+      - id: 1.1.15
+        text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 /etc/kubernetes/scheduler.conf
+        scored: true
+
+      - id: 1.1.16
+        text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root /etc/kubernetes/scheduler.conf
+        scored: true
+
+      - id: 1.1.17
+        text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 /etc/kubernetes/controller-manager.conf
+        scored: true
+
+      - id: 1.1.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
+        audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root /etc/kubernetes/controller-manager.conf
+        scored: true
+
+      - id: 1.1.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)"
+        audit: "ls -laR /etc/kubernetes/pki/"
+        type: "manual"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown -R root:root /etc/kubernetes/pki/
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)"
+        audit: "stat -c '%n %a' /etc/kubernetes/pki/*.crt"
+        type: "manual"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod -R 644 /etc/kubernetes/pki/*.crt
+        scored: true
+
+      - id: 1.1.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored)"
+        audit: "stat -c '%n %a' /etc/kubernetes/pki/*.key"
+        type: "manual"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod -R 600 /etc/kubernetes/pki/*.key
+        scored: true
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --anonymous-auth=false
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --basic-auth-file argument is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--basic-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the master node and remove the --basic-auth-file=<filename> parameter.
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --token-auth-file parameter is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the master node and remove the --token-auth-file=<filename> parameter.
+        scored: true
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-https argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--kubelet-https"
+              compare:
+                op: eq
+                value: true
+              set: true
+            - flag: "--kubelet-https"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and remove the --kubelet-https parameter.
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+              set: true
+            - flag: "--kubelet-client-key"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and kubelets. Then, edit API server pod specification file
+          $apiserverconf on the master node and set the
+          kubelet client certificate and key parameters as below.
+          --kubelet-client-certificate=<path/to/client-certificate-file>
+          --kubelet-client-key=<path/to/client-key-file>
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and setup the TLS connection between
+          the apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the master node and set the
+          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+          --kubelet-certificate-authority=<ca-string>
+        scored: true
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --authorization-mode parameter to values other than AlwaysAllow.
+          One such example could be as below.
+          --authorization-mode=RBAC
+        scored: true
+
+      - id: 1.2.8
+        text: "Ensure that the --authorization-mode argument includes Node (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --authorization-mode parameter to a value that includes Node.
+          --authorization-mode=Node,RBAC
+        scored: true
+
+      - id: 1.2.9
+        text: "Ensure that the --authorization-mode argument includes RBAC (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "RBAC"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --authorization-mode parameter to a value that includes RBAC,
+          for example:
+          --authorization-mode=Node,RBAC
+        scored: true
+
+      - id: 1.2.10
+        text: "Ensure that the admission control plugin EventRateLimit is set (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "EventRateLimit"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set the desired limits in a configuration file.
+          Then, edit the API server pod specification file $apiserverconf
+          and set the below parameters.
+          --enable-admission-plugins=...,EventRateLimit,...
+          --admission-control-config-file=<path/to/configuration/file>
+        scored: false
+
+      - id: 1.2.11
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: AlwaysAdmit
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and either remove the --enable-admission-plugins parameter, or set it to a
+          value that does not include AlwaysAdmit.
+        scored: true
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "AlwaysPullImages"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to include
+          AlwaysPullImages.
+          --enable-admission-plugins=...,AlwaysPullImages,...
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to include
+          SecurityContextDeny, unless PodSecurityPolicy is already in place.
+          --enable-admission-plugins=...,SecurityContextDeny,...
+        scored: false
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin ServiceAccount is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "ServiceAccount"
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and ensure that the --disable-admission-plugins parameter is set to a
+          value that does not include ServiceAccount.
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+              set: true
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+              set: true
+        remediation: |
+          Follow the documentation and create Pod Security Policy objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes PodSecurityPolicy:
+          --enable-admission-plugins=...,PodSecurityPolicy,...
+          Then restart the API Server.
+        scored: true
+
+      - id: 1.2.17
+        text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes NodeRestriction.
+          --enable-admission-plugins=...,NodeRestriction,...
+        scored: true
+
+      - id: 1.2.18
+        text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--insecure-bind-address"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and remove the --insecure-bind-address parameter.
+        scored: true
+
+      - id: 1.2.19
+        text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--insecure-port"
+              compare:
+                op: eq
+                value: 0
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --insecure-port=0
+        scored: true
+
+      - id: 1.2.20
+        text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+              set: true
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and either remove the --secure-port parameter or
+          set it to a different (non-zero) desired port.
+        scored: true
+
+      - id: 1.2.21
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.2.22
+        text: "Ensure that the --audit-log-path argument is set (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-path"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-path parameter to a suitable path and
+          file where you would like audit logs to be written, for example:
+          --audit-log-path=/var/log/apiserver/audit.log
+        scored: true
+
+      - id: 1.2.23
+        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxage"
+              compare:
+                op: gte
+                value: 30
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days:
+          --audit-log-maxage=30
+        scored: true
+
+      - id: 1.2.24
+        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxbackup"
+              compare:
+                op: gte
+                value: 10
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
+          value.
+          --audit-log-maxbackup=10
+        scored: true
+
+      - id: 1.2.25
+        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxsize"
+              compare:
+                op: gte
+                value: 100
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB.
+          For example, to set it as 100 MB:
+          --audit-log-maxsize=100
+        scored: true
+
+      - id: 1.2.26
+        text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--request-timeout"
+              set: false
+            - flag: "--request-timeout"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          and set the below parameter as appropriate and if needed.
+          For example,
+          --request-timeout=300s
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --service-account-lookup=true
+          Alternatively, you can delete the --service-account-lookup parameter from this file so
+          that the default takes effect.
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the --service-account-key-file parameter
+          to the public key file for service accounts:
+          --service-account-key-file=<filename>
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+              set: true
+            - flag: "--etcd-keyfile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the etcd certificate and key file parameters.
+          --etcd-certfile=<path/to/client-certificate-file>
+          --etcd-keyfile=<path/to/client-key-file>
+        scored: true
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+              set: true
+            - flag: "--tls-private-key-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+        scored: true
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the client certificate authority file.
+          --client-ca-file=<path/to/client-ca-file>
+        scored: true
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the etcd certificate authority file parameter.
+          --etcd-cafile=<path/to/ca-file>
+        scored: true
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and configure a EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config=</path/to/EncryptionConfig/File>
+        scored: true
+
+      - id: 1.2.34
+        text: "Ensure that encryption providers are appropriately configured (Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and configure a EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: true
+
+      - id: 1.2.35
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: has
+                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the master node and set the below parameter.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example:
+          --terminated-pod-gc-threshold=10
+        scored: true
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=<filename>
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=<path/to/file>
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: eq
+                value: "RotateKubeletServerCertificate=true"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Scored)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and ensure the correct value for the --bind-address parameter
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Scored)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf file
+          on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Scored)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the master node and ensure the correct value for the --bind-address parameter
+        scored: true
diff --git a/cfg/cis-1.5/node.yaml b/cfg/cis-1.5/node.yaml
index 31646f1..e6cb34b 100644
--- a/cfg/cis-1.5/node.yaml
+++ b/cfg/cis-1.5/node.yaml
@@ -5,501 +5,501 @@ id: 4
 text: "Worker Node Security Configuration"
 type: "node"
 groups:
-- id: 4.1
-  text: "Worker Node Configuration Files"
-  checks:
-  - id: 4.1.1
-    text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)"
-    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker node. 
-      For example, 
-      chmod 644 $kubeletsvc 
-    scored: true
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on the each worker node.
+          For example,
+          chmod 644 $kubeletsvc
+        scored: true
 
-  - id: 4.1.2
-    text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
-    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker node. 
-      For example, 
-      chown root:root $kubeletsvc 
-    scored: true
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
 
-  - id: 4.1.3
-    text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
-    audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker node. 
-      For example, 
-      chmod 644 $proykubeconfig 
-    scored: true
+      - id: 4.1.3
+        text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on the each worker node.
+          For example,
+          chmod 644 $proxykubeconfig
+        scored: true
 
-  - id: 4.1.4
-    text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
-    audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker node. 
-      For example, chown root:root $proxykubeconfig 
-    scored: true
+      - id: 4.1.4
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
 
-  - id: 4.1.5
-    text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)"
-    audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker node. 
-      For example, 
-      chmod 644 $kubeletkubeconfig 
-    scored: true
+      - id: 4.1.5
+        text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the below command (based on the file location on your system) on the each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: true
 
-  - id: 4.1.6
-    text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
-    audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-        compare:
-          op: eq
-          value: root:root
-    remediation: |
-      Run the below command (based on the file location on your system) on the each worker node. 
-      For example, 
-      chown root:root $kubeletkubeconfig 
-    scored: true
+      - id: 4.1.6
+        text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on the each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
 
-  - id: 4.1.7
-    text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)"
-    types: "manual"
-    remediation: |
-      Run the following command to modify the file permissions of the 
-      --client-ca-file chmod 644 <filename> 
-    scored: true
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)"
+        type: "manual"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file chmod 644 <filename>
+        scored: true
 
-  - id: 4.1.8
-    text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
-    audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c %U:%G $kubeletcafile; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-        compare:
-          op: eq
-          value: root:root
-    remediation: |
-      Run the following command to modify the ownership of the --client-ca-file. 
-      chown root:root <filename> 
-    scored: true
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
+        audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c %U:%G $kubeletcafile; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root <filename>
+        scored: true
 
-  - id: 4.1.9
-    text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)"
-    audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'' '
-    tests:
-      test_items:
-      - flag: "644"
-        set: true
-        compare:
-          op: eq
-          value: "644"
-      - flag: "640"
-        set: true
-        compare:
-          op: eq
-          value: "640"
-      - flag: "600"
-        set: true
-        compare:
-          op: eq
-          value: "600"
-      bin_op: or
-    remediation: |
-      Run the following command (using the config file location identied in the Audit step) 
-      chmod 644 $kubeletconf 
-    scored: true
+      - id: 4.1.9
+        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              set: true
+              compare:
+                op: eq
+                value: "644"
+            - flag: "640"
+              set: true
+              compare:
+                op: eq
+                value: "640"
+            - flag: "600"
+              set: true
+              compare:
+                op: eq
+                value: "600"
+          bin_op: or
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: true
 
-  - id: 4.1.10
-    text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)"
-    audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
-    tests:
-      test_items:
-      - flag: root:root
-        set: true
-    remediation: |
-      Run the following command (using the config file location identied in the Audit step) 
-      chown root:root $kubeletconf 
-    scored: true
+      - id: 4.1.10
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: true
 
-- id: 4.2
-  text: "Kubelet"
-  checks:
-  - id: 4.2.1
-    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: "--anonymous-auth"
-        path: '{.authentication.anonymous.enabled}'
-        set: true
-        compare:
-          op: eq
-          value: false
-    remediation: |
-      If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
-      false. 
-      If using executable arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
-      --anonymous-auth=false 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service
-    scored: true
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              set: true
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.2
-    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --authorization-mode
-        path: '{.authorization.mode}'
-        set: true
-        compare:
-          op: nothave
-          value: AlwaysAllow
-    remediation: |
-      If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If 
-      using executable arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the below parameter in KUBELET_AUTHZ_ARGS variable. 
-      --authorization-mode=Webhook 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service  
-    scored: true
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              set: true
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.3
-    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --client-ca-file
-        path: '{.authentication.x509.clientCAFile}'
-        set: true
-    remediation: |
-      If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to 
-      the location of the client CA file. 
-      If using command line arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the below parameter in KUBELET_AUTHZ_ARGS variable. 
-      --client-ca-file=<path/to/client-ca-file> 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service 
-    scored: true
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=<path/to/client-ca-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.4
-    text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: "--read-only-port"
-        path: '{.readOnlyPort}'
-        set: true
-        compare:
-          op: eq
-          value: 0
-    remediation: |
-      If using a Kubelet config file, edit the file to set readOnlyPort to 0. 
-      If using command line arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
-      --read-only-port=0 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service 
-    scored: true
+      - id: 4.2.4
+        text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set readOnlyPort to 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.5
-    text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --streaming-connection-idle-timeout
-        path: '{.streamingConnectionIdleTimeout}'
-        set: true
-        compare:
-          op: noteq
-          value: 0
-      - flag: --streaming-connection-idle-timeout
-        path: '{.streamingConnectionIdleTimeout}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a 
-      value other than 0. 
-      If using command line arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
-      --streaming-connection-idle-timeout=5m 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service   
-    scored: true
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: true
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.6
-    text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --protect-kernel-defaults
-        path: '{.protectKernelDefaults}'
-        set: true
-        compare:
-          op: eq
-          value: true
-    remediation: |
-      If using a Kubelet config file, edit the file to set protectKernelDefaults: true. 
-      If using command line arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
-      --protect-kernel-defaults=true 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service   
-    scored: true
+      - id: 4.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.7
-    text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored) "
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --make-iptables-util-chains
-        path: '{.makeIPTablesUtilChains}'
-        set: true
-        compare:
-          op: eq
-          value: true
-      - flag: --make-iptables-util-chains
-        path: '{.makeIPTablesUtilChains}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. 
-      If using command line arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      remove the --make-iptables-util-chains argument from the 
-      KUBELET_SYSTEM_PODS_ARGS variable. 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service   
-    scored: true
+      - id: 4.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.8
-    text: "Ensure that the --hostname-override argument is not set (Not Scored)"
-    # This is one of those properties that can only be set as a command line argument. 
-    # To check if the property is set as expected, we need to parse the kubelet command 
-    # instead reading the Kubelet Configuration file.
-    audit: "/bin/ps -fC $kubeletbin "
-    tests:
-      test_items:
-      - flag: --hostname-override
-        set: false
-    remediation: |
-      Edit the kubelet service file $kubeletsvc 
-      on each worker node and remove the --hostname-override argument from the
-      KUBELET_SYSTEM_PODS_ARGS variable. 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service 
-    scored: false
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Not Scored)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
 
-  - id: 4.2.9
-    text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Not Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --event-qps
-        path: '{.eventRecordQPS}'
-        set: true
-        compare:
-          op: eq
-          value: 0
-    remediation: |
-      If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
-      If using command line arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service   
-    scored: false
+      - id: 4.2.9
+        text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Not Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
 
-  - id: 4.2.10
-    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --tls-cert-file
-        path: '{.tlsCertFile}'
-        set: true
-      - flag: --tls-private-key-file
-        path: '{.tlsPrivateKeyFile}'
-        set: true
-    remediation: |
-      If using a Kubelet config file, edit the file to set tlsCertFile to the location 
-      of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile 
-      to the location of the corresponding private key file. 
-      If using command line arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the below parameters in KUBELET_CERTIFICATE_ARGS variable. 
-      --tls-cert-file=<path/to/tls-certificate-file>  
-      --tls-private-key-file=<path/to/tls-key-file> 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service   
-    scored: true
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+              set: true
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set tlsCertFile to the location
+          of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.11
-    text: "Ensure that the --rotate-certificates argument is not set to false (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --rotate-certificates
-        path: '{.rotateCertificates}'
-        set: true
-        compare:
-          op: eq
-          value: true
-      - flag: --rotate-certificates
-        path: '{.rotateCertificates}'
-        set: false
-      bin_op: or
-    remediation: |
-      If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
-      remove it altogether to use the default value. 
-      If using command line arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
-      variable. 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service 
-    scored: true
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.12
-    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: RotateKubeletServerCertificate
-        path: '{.featureGates.RotateKubeletServerCertificate}'
-        set: true
-        compare:
-          op: eq
-          value: true
-    remediation: |
-      Edit the kubelet service file $kubeletsvc 
-      on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. 
-      --feature-gates=RotateKubeletServerCertificate=true 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service   
-    scored: true
+      - id: 4.2.12
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
 
-  - id: 4.2.13
-    text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)"
-    audit: "/bin/ps -fC $kubeletbin"      
-    audit_config: "/bin/cat $kubeletconf"
-    tests:
-      test_items:
-      - flag: --tls-cipher-suites
-        path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
-        set: true
-        compare:
-          op: valid_elements
-          value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-    remediation: |
-      If using a Kubelet config file, edit the file to set TLSCipherSuites: to 
-      TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-      or to a subset of these values. 
-      If using executable arguments, edit the kubelet service file 
-      $kubeletsvc on each worker node and 
-      set the --tls-cipher-suites parameter as follows, or to a subset of these values.  
-      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 
-      Based on your system, restart the kubelet service. For example: 
-      systemctl daemon-reload 
-      systemctl restart kubelet.service 
-    scored: false
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+              set: true
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set TLSCipherSuites: to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
diff --git a/cfg/cis-1.5/policies.yaml b/cfg/cis-1.5/policies.yaml
index 5e24ec1..cb4340e 100644
--- a/cfg/cis-1.5/policies.yaml
+++ b/cfg/cis-1.5/policies.yaml
@@ -5,235 +5,235 @@ id: 5
 text: "Kubernetes Policies"
 type: "policies"
 groups:
-- id: 5.1
-  text: "RBAC and Service Accounts"
-  checks:
-  - id: 5.1.1
-    text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
-    type: "manual"
-    remediation: |
-      Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
-      if they need this role or if they could use a role with fewer privileges. 
-      Where possible, first bind users to a lower privileged role and then remove the
-      clusterrolebinding to the cluster-admin role : 
-      kubectl delete clusterrolebinding [name]   
-    scored: false
-
-  - id: 5.1.2
-    text: "Minimize access to secrets (Not Scored)"
-    type: "manual"
-    remediation: |
-      Where possible, remove get, list and watch access to secret objects in the cluster. 
-    scored: false
-
-  - id: 5.1.3
-    text: "Minimize wildcard use in Roles and ClusterRoles (Not Scored)"
-    type: "manual"
-    remediation: |
-      Where possible replace any use of wildcards in clusterroles and roles with specific
-      objects or actions.
-    scored: false
-    
-  - id: 5.1.4
-    text: "Minimize access to create pods (Not Scored)"
-    type: "manual"
-    Remediation: |
-      Where possible, remove create access to pod objects in the cluster. 
-    scored: false
-
-  - id: 5.1.5
-    text: "Ensure that default service accounts are not actively used. (Scored)"
-    type: "manual"
-    remediation: |
-      Create explicit service accounts wherever a Kubernetes workload requires specific access
-      to the Kubernetes API server. 
-      Modify the configuration of each default service account to include this value
-      automountServiceAccountToken: false 
-    scored: true
-
-  - id: 5.1.6
-    text: "Ensure that Service Account Tokens are only mounted where necessary (Not Scored)"
-    type: "manual"
-    remediation: |
-      Modify the definition of pods and service accounts which do not need to mount service
-      account tokens to disable it. 
-    scored: false
-
-- id: 5.2
-  text: "Pod Security Policies"
-  checks:
-  - id: 5.2.1
-    text: "Minimize the admission of privileged containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that
-      the .spec.privileged field is omitted or set to false. 
-    scored: false
-
-  - id: 5.2.2
-    text: "Minimize the admission of containers wishing to share the host process ID namespace (Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the 
-      .spec.hostPID field is omitted or set to false. 
-    scored: true
-
-  - id: 5.2.3
-    text: "Minimize the admission of containers wishing to share the host IPC namespace (Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the 
-      .spec.hostIPC field is omitted or set to false. 
-    scored: true
-
-  - id: 5.2.4
-    text: "Minimize the admission of containers wishing to share the host network namespace (Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the 
-      .spec.hostNetwork field is omitted or set to false. 
-    scored: true
-
-  - id: 5.2.5
-    text: "Minimize the admission of containers with allowPrivilegeEscalation (Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the 
-      .spec.allowPrivilegeEscalation field is omitted or set to false.
-    scored: true
-
-  - id: 5.2.6
-    text: "Minimize the admission of root containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the 
-      .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of 
-      UIDs not including 0.
-    scored: false
-
-  - id: 5.2.7
-    text: "Minimize the admission of containers with the NET_RAW capability (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the 
-      .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
-    scored: false
-
-  - id: 5.2.8
-    text: "Minimize the admission of containers with added capabilities (Not Scored)"
-    type: "manual"
-    remediation: |
-      Ensure that allowedCapabilities is not present in PSPs for the cluster unless 
-      it is set to an empty array. 
-    scored: false
-
-  - id: 5.2.9
-    text: "Minimize the admission of containers with capabilities assigned (Not Scored) "
-    type: "manual"
-    remediation: |
-      Review the use of capabilites in applications runnning on your cluster. Where a namespace
-      contains applicaions which do not require any Linux capabities to operate consider adding 
-      a PSP which forbids the admission of containers which do not drop all capabilities. 
-    scored: false
-
-- id: 5.3
-  text: "Network Policies and CNI"
-  checks:
-  - id: 5.3.1
-    text: "Ensure that the CNI in use supports Network Policies (Not Scored)"
-    type: "manual"
-    remediation: |
-      If the CNI plugin in use does not support network policies, consideration should be given to
-      making use of a different plugin, or finding an alternate mechanism for restricting traffic
-      in the Kubernetes cluster. 
-    scored: false
-
-  - id: 5.3.2
-    text: "Ensure that all Namespaces have Network Policies defined (Scored)"
-    type: "manual"
-    remediation: |
-      Follow the documentation and create NetworkPolicy objects as you need them. 
-    scored: true
-
-- id: 5.4
-  text: "Secrets Management"
-  checks:
-  - id: 5.4.1
-    text: "Prefer using secrets as files over secrets as environment variables (Not Scored)"
-    type: "manual"
-    remediation: |
-      if possible, rewrite application code to read secrets from mounted secret files, rather than
-      from environment variables. 
-    scored: false
-
-  - id: 5.4.2
-    text: "Consider external secret storage (Not Scored)"
-    type: "manual"
-    remediation: |
-      Refer to the secrets management options offered by your cloud provider or a third-party
-      secrets management solution. 
-    scored: false
-
-- id: 5.5
-  text: "Extensible Admission Control"
-  checks:
-  - id: 5.5.1
-    text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
-    type: "manual"
-    remediation: |
-      Follow the Kubernetes documentation and setup image provenance. 
-    scored: false
-
-- id: 5.6
-  text: "General Policies"
-  checks:
-  - id: 5.6.1
-    text: "Create administrative boundaries between resources using namespaces (Not Scored)"
-    type: "manual"
-    remediation: |
-      Follow the documentation and create namespaces for objects in your deployment as you need
-      them. 
-    scored: false
-
-  - id: 5.6.2
-    text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Not Scored)"
-    type: "manual"
-    remediation: |
-      Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
-      would need to enable alpha features in the apiserver by passing "--feature-
-      gates=AllAlpha=true" argument. 
-      Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
-      parameter to "--feature-gates=AllAlpha=true" 
-      KUBE_API_ARGS="--feature-gates=AllAlpha=true" 
-      Based on your system, restart the kube-apiserver service. For example: 
-      systemctl restart kube-apiserver.service 
-      Use annotations to enable the docker/default seccomp profile in your pod definitions. An
-      example is as below: 
-      apiVersion: v1 
-      kind: Pod 
-      metadata: 
-        name: trustworthy-pod   
-        annotations:   
-          seccomp.security.alpha.kubernetes.io/pod: docker/default 
-      spec: 
-        containers:   
-          - name: trustworthy-container       
-            image: sotrustworthy:latest 
-    scored: false
-
-  - id: 5.6.3
-    text: "Apply Security Context to Your Pods and Containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      Follow the Kubernetes documentation and apply security contexts to your pods. For a
-      suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
-      Containers. 
-    scored: false
-
-  - id: 5.6.4
-    text: "The default namespace should not be used (Scored)"
-    type: "manual"
-    remediation: |
-      Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
-      resources and that all new resources are created in a specific namespace. 
-    scored: true
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Not Scored)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Not Scored)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Not Scored)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Scored)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: true
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Not Scored)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Policies"
+    checks:
+      - id: 5.2.1
+        text: "Minimize the admission of privileged containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that
+          the .spec.privileged field is omitted or set to false.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Scored)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostPID field is omitted or set to false.
+        scored: true
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Scored)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostIPC field is omitted or set to false.
+        scored: true
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace (Scored)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostNetwork field is omitted or set to false.
+        scored: true
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Scored)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.allowPrivilegeEscalation field is omitted or set to false.
+        scored: true
+
+      - id: 5.2.6
+        text: "Minimize the admission of root containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+          UIDs not including 0.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability (Not Scored)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with added capabilities (Not Scored)"
+        type: "manual"
+        remediation: |
+          Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with capabilities assigned (Not Scored)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Not Scored)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Scored)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: true
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Not Scored)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Not Scored)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and setup image provenance.
+        scored: false
+
+  - id: 5.6
+    text: "General Policies"
+    checks:
+      - id: 5.6.1
+        text: "Create administrative boundaries between resources using namespaces (Not Scored)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.6.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Not Scored)"
+        type: "manual"
+        remediation: |
+          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+          would need to enable alpha features in the apiserver by passing "--feature-
+          gates=AllAlpha=true" argument.
+          Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+          parameter to "--feature-gates=AllAlpha=true"
+          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+          Based on your system, restart the kube-apiserver service. For example:
+          systemctl restart kube-apiserver.service
+          Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+          example is as below:
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            name: trustworthy-pod
+            annotations:
+              seccomp.security.alpha.kubernetes.io/pod: docker/default
+          spec:
+            containers:
+              - name: trustworthy-container
+                image: sotrustworthy:latest
+        scored: false
+
+      - id: 5.6.3
+        text: "Apply Security Context to Your Pods and Containers (Not Scored)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.6.4
+        text: "The default namespace should not be used (Scored)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: true
diff --git a/cfg/config.yaml b/cfg/config.yaml
index 0c20d6d..cc390cb 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -88,7 +88,7 @@ node:
       - "/etc/kubernetes/pki/ca.crt"
       - "/etc/kubernetes/certs/ca.crt"
       - "/etc/kubernetes/cert/ca.pem"
-    svc: 
+    svc:
       # These paths must also be included
       #  in the 'confs' property below
       - "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
@@ -145,7 +145,7 @@ node:
 etcd:
   components:
     - etcd
-   
+
   etcd:
     bins:
       - "etcd"
diff --git a/cfg/node_only.yaml b/cfg/node_only.yaml
index 62351ae..d4bcba9 100644
--- a/cfg/node_only.yaml
+++ b/cfg/node_only.yaml
@@ -70,4 +70,4 @@ version_mapping:
   "1.16": "cis-1.5"
   "1.17": "cis-1.5"
   "ocp-3.10": "rh-0.7"
-  "ocp-3.11": "rh-0.7"
\ No newline at end of file
+  "ocp-3.11": "rh-0.7"
diff --git a/cfg/rh-0.7/config.yaml b/cfg/rh-0.7/config.yaml
index 289c512..4d4f563 100644
--- a/cfg/rh-0.7/config.yaml
+++ b/cfg/rh-0.7/config.yaml
@@ -6,7 +6,7 @@ master:
     bins:
       - openshift start master api
       - hypershift openshift-kube-apiserver
-     
+
   scheduler:
     bins:
       - "openshift start master controllers"
diff --git a/cfg/rh-0.7/master.yaml b/cfg/rh-0.7/master.yaml
index 3be26b6..2169685 100644
--- a/cfg/rh-0.7/master.yaml
+++ b/cfg/rh-0.7/master.yaml
@@ -1,1464 +1,1463 @@
----
-controls:
-version: 3.10
-id: 1
-text: "Securing the OpenShift Master"
-type: "master"
-groups:
-
-- id: 1
-  text: "Protecting the API Server"
-  checks:
-  - id: 1.1
-    text: "Maintain default behavior for anonymous access"
-    type: "skip"
-    scored: true
-
-  - id: 1.2
-    text: "Verify that the basic-auth-file method is not enabled"
-    audit: "grep -A2 basic-auth-file /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "--basic-auth-file"
-        compare:
-          op: eq
-          value: ""
-        set: false
-    remediation: |
-      Edit the kubernetes master config file /etc/origin/master/master-config.yaml and
-      remove the basic-auth-file entry.
-
-      kubernetesMasterConfig:
-        apiServerArguments:
-           basic-auth-file:
-             - /path/to/any/file
-    scored: true
-
-  - id: 1.3
-    text: "Insecure Tokens"
-    type: "skip"
-    scored: true
-
-  - id: 1.4
-    text: "Secure communications between the API server and master nodes"
-    audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "kubeletClientInfo:"
-        compare:
-          op: eq
-          value: "kubeletClientInfo:"
-        set: true
-      - flag: "ca"
-        compare:
-          op: has
-          value: "ca-bundle.crt"
-        set: true
-      - flag: "certFile"
-        compare:
-          op: has
-          value: "master.kubelet-client.crt"
-        set: true
-      - flag: "keyFile"
-        compare:
-          op: has
-          value: "master.kubelet-client.key"
-        set: true
-      - flag: "port: 10250"
-        compare:
-          op: eq
-          value: "port: 10250"
-        set: true
-    remediation: |
-      Edit the kubernetes master config file /etc/origin/master/master-config.yaml
-      and change it to match the below.
-
-      kubeletClientInfo:
-        ca: ca-bundle.crt
-        certFile: master.kubelet-client.crt
-        keyFile: master.kubelet-client.key
-        port: 10250
-    scored: true
-
-  - id: 1.5
-    text: "Prevent insecure bindings"
-    audit: "grep -A2 insecure-bind-address /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "insecure-bind-address"
-        set: false
-    remediation: |
-      Edit the kubernetes master config file /etc/origin/master/master-config.yaml
-      and remove the insecure-bind-address entry.
-
-      kubernetesMasterConfig:
-        apiServerArguments:
-           insecure-bind-address:
-           - 127.0.0.1
-    scored: true
-
-  - id: 1.6
-    text: "Prevent insecure port access"
-    audit: "grep -A2 insecure-port /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "insecure-port"
-        set: false
-    remediation: |
-     Edit the kubernetes master config file /etc/origin/master/master-config.yaml
-     and remove the insecure-port entry.
-
-     kubernetesMasterConfig:
-       apiServerArguments:
-         insecure-port:
-         - 0
-    scored: true
-
-  - id: 1.7
-    text: "Use Secure Ports for API Server Traffic"
-    audit: "grep -A2 secure-port /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "secure-port"
-        set: false
-      - flag: "secure-port"
-        compare:
-          op: nothave
-          value: "0"
-        set: true
-    remediation: |
-     Edit the kubernetes master config file /etc/origin/master/master-config.yaml
-     and either remove the secure-port parameter or set it to a different (non-zero)
-     desired port.
-
-     kubernetesMasterConfig:
-       apiServerArguments:
-         secure-port:
-         - 8443
-    scored: true
-
-  - id: 1.8
-    text: "Do not expose API server profiling data"
-    type: "skip"
-    scored: true
-
-  - id: 1.9
-    text: "Verify repair-malformed-updates argument for API compatibility"
-    audit: "grep -A2 repair-malformed-updates /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "repair-malformed-updates"
-        set: false
-      - flag: "repair-malformed-updates"
-        compare:
-          op: has
-          value: "true"
-        set: true
-    remediation: |
-     Edit the kubernetes master config file /etc/origin/master/master-config.yaml
-     and remove the repair-malformed-updates entry or set repair-malformed-updates=true.
-    scored: true
-
-  - id: 1.10
-    text: "Verify that the AlwaysAdmit admission controller is disabled"
-    audit: "grep -A4 AlwaysAdmit /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "AlwaysAdmit"
-        set: false
-    remediation: |
-      Edit the kubernetes master config file /etc/origin/master/master-config.yaml
-      and remove the entry below.
-
-      AlwaysAdmit:
-        configuration:
-          kind: DefaultAdmissionConfig
-          apiVersion: v1
-          disable: false
-    scored: true
-
-  - id: 1.11
-    text: "Manage the AlwaysPullImages admission controller"
-    audit: "grep -A4 AlwaysPullImages /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "disable"
-        compare:
-          op: has
-          value: "false"
-        set: true
-    remediation: |
-      Edit the kubernetes master config file /etc/origin/master/master-config.yaml
-      and add the entry below.
-
-      admissionConfig:
-        pluginConfig:
-          AlwaysPullImages:
-            configuration:
-              kind: DefaultAdmissionConfig
-              apiVersion: v1
-              disable: false
-    scored: true
-
-  - id: 1.12
-    text: "Use Security Context Constraints instead of DenyEscalatingExec admission"
-    type: "skip"
-    scored: true
-
-  - id: 1.13
-    text: "Use Security Context Constraints instead of the SecurityContextDeny admission controller"
-    type: "skip"
-    scored: true
-
-  - id: 1.14
-    text: "Manage the NamespaceLifecycle admission controller"
-    audit: "grep -A4 NamespaceLifecycle /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "NamespaceLifecycle"
-        set: false
-    remediation: |
-      Edit the kubernetes master config file /etc/origin/master/master-config.yaml
-      and remove the following entry.
-
-      NamespaceLifecycle: 
-        configuration:
-          kind: DefaultAdmissionConfig
-          apiVersion: v1
-          disable: true
-    scored: true
-
-  - id: 1.15
-    text: "Configure API server auditing - audit log file path"
-    audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "enabled"
-        compare:
-          op: has
-          value: "true"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the following entry and restart the API server.
-
-      auditConfig:
-        auditFilePath: ""/etc/origin/master/audit-ocp.log""
-        enabled: true
-        maximumFileRetentionDays: 30
-        maximumFileSizeMegabytes: 10
-        maximumRetainedFiles: 10
-
-      Make the same changes in the inventory/ansible variables so the changes are not
-      lost when an upgrade occurs.
-    scored: true
-
-  - id: 1.16
-    text: "Configure API server auditing - audit log retention"
-    audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "maximumFileRetentionDays: 30"
-        compare:
-          op: has
-          value: "maximumFileRetentionDays"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml,
-      update the maximumFileRetentionDays entry and restart the API server.
-
-      auditConfig:
-        auditFilePath: ""/etc/origin/master/audit-ocp.log""
-        enabled: true
-        maximumFileRetentionDays: 30
-        maximumFileSizeMegabytes: 10
-        maximumRetainedFiles: 10
-
-      Make the same changes in the inventory/ansible variables so the changes are not
-      lost when an upgrade occurs.
-    scored: true
-
-  - id: 1.17
-    text: "Configure API server auditing - audit log backup retention"
-    audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "maximumRetainedFiles: 10"
-        compare:
-          op: has
-          value: "maximumRetainedFiles"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the maximumRetainedFiles entry,
-      set enabled to true and restart the API server.
-
-      auditConfig:
-        auditFilePath: ""/etc/origin/master/audit-ocp.log""
-        enabled: true
-        maximumFileRetentionDays: 30
-        maximumFileSizeMegabytes: 10
-        maximumRetainedFiles: 10
-
-      Make the same changes in the inventory/ansible variables so the changes are not
-      lost when an upgrade occurs.
-    scored: true
-
-  - id: 1.18
-    text: "Configure audit log file size"
-    audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "maximumFileSizeMegabytes: 30"
-        compare:
-          op: has
-          value: "maximumFileSizeMegabytes"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the maximumFileSizeMegabytes entry,
-      set enabled to true and restart the API server.
-
-      auditConfig:
-        auditFilePath: ""/etc/origin/master/audit-ocp.log""
-        enabled: true
-        maximumFileRetentionDays: 30
-        maximumFileSizeMegabytes: 10
-        maximumRetainedFiles: 10
-
-      Make the same changes in the inventory/ansible variables so the changes are not
-      lost when an upgrade occurs.
-    scored: true
-
-  - id: 1.19
-    text: "Verify that authorization-mode is not set to AlwaysAllow"
-    audit: "grep -A1 authorization-mode /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "authorization-mode"
-        set: false
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the authorization-mode
-      entry.
-
-      kubernetesMasterConfig:
-        apiServerArguments:
-           authorization-mode:
-             - AllowAll
-    scored: true
-
-  - id: 1.20
-    text: "Verify that the token-auth-file flag is not set"
-    audit: "grep token-auth-file /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "token-auth-file"
-        set: false
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the token-auth-file
-      entry under apiserverArguments section.
-
-      kubernetesMasterConfig:
-        apiServerArguments:
-           token-auth-file:
-             - /path/to/file
-    scored: true
-
-  - id: 1.21
-    text: "Verify the API server certificate authority"
-    audit: "grep -A1 kubelet-certificate-authority /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "kubelet-certificate-authority"
-        set: false
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the following
-      configuration under apiserverArguments section.
-
-      kubernetesMasterConfig:
-        apiServerArguments:
-           kubelet-certificat-authority:
-             - /path/to/ca
-    scored: true
-
-  - id: 1.22
-    text: "Verify the API server client certificate and client key"
-    audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "keyFile"
-        compare:
-          op: has
-          value: "master.kubelet-client.key"
-        set: true
-      - flag: "certFile"
-        compare:
-          op: has
-          value: "master.kubelet-client.crt"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and add the following
-      configuration under kubeletClientInfo
-
-      kubeletClientInfo:
-        ca: ca-bundle.crt
-        certFile: master.kubelet-client.crt
-        keyFile: master.kubelet-client.key
-        port: 10250
-    scored: true
-
-  - id: 1.23
-    text: "Verify that the service account lookup flag is not set"
-    type: "skip"
-    scored: true
-
-  - id: 1.24
-    text: "Verify the PodSecurityPolicy is disabled to ensure use of SecurityContextConstraints"
-    type: "skip"
-    scored: true
-
-  - id: 1.25
-    text: "Verify that the service account key file argument is not set"
-    audit: "grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "privateKeyFile"
-        compare:
-          op: has
-          value: "serviceaccounts.private.key"
-        set: true
-      - flag: "serviceaccounts.public.key"
-        compare:
-          op: has
-          value: "serviceaccounts.public.key"
-        set: true
-    remediation: |
-      OpenShift API server does not use the service-account-key-file argument. 
-      Even if value is set in master-config.yaml, it will not be used to verify 
-      service account tokens, as it is in upstream Kubernetes. The ServiceAccount 
-      token authenticator is configured with serviceAccountConfig.publicKeyFiles in 
-      the master-config.yaml. OpenShift does not reuse the apiserver TLS key.
-
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and set the privateKeyFile 
-      and publicKeyFile configuration under serviceAccountConfig.
-
-        serviceAccountConfig:
-          limitSecretReferences: false
-          managedNames:
-            - default
-            - builder
-            - deployer
-          masterCA: ca-bundle.crt
-          privateKeyFile: serviceaccounts.private.key
-          publicKeyFiles:
-            - serviceaccounts.public.key
-
-      Verify that privateKeyFile and publicKeyFile exist and set.
-    scored: true
-
-  - id: 1.26
-    text: "Verify the certificate and key used for communication with etcd"
-    audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "certFile"
-        compare:
-          op: has
-          value: "master.etcd-client.crt"
-        set: true
-      - flag: "keyFile"
-        compare:
-          op: has
-          value: "master.etcd-client.key"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and set keyFile and certFile 
-      under etcdClientInfo like below.
-      
-        etcdClientInfo:
-          ca: master.etcd-ca.crt
-          certFile: master.etcd-client.crt
-          keyFile: master.etcd-client.key
-    scored: true
-
-  - id: 1.27
-    text: "Verify that the ServiceAccount admission controller is enabled"
-    audit: "grep -A4 ServiceAccount /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "ServiceAccount"
-        set: false
-      - flag: "disable"
-        compare:
-          op: has
-          value: "false"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable ServiceAccount
-      admission control policy.
-      
-        ServiceAccount: 
-          configuration:
-            kind: DefaultAdmissionConfig
-            apiVersion: v1
-            disable: false
-    scored: true
-
-  - id: 1.28
-    text: "Verify the certificate and key used to encrypt API server traffic"
-    audit: "grep -A7 servingInfo /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "certFile"
-        compare:
-          op: has
-          value: "master.server.crt"
-        set: true
-      - flag: "keyFile"
-        compare:
-          op: has
-          value: "master.server.key"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and set keyFile and certFile under servingInfo.
-
-        servingInfo:
-          bindAddress: 0.0.0.0:8443
-          bindNetwork: tcp4
-          certFile: master.server.crt
-          clientCA: ca.crt
-          keyFile: master.server.key
-          maxRequestsInFlight: 500
-          requestTimeoutSeconds: 3600
-    scored: true
-
-  - id: 1.29
-    text: "Verify that the --client-ca-file argument is not set"
-    audit: "grep client-ca-file /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "clientCA: ca.crt"
-        set: false
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and set clientCA under servingInfo.
-
-        servingInfo:
-          bindAddress: 0.0.0.0:8443
-          bindNetwork: tcp4
-          certFile: master.server.crt
-          clientCA: ca.crt
-          keyFile: master.server.key
-          maxRequestsInFlight: 500
-          requestTimeoutSeconds: 3600
-    scored: true
-
-  - id: 1.30
-    text: "Verify the CA used for communication with etcd"
-    audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "ca"
-        compare:
-          op: has
-          value: "master.etcd-ca.crt"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and set ca under etcdClientInfo.
-
-        etcdClientInfo:
-          ca: master.etcd-ca.crt
-          certFile: master.etcd-client.crt
-          keyFile: master.etcd-client.key
-    scored: true
-
-  - id: 1.31
-    text: "Verify that the authorization-mode argument is not set"
-    type: "skip"
-    scored: true
-
-  - id: 1.32
-    text: "Verify that the NodeRestriction admission controller is enabled"
-    audit: "grep -A4 NodeRestriction /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "NodeRestriction"
-        set: false
-      - flag: "disable"
-        compare:
-          op: has
-          value: "false"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable NodeRestriction ca under etcdClientInfo.
-
-        NodeRestriction:
-          configuration:
-            kind: DefaultAdmissionConfig
-            apiVersion: v1
-            disable: false
-    scored: true
-
-  - id: 1.33
-    text: "Configure encryption of data at rest in etcd datastore"
-    audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "experimental-encryption-provider-config:"
-        compare:
-          op: has
-          value: "experimental-encryption-provider-config:"
-        set: true
-    remediation: |
-      Follow the instructions in the documentation to configure encryption. 
-      https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html
-    scored: true
-
-  - id: 1.34
-    text: "Set the encryption provider to aescbc for etcd data at rest"
-    audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml | sed -n '2p' | awk '{ print $2 }' | xargs grep -A1 providers"
-    tests:
-      test_items:
-      - flag: "aescbc:"
-        compare:
-          op: has
-          value: "aescbc:"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and set aescbc as the first provider in encryption provider config.
-      See https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html.
-    scored: true
-
-  - id: 1.35
-    text: "Enable the EventRateLimit plugin"
-    audit: "grep -A4 EventRateLimit /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "disable"
-        compare:
-          op: has
-          value: "false"
-        set: true
-    remediation: |
-      Follow the documentation to enable the EventRateLimit plugin.
-      https://docs.openshift.com/container-platform/3.10/architecture/additional_concepts/admission_controllers.html#admission-controllers-general-admission-rules 
-    scored: true
-
-  - id: 1.36
-    text: "Configure advanced auditing"
-    audit: "grep AdvancedAuditing /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "AdvancedAuditing"
-        compare:
-          op: eq
-          value: "true"
-        set: true
-      - flag: "AdvancedAuditing"
-        set: false
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable AdvancedAuditing,
-
-      kubernetesMasterConfig:
-        apiServerArguments:
-          feature-gates:
-            - AdvancedAuditing=true
-    scored: true
-
-  # Review 1.1.37 in Aquasec shared doc, the tests are net zero.
-  - id: 1.37
-    text: "Adjust the request timeout argument for your cluster resources"
-    audit: "grep request-timeout /etc/origin/master/master-config.yaml"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      change the request-timeout value in the  /etc/origin/master/master-config.yaml
-    scored: true
-
-
-- id: 2
-  text: "Scheduler"
-  checks:
-  - id: 2.1
-    text: "Verify that Scheduler profiling is not exposed to the web"
-    type: "skip"
-    scored: true
-
-
-- id: 3
-  text: "Controller Manager"
-  checks:
-  - id: 3.1
-    text: "Adjust the terminated-pod-gc-threshold argument as needed"
-    audit: "grep terminated-pod-gc-threshold -A1 /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-      - flag: "terminated-pod-gc-threshold:"
-        compare:
-          op: has
-          value: "12500"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml  and enable terminated-pod-gc-threshold.
-
-        kubernetesMasterConfig:
-          controllerArguments:
-             terminated-pod-gc-threshold:
-             - true
-
-      Enabling the "terminated-pod-gc-threshold" settings is optional.
-    scored: true
-
-  - id: 3.2
-    text: "Verify that Controller profiling is not exposed to the web"
-    type: "skip"
-    scored: true
-
-  - id: 3.3
-    text: "Verify that the --use-service-account-credentials argument is set to true"
-    audit: "grep -A2 use-service-account-credentials /etc/origin/master/master-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "use-service-account-credentials"
-        set: false
-      - flag: "true"
-        compare:
-          op: has
-          value: "true"
-        set: true
-    remediation: |
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and set use-service-account-credentials
-      to true under controllerArguments section.
-
-      kubernetesMasterConfig:
-        controllerArguments:
-           use-service-account-credentials:
-             - true
-    scored: true
-
-  # Review 3.4
-  - id: 3.4
-    text: "Verify that the --service-account-private-key-file argument is set as appropriate"
-    audit: |
-      grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml | grep privateKeyFile;
-      grep -A2 service-account-private-key-file /etc/origin/master/master-config.yaml
-    tests:
-      bin_op: and
-      test_items:
-        - flag: "privateKeyFile: serviceaccounts.private.key"
-          compare:
-            op: has
-            value: "privateKeyFile"
-        - flag: "service-account-private-key-file"
-          set: false
-    remediation:
-      Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove service-account-private-key-file
-    scored: true
-
-  # Review 3.5
-  - id: 3.5
-    text: "Verify that the --root-ca-file argument is set as appropriate"
-    audit: "/bin/sh -c 'grep root-ca-file /etc/origin/master/master-config.yaml; grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml'"
-    tests:
-      bin_op: and
-      test_items:
-        - flag: "root-ca-file=/etc/origin/master/ca-bundle.crt"
-          compare:
-            op: has
-            value: "/etc/origin/master/ca-bundle.crt"
-          set: true
-      test_items:
-        - flag: "masterCA"
-          compare:
-            op: has
-            value: "ca-bundle.crt"
-          set: true
-    remediation:
-      Reset to OpenShift defaults OpenShift starts kube-controller-manager with
-      root-ca-file=/etc/origin/master/ca-bundle.crt by default.  OpenShift Advanced
-      Installation creates this certificate authority and configuration without any
-      configuration required.
-
-      https://docs.openshift.com/container-platform/3.10/admin_guide/service_accounts.html"
-    scored: true
-
-  - id: 3.6
-    text: "Verify that Security Context Constraints are applied to Your Pods and Containers"
-    type: "skip"
-    scored: false
-
-  - id: 3.7
-    text: "Manage certificate rotation"
-    audit: "grep -B3 RotateKubeletServerCertificate=true /etc/origin/master/master-config.yaml"
-    tests:
-      test_items:
-        - flag: "RotateKubeletServerCertificate"
-          compare:
-            op: eq
-            value: "true"
-          set: true
-    remediation:
-      If you decide not to enable the RotateKubeletServerCertificate feature,
-      be sure to use the Ansible playbooks provided with the OpenShift installer to
-      automate re-deploying certificates.
-    scored: true
-
-
-- id: 4
-  text: "Configuration Files"
-  checks:
-  - id: 4.1
-    text: "Verify the OpenShift default permissions for the API server pod specification file"
-    audit: "stat -c %a /etc/origin/node/pods/apiserver.yaml"
-    tests:      
-      test_items:
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command.
-
-      chmod 600 /etc/origin/node/pods/apiserver.yaml
-    scored: true
-
-  - id: 4.2
-    text: "Verify the OpenShift default file ownership for the API server pod specification file"
-    audit: "stat -c %U:%G /etc/origin/node/pods/apiserver.yaml"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown root:root /etc/origin/node/pods/apiserver.yaml
-    scored: true
-
-  - id: 4.3
-    text: "Verify the OpenShift default file permissions for the controller manager pod specification file"
-    audit: "stat -c %a /etc/origin/node/pods/controller.yaml"
-    tests:      
-      test_items:
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chmod 600 /etc/origin/node/pods/controller.yaml
-    scored: true
-
-  - id: 4.4
-    text: "Verify the OpenShift default ownership for the controller manager pod specification file"
-    audit: "stat -c %U:%G /etc/origin/node/pods/controller.yaml"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown root:root /etc/origin/node/pods/controller.yaml
-    scored: true
-
-  - id: 4.5
-    text: "Verify the OpenShift default permissions for the scheduler pod specification file"
-    audit: "stat -c %a /etc/origin/node/pods/controller.yaml"
-    tests:      
-      test_items:
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command.
-
-      chmod 600 stat -c %a /etc/origin/node/pods/controller.yaml
-    scored: true
-
-  - id: 4.6
-    text: "Verify the scheduler pod specification file ownership set by OpenShift"
-    audit: "stat -c %u:%g /etc/origin/node/pods/controller.yaml"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown root:root /etc/origin/node/pods/controller.yaml
-    scored: true
-
-  - id: 4.7
-    text: "Verify the OpenShift default etcd pod specification file permissions"
-    audit: "stat -c %a /etc/origin/node/pods/etcd.yaml"
-    tests:      
-      test_items:
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command.
-
-      chmod 600 /etc/origin/node/pods/etcd.yaml
-    scored: true
-
-  - id: 4.8
-    text: "Verify the OpenShift default etcd pod specification file ownership"
-    audit: "stat -c %U:%G /etc/origin/node/pods/etcd.yaml"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown root:root /etc/origin/node/pods/etcd.yaml
-    scored: true
-
-  - id: 4.9
-    text: "Verify the default OpenShift Container Network Interface file permissions"
-    audit: "stat -c %a /etc/origin/openvswitch/ /etc/cni/net.d/"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command.
-
-      chmod 644 -R /etc/origin/openvswitch/ /etc/cni/net.d/
-    scored: true
-
-  - id: 4.10
-    text: "Verify the default OpenShift Container Network Interface file ownership"
-    audit: "stat -c %U:%G /etc/origin/openvswitch/ /etc/cni/net.d/"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown root:root /etc/origin/openvswitch/ /etc/cni/net.d/
-    scored: true
-
-  - id: 4.11
-    text: "Verify the default OpenShift etcd data directory permissions"
-    audit: "stat -c %a /var/lib/etcd"
-    tests:
-      test_items:
-      - flag: "700"
-        compare:
-          op: eq
-          value: "700"
-        set: true
-    remediation: |
-      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
-      from the below command:
-      ps -ef | grep etcd
-      Run the below command (based on the etcd data directory found above). For example,
-      chmod 700 /var/lib/etcd
-    scored: true
-
-  - id: 4.12
-    text: "Verify the default OpenShift etcd data directory ownership"
-    audit: "stat -c %U:%G /var/lib/etcd"
-    tests:
-      test_items:
-      - flag: "etcd:etcd"
-        compare:
-          op: eq
-          value: "etcd:etcd"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown etcd:etcd /var/lib/etcd
-    scored: true
-
-  - id: 4.13
-    text: "Verify the default OpenShift admin.conf file permissions"
-    audit: "stat -c %a /etc/origin/master/admin.kubeconfig"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command.
-
-      chmod 644 /etc/origin/master/admin.kubeconfig"
-    scored: true
-
-  - id: 4.14
-    text: "Verify the default OpenShift admin.conf file ownership"
-    audit: "stat -c %U:%G /etc/origin/master/admin.kubeconfig"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown root:root /etc/origin/master/admin.kubeconfig
-    scored: true
-
-  - id: 4.15
-    text: "Verify the default OpenShift scheduler.conf file permissions"
-    audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command.
-
-      chmod 644 /etc/origin/master/openshift-master.kubeconfig
-    scored: true
-
-  - id: 4.16
-    text: "Verify the default OpenShift scheduler.conf file ownership"
-    audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown root:root /etc/origin/master/openshift-master.kubeconfig
-    scored: true
-
-  - id: 4.17
-    text: "Verify the default Openshift controller-manager.conf file permissions"
-    audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "644"
-        compare:
-          op: eq
-          value: "644"
-        set: true
-      - flag: "640"
-        compare:
-          op: eq
-          value: "640"
-        set: true
-      - flag: "600"
-        compare:
-          op: eq
-          value: "600"
-        set: true
-    remediation: |
-      Run the below command.
-
-      chmod 644 /etc/origin/master/openshift-master.kubeconfig
-    scored: true
-
-  - id: 4.18
-    text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
-    audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig"
-    tests:
-      test_items:
-      - flag: "root:root"
-        compare:
-          op: eq
-          value: "root:root"
-        set: true
-    remediation: |
-      Run the below command on the master node.
-
-      chown root:root /etc/origin/master/openshift-master.kubeconfig
-    scored: true
-
-
-- id: 5
-  text: "Etcd"
-  checks:
-  - id: 5.1
-    text: "Verify the default OpenShift cert-file and key-file configuration"
-    audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_CERT_FILE=/etc/etcd/server.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep etcd_key_file=/etc/etcd/server.key /proc/1/environ; grep ETCD_CERT_FILE=/etc/etcd/server.crt /etc/etcd/etcd.conf; grep ETCD_KEY_FILE=/etc/etcd/server.key /etc/etcd/etcd.conf'"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "Binary file /proc/1/environ matches"
-        compare:
-          op: has
-          value: "Binary file /proc/1/environ matches"
-        set: true
-      - flag: "ETCD_CERT_FILE=/etc/etcd/server.crt"
-        compare:
-          op: has
-          value: "ETCD_CERT_FILE=/etc/etcd/server.crt"
-        set: true
-      - flag: "ETCD_KEY_FILE=/etc/etcd/server.key"
-        compare:
-          op: has
-          value: "ETCD_KEY_FILE=/etc/etcd/server.key"
-        set: true
-    remediation: |
-      Reset to the OpenShift default configuration.
-    scored: true
-
-  - id: 5.2
-    text: "Verify the default OpenShift setting for the client-cert-auth argument"
-    audit: "/bin/sh -c'/usr/local/bin/master-exec etcd etcd grep ETCD_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "Binary file /proc/1/environ matches"
-        compare:
-          op: has
-          value: "Binary file /proc/1/environ matches"
-        set: true
-      - flag: "ETCD_CLIENT_CERT_AUTH=true"
-        compare:
-          op: has
-          value: "ETCD_CLIENT_CERT_AUTH=true"
-        set: true
-    remediation: |
-      Reset to the OpenShift default configuration.
-    scored: true
-
-  - id: 5.3
-    text: "Verify the OpenShift default values for etcd_auto_tls"
-    audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_AUTO_TLS /proc/1/environ; grep ETCD_AUTO_TLS /etc/etcd/etcd.conf'"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "ETCD_AUTO_TLS=false"
-        compare:
-          op: has
-          value: "ETCD_AUTO_TLS=false"
-        set: true
-      - flag: "#ETCD_AUTO_TLS"
-        compare:
-          op: has
-          value: "#ETCD_AUTO_TLS"
-        set: true
-    remediation: |
-      Reset to the OpenShift default configuration.
-    scored: true
-
-  - id: 5.4
-    text: "Verify the OpenShift default peer-cert-file and peer-key-file arguments for etcd"
-    audit: "/bin/sh -c'/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep ETCD_PEER_KEY_FILE=/etc/etcd/peer.key /proc/1/environ; grep ETCD_PEER_CERT_FILE /etc/etcd/etcd.conf; grep ETCD_PEER_KEY_FILE /etc/etcd/etcd.conf'"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "Binary file /proc/1/environ matches"
-        compare:
-          op: has
-          value: "Binary file /proc/1/environ matches"
-        set: true
-      - flag: "ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt"
-        compare:
-          op: has
-          value: "ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt"
-        set: true
-      - flag: "ETCD_PEER_KEY_FILE=/etc/etcd/peer.key"
-        compare:
-          op: has
-          value: "ETCD_PEER_KEY_FILE=/etc/etcd/peer.key"
-        set: true
-    remediation: |
-      Reset to the OpenShift default configuration.
-    scored: true
-
-  - id: 5.5
-    text: "Verify the OpenShift default configuration for the peer-client-cert-auth"
-    audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_PEER_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "Binary file /proc/1/environ matches"
-        compare:
-          op: has
-          value: "Binary file /proc/1/environ matches"
-        set: true
-      - flag: "ETCD_PEER_CLIENT_CERT_AUTH=true"
-        compare:
-          op: has
-          value: "ETCD_PEER_CLIENT_CERT_AUTH=true"
-        set: true
-    remediation: |
-      Reset to the OpenShift default configuration.
-    scored: true
-
-  - id: 5.6
-    text: "Verify the OpenShift default configuration for the peer-auto-tls argument"
-    audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_AUTO_TLS /proc/1/environ; grep ETCD_PEER_AUTO_TLS /etc/etcd/etcd.conf'"
-    tests:
-      bin_op: and
-      test_items:
-      - flag: "Binary file /proc/1/environ matches"
-        compare:
-          op: has
-          value: "Binary file /proc/1/environ matches"
-        set: true
-      - flag: "#ETCD_PEER_AUTO_TLS=false"
-        compare:
-          op: has
-          value: "#ETCD_PEER_AUTO_TLS=false"
-        set: true
-    remediation: |
-      Reset to the OpenShift default configuration.
-    scored: true
-
-  - id: 5.7
-    text: "Optionally modify the wal-dir argument"
-    type: "skip"
-    scored: true
-
-  - id: 5.8
-    text: "Optionally modify the max-wals argument"
-    type: "skip"
-    scored: true
-
-  - id: 5.9
-    text: "Verify the OpenShift default configuration for the etcd Certificate Authority"
-    audit: "openssl x509 -in /etc/origin/master/master.etcd-ca.crt -subject -issuer -noout | sed 's/@/ /'"
-    tests:
-      test_items:
-      - flag: "issuer= /CN=etcd-signer"
-        compare:
-          op: has
-          value: "issuer= /CN=etcd-signer"
-        set: true
-    remediation: |
-      Reset to the OpenShift default configuration.
-    scored: false
-
-
-- id: 6
-  text: "General Security Primitives"
-  checks:
-  - id: 6.1
-    text: "Ensure that the cluster-admin role is only used where required"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Review users, groups, serviceaccounts bound to cluster-admin:
-      oc get clusterrolebindings | grep cluster-admin
-
-      Review users and groups bound to cluster-admin and decide whether they require
-      such access. Consider creating least-privilege roles for users and service accounts
-    scored: false
-
-  - id: 6.2
-    text: "Verify Security Context Constraints as in use"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Review Security Context Constraints:
-      oc get scc
-
-      Use OpenShift's Security Context Constraint feature, which has been contributed
-      to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10.
-      OpenShift ships with two SCCs: restricted and privileged.
-
-      The two default SCCs will be created when the master is started. The restricted
-      SCC is granted to all authenticated users by default.
-
-       https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html"
-    scored: false
-
-  - id: 6.3
-    text: "Use OpenShift projects to maintain boundaries between resources"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Review projects:
-      oc get projects
-    scored: false
-
-  - id: 6.4
-    text: "Create network segmentation using the Multi-tenant plugin or Network Policies"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Verify on masters the plugin being used:
-      grep networkPluginName /etc/origin/master/master-config.yaml
-
-      OpenShift provides multi-tenant networking isolation (using Open vSwich and
-      vXLAN), to segregate network traffic between containers belonging to different
-      tenants (users or applications) while running on a shared cluster. Red Hat also
-      works with 3rd-party SDN vendors to provide the same level of capabilities
-      integrated with OpenShift. OpenShift SDN is included a part of OpenShift
-      subscription.
-
-      OpenShift supports Kubernetes NetworkPolicy. Administrator must configure
-      NetworkPolicies if desired.
-
-      https://docs.openshift.com/container-platform/3.10/architecture/networking/sdn.html#architecture-additional-concepts-sdn
-
-      Ansible Inventory variable: os_sdn_network_plugin_name:
-      https://docs.openshift.com/container-platform/3.10/install/configuring_inventory_file.html
-    scored: false
-
-  - id: 6.5
-    text: "Enable seccomp and configure custom Security Context Constraints"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Verify SCCs that have been configured with seccomp:
-      oc get scc -ocustom-columns=NAME:.metadata.name,SECCOMP-PROFILES:.seccompProfiles
-
-      OpenShift does not enable seccomp by default. To configure seccomp profiles that
-      are applied to pods run by the SCC, follow the instructions in the
-      documentation:
-
-      https://docs.openshift.com/container-platform/3.9/admin_guide/seccomp.html#admin-guide-seccomp
-    scored: false
-
-  - id: 6.6
-    text: "Review Security Context Constraints"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Review SCCs:
-      oc describe scc
-
-      Use OpenShift's Security Context Constraint feature, which has been contributed
-      to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10.
-
-      OpenShift ships with two SCCs: restricted and privileged. The two default SCCs
-      will be created when the master is started. The restricted SCC is granted to
-      all authenticated users by default.
-
-      All pods are run under the restricted SCC by default. Running a pod under any
-      other SCC requires an account with cluster admin capabilities to grant access
-      for the service account.
-
-      SecurityContextConstraints limit what securityContext is applied to pods and
-      containers.
-
-      https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html
-    scored: false
-
-  - id: 6.7
-    text: "Manage Image Provenance using ImagePolicyWebhook admission controller"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      Review imagePolicyConfig in /etc/origin/master/master-config.yaml.
-    scored: false
-
-  - id: 6.8
-    text: "Configure Network policies as appropriate"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      If ovs-networkplugin is used, review network policies:
-      oc get networkpolicies
-
-      OpenShift supports Kubernetes NetworkPolicy via ovs-networkpolicy plugin.
-      If choosing ovs-multitenant plugin, each namespace is isolated in its own
-      netnamespace by default.
-    scored: false
-
-  - id: 6.9
-    text: "Use Security Context Constraints as compensating controls for privileged containers"
-    type: "manual"
-    remediation: |
-      [Manual test]
-      1) Determine all sccs allowing privileged containers:
-         oc get scc -ocustom-columns=NAME:.metadata.name,ALLOWS_PRIVILEGED:.allowPrivilegedContainer
-      2) Review users and groups assigned to sccs allowing priviliged containers:
-         oc describe sccs <from (1)>
-
-      Use OpenShift's Security Context Constraint feature, which has been contributed
-      to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10.
-
-      OpenShift ships with two SCCs: restricted and privileged. The two default SCCs
-      will be created when the master is started. The restricted SCC is granted to all
-      authenticated users by default.
-
-      Similar scenarios are documented in the SCC
-      documentation, which outlines granting SCC access to specific serviceaccounts.
-      Administrators may create least-restrictive SCCs based on individual container
-      needs.
-
-      For example, if a container only requires running as the root user, the anyuid
-      SCC can be used, which will not expose additional access granted by running
-      privileged containers.
-
-      https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html
-    scored: false
+---
+controls:
+version: "3.10"
+id: 1
+text: "Securing the OpenShift Master"
+type: "master"
+groups:
+
+  - id: 1
+    text: "Protecting the API Server"
+    checks:
+      - id: 1.1
+        text: "Maintain default behavior for anonymous access"
+        type: "skip"
+        scored: true
+
+      - id: 1.2
+        text: "Verify that the basic-auth-file method is not enabled"
+        audit: "grep -A2 basic-auth-file /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "--basic-auth-file"
+              compare:
+                op: eq
+                value: ""
+              set: false
+        remediation: |
+          Edit the kubernetes master config file /etc/origin/master/master-config.yaml and
+          remove the basic-auth-file entry.
+
+          kubernetesMasterConfig:
+            apiServerArguments:
+               basic-auth-file:
+                 - /path/to/any/file
+        scored: true
+
+      - id: 1.3
+        text: "Insecure Tokens"
+        type: "skip"
+        scored: true
+
+      - id: 1.4
+        text: "Secure communications between the API server and master nodes"
+        audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "kubeletClientInfo:"
+              compare:
+                op: eq
+                value: "kubeletClientInfo:"
+              set: true
+            - flag: "ca"
+              compare:
+                op: has
+                value: "ca-bundle.crt"
+              set: true
+            - flag: "certFile"
+              compare:
+                op: has
+                value: "master.kubelet-client.crt"
+              set: true
+            - flag: "keyFile"
+              compare:
+                op: has
+                value: "master.kubelet-client.key"
+              set: true
+            - flag: "port: 10250"
+              compare:
+                op: eq
+                value: "port: 10250"
+              set: true
+        remediation: |
+          Edit the kubernetes master config file /etc/origin/master/master-config.yaml
+          and change it to match the below.
+
+          kubeletClientInfo:
+            ca: ca-bundle.crt
+            certFile: master.kubelet-client.crt
+            keyFile: master.kubelet-client.key
+            port: 10250
+        scored: true
+
+      - id: 1.5
+        text: "Prevent insecure bindings"
+        audit: "grep -A2 insecure-bind-address /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "insecure-bind-address"
+              set: false
+        remediation: |
+          Edit the kubernetes master config file /etc/origin/master/master-config.yaml
+          and remove the insecure-bind-address entry.
+
+          kubernetesMasterConfig:
+            apiServerArguments:
+               insecure-bind-address:
+               - 127.0.0.1
+        scored: true
+
+      - id: 1.6
+        text: "Prevent insecure port access"
+        audit: "grep -A2 insecure-port /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "insecure-port"
+              set: false
+        remediation: |
+         Edit the kubernetes master config file /etc/origin/master/master-config.yaml
+         and remove the insecure-port entry.
+
+         kubernetesMasterConfig:
+           apiServerArguments:
+             insecure-port:
+             - 0
+        scored: true
+
+      - id: 1.7
+        text: "Use Secure Ports for API Server Traffic"
+        audit: "grep -A2 secure-port /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "secure-port"
+              set: false
+            - flag: "secure-port"
+              compare:
+                op: nothave
+                value: "0"
+              set: true
+        remediation: |
+         Edit the kubernetes master config file /etc/origin/master/master-config.yaml
+         and either remove the secure-port parameter or set it to a different (non-zero)
+         desired port.
+
+         kubernetesMasterConfig:
+           apiServerArguments:
+             secure-port:
+             - 8443
+        scored: true
+
+      - id: 1.8
+        text: "Do not expose API server profiling data"
+        type: "skip"
+        scored: true
+
+      - id: 1.9
+        text: "Verify repair-malformed-updates argument for API compatibility"
+        audit: "grep -A2 repair-malformed-updates /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "repair-malformed-updates"
+              set: false
+            - flag: "repair-malformed-updates"
+              compare:
+                op: has
+                value: "true"
+              set: true
+        remediation: |
+         Edit the kubernetes master config file /etc/origin/master/master-config.yaml
+         and remove the repair-malformed-updates entry or set repair-malformed-updates=true.
+        scored: true
+
+      - id: 1.10
+        text: "Verify that the AlwaysAdmit admission controller is disabled"
+        audit: "grep -A4 AlwaysAdmit /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "AlwaysAdmit"
+              set: false
+        remediation: |
+          Edit the kubernetes master config file /etc/origin/master/master-config.yaml
+          and remove the entry below.
+
+          AlwaysAdmit:
+            configuration:
+              kind: DefaultAdmissionConfig
+              apiVersion: v1
+              disable: false
+        scored: true
+
+      - id: 1.11
+        text: "Manage the AlwaysPullImages admission controller"
+        audit: "grep -A4 AlwaysPullImages /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "disable"
+              compare:
+                op: has
+                value: "false"
+              set: true
+        remediation: |
+          Edit the kubernetes master config file /etc/origin/master/master-config.yaml
+          and add the entry below.
+
+          admissionConfig:
+            pluginConfig:
+              AlwaysPullImages:
+                configuration:
+                  kind: DefaultAdmissionConfig
+                  apiVersion: v1
+                  disable: false
+        scored: true
+
+      - id: 1.12
+        text: "Use Security Context Constraints instead of DenyEscalatingExec admission"
+        type: "skip"
+        scored: true
+
+      - id: 1.13
+        text: "Use Security Context Constraints instead of the SecurityContextDeny admission controller"
+        type: "skip"
+        scored: true
+
+      - id: 1.14
+        text: "Manage the NamespaceLifecycle admission controller"
+        audit: "grep -A4 NamespaceLifecycle /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "NamespaceLifecycle"
+              set: false
+        remediation: |
+          Edit the kubernetes master config file /etc/origin/master/master-config.yaml
+          and remove the following entry.
+
+          NamespaceLifecycle:
+            configuration:
+              kind: DefaultAdmissionConfig
+              apiVersion: v1
+              disable: true
+        scored: true
+
+      - id: 1.15
+        text: "Configure API server auditing - audit log file path"
+        audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "enabled"
+              compare:
+                op: has
+                value: "true"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the following entry and restart the API server.
+
+          auditConfig:
+            auditFilePath: "/etc/origin/master/audit-ocp.log"
+            enabled: true
+            maximumFileRetentionDays: 30
+            maximumFileSizeMegabytes: 10
+            maximumRetainedFiles: 10
+
+          Make the same changes in the inventory/ansible variables so the changes are not
+          lost when an upgrade occurs.
+        scored: true
+
+      - id: 1.16
+        text: "Configure API server auditing - audit log retention"
+        audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "maximumFileRetentionDays: 30"
+              compare:
+                op: has
+                value: "maximumFileRetentionDays"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml,
+          update the maximumFileRetentionDays entry and restart the API server.
+
+          auditConfig:
+            auditFilePath: "/etc/origin/master/audit-ocp.log"
+            enabled: true
+            maximumFileRetentionDays: 30
+            maximumFileSizeMegabytes: 10
+            maximumRetainedFiles: 10
+
+          Make the same changes in the inventory/ansible variables so the changes are not
+          lost when an upgrade occurs.
+        scored: true
+
+      - id: 1.17
+        text: "Configure API server auditing - audit log backup retention"
+        audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "maximumRetainedFiles: 10"
+              compare:
+                op: has
+                value: "maximumRetainedFiles"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the maximumRetainedFiles entry,
+          set enabled to true and restart the API server.
+
+          auditConfig:
+            auditFilePath: "/etc/origin/master/audit-ocp.log"
+            enabled: true
+            maximumFileRetentionDays: 30
+            maximumFileSizeMegabytes: 10
+            maximumRetainedFiles: 10
+
+          Make the same changes in the inventory/ansible variables so the changes are not
+          lost when an upgrade occurs.
+        scored: true
+
+      - id: 1.18
+        text: "Configure audit log file size"
+        audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "maximumFileSizeMegabytes: 30"
+              compare:
+                op: has
+                value: "maximumFileSizeMegabytes"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the maximumFileSizeMegabytes entry,
+          set enabled to true and restart the API server.
+
+          auditConfig:
+            auditFilePath: "/etc/origin/master/audit-ocp.log"
+            enabled: true
+            maximumFileRetentionDays: 30
+            maximumFileSizeMegabytes: 10
+            maximumRetainedFiles: 10
+
+          Make the same changes in the inventory/ansible variables so the changes are not
+          lost when an upgrade occurs.
+        scored: true
+
+      - id: 1.19
+        text: "Verify that authorization-mode is not set to AlwaysAllow"
+        audit: "grep -A1 authorization-mode /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "authorization-mode"
+              set: false
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the authorization-mode
+          entry.
+
+          kubernetesMasterConfig:
+            apiServerArguments:
+               authorization-mode:
+                 - AllowAll
+        scored: true
+
+      - id: 1.20
+        text: "Verify that the token-auth-file flag is not set"
+        audit: "grep token-auth-file /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "token-auth-file"
+              set: false
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the token-auth-file
+          entry under apiserverArguments section.
+
+          kubernetesMasterConfig:
+            apiServerArguments:
+               token-auth-file:
+                 - /path/to/file
+        scored: true
+
+      - id: 1.21
+        text: "Verify the API server certificate authority"
+        audit: "grep -A1 kubelet-certificate-authority /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "kubelet-certificate-authority"
+              set: false
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the following
+          configuration under apiserverArguments section.
+
+          kubernetesMasterConfig:
+            apiServerArguments:
+               kubelet-certificate-authority:
+                 - /path/to/ca
+        scored: true
+
+      - id: 1.22
+        text: "Verify the API server client certificate and client key"
+        audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "keyFile"
+              compare:
+                op: has
+                value: "master.kubelet-client.key"
+              set: true
+            - flag: "certFile"
+              compare:
+                op: has
+                value: "master.kubelet-client.crt"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and add the following
+          configuration under kubeletClientInfo
+
+          kubeletClientInfo:
+            ca: ca-bundle.crt
+            certFile: master.kubelet-client.crt
+            keyFile: master.kubelet-client.key
+            port: 10250
+        scored: true
+
+      - id: 1.23
+        text: "Verify that the service account lookup flag is not set"
+        type: "skip"
+        scored: true
+
+      - id: 1.24
+        text: "Verify the PodSecurityPolicy is disabled to ensure use of SecurityContextConstraints"
+        type: "skip"
+        scored: true
+
+      - id: 1.25
+        text: "Verify that the service account key file argument is not set"
+        audit: "grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "privateKeyFile"
+              compare:
+                op: has
+                value: "serviceaccounts.private.key"
+              set: true
+            - flag: "serviceaccounts.public.key"
+              compare:
+                op: has
+                value: "serviceaccounts.public.key"
+              set: true
+        remediation: |
+          OpenShift API server does not use the service-account-key-file argument.
+          Even if value is set in master-config.yaml, it will not be used to verify
+          service account tokens, as it is in upstream Kubernetes. The ServiceAccount
+          token authenticator is configured with serviceAccountConfig.publicKeyFiles in
+          the master-config.yaml. OpenShift does not reuse the apiserver TLS key.
+
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and set the privateKeyFile
+          and publicKeyFile configuration under serviceAccountConfig.
+
+            serviceAccountConfig:
+              limitSecretReferences: false
+              managedNames:
+                - default
+                - builder
+                - deployer
+              masterCA: ca-bundle.crt
+              privateKeyFile: serviceaccounts.private.key
+              publicKeyFiles:
+                - serviceaccounts.public.key
+
+          Verify that privateKeyFile and publicKeyFile exist and set.
+        scored: true
+
+      - id: 1.26
+        text: "Verify the certificate and key used for communication with etcd"
+        audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "certFile"
+              compare:
+                op: has
+                value: "master.etcd-client.crt"
+              set: true
+            - flag: "keyFile"
+              compare:
+                op: has
+                value: "master.etcd-client.key"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and set keyFile and certFile
+          under etcdClientInfo like below.
+
+            etcdClientInfo:
+              ca: master.etcd-ca.crt
+              certFile: master.etcd-client.crt
+              keyFile: master.etcd-client.key
+        scored: true
+
+      - id: 1.27
+        text: "Verify that the ServiceAccount admission controller is enabled"
+        audit: "grep -A4 ServiceAccount /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "ServiceAccount"
+              set: false
+            - flag: "disable"
+              compare:
+                op: has
+                value: "false"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable ServiceAccount
+          admission control policy.
+
+            ServiceAccount:
+              configuration:
+                kind: DefaultAdmissionConfig
+                apiVersion: v1
+                disable: false
+        scored: true
+
+      - id: 1.28
+        text: "Verify the certificate and key used to encrypt API server traffic"
+        audit: "grep -A7 servingInfo /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "certFile"
+              compare:
+                op: has
+                value: "master.server.crt"
+              set: true
+            - flag: "keyFile"
+              compare:
+                op: has
+                value: "master.server.key"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and set keyFile and certFile under servingInfo.
+
+            servingInfo:
+              bindAddress: 0.0.0.0:8443
+              bindNetwork: tcp4
+              certFile: master.server.crt
+              clientCA: ca.crt
+              keyFile: master.server.key
+              maxRequestsInFlight: 500
+              requestTimeoutSeconds: 3600
+        scored: true
+
+      - id: 1.29
+        text: "Verify that the --client-ca-file argument is not set"
+        audit: "grep client-ca-file /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "clientCA: ca.crt"
+              set: false
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and set clientCA under servingInfo.
+
+            servingInfo:
+              bindAddress: 0.0.0.0:8443
+              bindNetwork: tcp4
+              certFile: master.server.crt
+              clientCA: ca.crt
+              keyFile: master.server.key
+              maxRequestsInFlight: 500
+              requestTimeoutSeconds: 3600
+        scored: true
+
+      - id: 1.30
+        text: "Verify the CA used for communication with etcd"
+        audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "ca"
+              compare:
+                op: has
+                value: "master.etcd-ca.crt"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and set ca under etcdClientInfo.
+
+            etcdClientInfo:
+              ca: master.etcd-ca.crt
+              certFile: master.etcd-client.crt
+              keyFile: master.etcd-client.key
+        scored: true
+
+      - id: 1.31
+        text: "Verify that the authorization-mode argument is not set"
+        type: "skip"
+        scored: true
+
+      - id: 1.32
+        text: "Verify that the NodeRestriction admission controller is enabled"
+        audit: "grep -A4 NodeRestriction /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "NodeRestriction"
+              set: false
+            - flag: "disable"
+              compare:
+                op: has
+                value: "false"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable the NodeRestriction admission control policy.
+
+            NodeRestriction:
+              configuration:
+                kind: DefaultAdmissionConfig
+                apiVersion: v1
+                disable: false
+        scored: true
+
+      - id: 1.33
+        text: "Configure encryption of data at rest in etcd datastore"
+        audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "experimental-encryption-provider-config:"
+              compare:
+                op: has
+                value: "experimental-encryption-provider-config:"
+              set: true
+        remediation: |
+          Follow the instructions in the documentation to configure encryption.
+          https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html
+        scored: true
+
+      - id: 1.34
+        text: "Set the encryption provider to aescbc for etcd data at rest"
+        audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml | sed -n '2p' | awk '{ print $2 }' | xargs grep -A1 providers"
+        tests:
+          test_items:
+            - flag: "aescbc:"
+              compare:
+                op: has
+                value: "aescbc:"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and set aescbc as the first provider in encryption provider config.
+          See https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html.
+        scored: true
+
+      - id: 1.35
+        text: "Enable the EventRateLimit plugin"
+        audit: "grep -A4 EventRateLimit /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "disable"
+              compare:
+                op: has
+                value: "false"
+              set: true
+        remediation: |
+          Follow the documentation to enable the EventRateLimit plugin.
+          https://docs.openshift.com/container-platform/3.10/architecture/additional_concepts/admission_controllers.html#admission-controllers-general-admission-rules
+        scored: true
+
+      - id: 1.36
+        text: "Configure advanced auditing"
+        audit: "grep AdvancedAuditing /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "AdvancedAuditing"
+              compare:
+                op: eq
+                value: "true"
+              set: true
+            - flag: "AdvancedAuditing"
+              set: false
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable AdvancedAuditing.
+
+          kubernetesMasterConfig:
+            apiServerArguments:
+              feature-gates:
+                - AdvancedAuditing=true
+        scored: true
+
+      # Review 1.1.37 in Aquasec shared doc, the tests are net zero.
+      - id: 1.37
+        text: "Adjust the request timeout argument for your cluster resources"
+        audit: "grep request-timeout /etc/origin/master/master-config.yaml"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Change the request-timeout value in the /etc/origin/master/master-config.yaml file.
+        scored: true
+
+
+  - id: 2
+    text: "Scheduler"
+    checks:
+      - id: 2.1
+        text: "Verify that Scheduler profiling is not exposed to the web"
+        type: "skip"
+        scored: true
+
+
+  - id: 3
+    text: "Controller Manager"
+    checks:
+      - id: 3.1
+        text: "Adjust the terminated-pod-gc-threshold argument as needed"
+        audit: "grep terminated-pod-gc-threshold -A1 /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "terminated-pod-gc-threshold:"
+              compare:
+                op: has
+                value: "12500"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable terminated-pod-gc-threshold.
+
+            kubernetesMasterConfig:
+              controllerArguments:
+                 terminated-pod-gc-threshold:
+                 - true
+
+          Enabling the "terminated-pod-gc-threshold" setting is optional.
+        scored: true
+
+      - id: 3.2
+        text: "Verify that Controller profiling is not exposed to the web"
+        type: "skip"
+        scored: true
+
+      - id: 3.3
+        text: "Verify that the --use-service-account-credentials argument is set to true"
+        audit: "grep -A2 use-service-account-credentials /etc/origin/master/master-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "use-service-account-credentials"
+              set: false
+            - flag: "true"
+              compare:
+                op: has
+                value: "true"
+              set: true
+        remediation: |
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and set use-service-account-credentials
+          to true under controllerArguments section.
+
+          kubernetesMasterConfig:
+            controllerArguments:
+               use-service-account-credentials:
+                 - true
+        scored: true
+
+      # Review 3.4
+      - id: 3.4
+        text: "Verify that the --service-account-private-key-file argument is set as appropriate"
+        audit: |
+          grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml | grep privateKeyFile;
+          grep -A2 service-account-private-key-file /etc/origin/master/master-config.yaml
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "privateKeyFile: serviceaccounts.private.key"
+              compare:
+                op: has
+                value: "privateKeyFile"
+            - flag: "service-account-private-key-file"
+              set: false
+        remediation:
+          Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove service-account-private-key-file
+        scored: true
+
+      # Review 3.5
+      - id: 3.5
+        text: "Verify that the --root-ca-file argument is set as appropriate"
+        audit: "/bin/sh -c 'grep root-ca-file /etc/origin/master/master-config.yaml; grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml'"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "root-ca-file=/etc/origin/master/ca-bundle.crt"
+              compare:
+                op: has
+                value: "/etc/origin/master/ca-bundle.crt"
+              set: true
+            - flag: "masterCA"
+              compare:
+                op: has
+                value: "ca-bundle.crt"
+              set: true
+        remediation:
+          Reset to OpenShift defaults. OpenShift starts kube-controller-manager with
+          root-ca-file=/etc/origin/master/ca-bundle.crt by default.  OpenShift Advanced
+          Installation creates this certificate authority and configuration without any
+          configuration required.
+
+          https://docs.openshift.com/container-platform/3.10/admin_guide/service_accounts.html
+        scored: true
+
+      - id: 3.6
+        text: "Verify that Security Context Constraints are applied to Your Pods and Containers"
+        type: "skip"
+        scored: false
+
+      - id: 3.7
+        text: "Manage certificate rotation"
+        audit: "grep -B3 RotateKubeletServerCertificate=true /etc/origin/master/master-config.yaml"
+        tests:
+          test_items:
+            - flag: "RotateKubeletServerCertificate"
+              compare:
+                op: eq
+                value: "true"
+              set: true
+        remediation:
+          If you decide not to enable the RotateKubeletServerCertificate feature,
+          be sure to use the Ansible playbooks provided with the OpenShift installer to
+          automate re-deploying certificates.
+        scored: true
+
+
+  - id: 4
+    text: "Configuration Files"
+    checks:
+      - id: 4.1
+        text: "Verify the OpenShift default permissions for the API server pod specification file"
+        audit: "stat -c %a /etc/origin/node/pods/apiserver.yaml"
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command.
+
+          chmod 600 /etc/origin/node/pods/apiserver.yaml
+        scored: true
+
+      - id: 4.2
+        text: "Verify the OpenShift default file ownership for the API server pod specification file"
+        audit: "stat -c %U:%G /etc/origin/node/pods/apiserver.yaml"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown root:root /etc/origin/node/pods/apiserver.yaml
+        scored: true
+
+      - id: 4.3
+        text: "Verify the OpenShift default file permissions for the controller manager pod specification file"
+        audit: "stat -c %a /etc/origin/node/pods/controller.yaml"
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chmod 600 /etc/origin/node/pods/controller.yaml
+        scored: true
+
+      - id: 4.4
+        text: "Verify the OpenShift default ownership for the controller manager pod specification file"
+        audit: "stat -c %U:%G /etc/origin/node/pods/controller.yaml"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown root:root /etc/origin/node/pods/controller.yaml
+        scored: true
+
+      - id: 4.5
+        text: "Verify the OpenShift default permissions for the scheduler pod specification file"
+        audit: "stat -c %a /etc/origin/node/pods/controller.yaml"
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command.
+
+          chmod 600 /etc/origin/node/pods/controller.yaml
+        scored: true
+
+      - id: 4.6
+        text: "Verify the scheduler pod specification file ownership set by OpenShift"
+        audit: "stat -c %u:%g /etc/origin/node/pods/controller.yaml"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown root:root /etc/origin/node/pods/controller.yaml
+        scored: true
+
+      - id: 4.7
+        text: "Verify the OpenShift default etcd pod specification file permissions"
+        audit: "stat -c %a /etc/origin/node/pods/etcd.yaml"
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command.
+
+          chmod 600 /etc/origin/node/pods/etcd.yaml
+        scored: true
+
+      - id: 4.8
+        text: "Verify the OpenShift default etcd pod specification file ownership"
+        audit: "stat -c %U:%G /etc/origin/node/pods/etcd.yaml"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown root:root /etc/origin/node/pods/etcd.yaml
+        scored: true
+
+      - id: 4.9
+        text: "Verify the default OpenShift Container Network Interface file permissions"
+        audit: "stat -c %a /etc/origin/openvswitch/ /etc/cni/net.d/"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command.
+
+          chmod 644 -R /etc/origin/openvswitch/ /etc/cni/net.d/
+        scored: true
+
+      - id: 4.10
+        text: "Verify the default OpenShift Container Network Interface file ownership"
+        audit: "stat -c %U:%G /etc/origin/openvswitch/ /etc/cni/net.d/"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown root:root /etc/origin/openvswitch/ /etc/cni/net.d/
+        scored: true
+
+      - id: 4.11
+        text: "Verify the default OpenShift etcd data directory permissions"
+        audit: "stat -c %a /var/lib/etcd"
+        tests:
+          test_items:
+            - flag: "700"
+              compare:
+                op: eq
+                value: "700"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the below command:
+          ps -ef | grep etcd
+          Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 /var/lib/etcd
+        scored: true
+
+      - id: 4.12
+        text: "Verify the default OpenShift etcd data directory ownership"
+        audit: "stat -c %U:%G /var/lib/etcd"
+        tests:
+          test_items:
+            - flag: "etcd:etcd"
+              compare:
+                op: eq
+                value: "etcd:etcd"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown etcd:etcd /var/lib/etcd
+        scored: true
+
+      - id: 4.13
+        text: "Verify the default OpenShift admin.conf file permissions"
+        audit: "stat -c %a /etc/origin/master/admin.kubeconfig"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command.
+
+          chmod 644 /etc/origin/master/admin.kubeconfig
+        scored: true
+
+      - id: 4.14
+        text: "Verify the default OpenShift admin.conf file ownership"
+        audit: "stat -c %U:%G /etc/origin/master/admin.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown root:root /etc/origin/master/admin.kubeconfig
+        scored: true
+
+      - id: 4.15
+        text: "Verify the default OpenShift scheduler.conf file permissions"
+        audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command.
+
+          chmod 644 /etc/origin/master/openshift-master.kubeconfig
+        scored: true
+
+      - id: 4.16
+        text: "Verify the default OpenShift scheduler.conf file ownership"
+        audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown root:root /etc/origin/master/openshift-master.kubeconfig
+        scored: true
+
+      - id: 4.17
+        text: "Verify the default Openshift controller-manager.conf file permissions"
+        audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command.
+
+          chmod 644 /etc/origin/master/openshift-master.kubeconfig
+        scored: true
+
+      - id: 4.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
+        audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command on the master node.
+
+          chown root:root /etc/origin/master/openshift-master.kubeconfig
+        scored: true
+
+
+  - id: 5
+    text: "Etcd"
+    checks:
+      - id: 5.1
+        text: "Verify the default OpenShift cert-file and key-file configuration"
+        audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_CERT_FILE=/etc/etcd/server.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep etcd_key_file=/etc/etcd/server.key /proc/1/environ; grep ETCD_CERT_FILE=/etc/etcd/server.crt /etc/etcd/etcd.conf; grep ETCD_KEY_FILE=/etc/etcd/server.key /etc/etcd/etcd.conf'"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "Binary file /proc/1/environ matches"
+              compare:
+                op: has
+                value: "Binary file /proc/1/environ matches"
+              set: true
+            - flag: "ETCD_CERT_FILE=/etc/etcd/server.crt"
+              compare:
+                op: has
+                value: "ETCD_CERT_FILE=/etc/etcd/server.crt"
+              set: true
+            - flag: "ETCD_KEY_FILE=/etc/etcd/server.key"
+              compare:
+                op: has
+                value: "ETCD_KEY_FILE=/etc/etcd/server.key"
+              set: true
+        remediation: |
+          Reset to the OpenShift default configuration.
+        scored: true
+
+      - id: 5.2
+        text: "Verify the default OpenShift setting for the client-cert-auth argument"
+        audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "Binary file /proc/1/environ matches"
+              compare:
+                op: has
+                value: "Binary file /proc/1/environ matches"
+              set: true
+            - flag: "ETCD_CLIENT_CERT_AUTH=true"
+              compare:
+                op: has
+                value: "ETCD_CLIENT_CERT_AUTH=true"
+              set: true
+        remediation: |
+          Reset to the OpenShift default configuration.
+        scored: true
+
+      - id: 5.3
+        text: "Verify the OpenShift default values for etcd_auto_tls"
+        audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_AUTO_TLS /proc/1/environ; grep ETCD_AUTO_TLS /etc/etcd/etcd.conf'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "ETCD_AUTO_TLS=false"
+              compare:
+                op: has
+                value: "ETCD_AUTO_TLS=false"
+              set: true
+            - flag: "#ETCD_AUTO_TLS"
+              compare:
+                op: has
+                value: "#ETCD_AUTO_TLS"
+              set: true
+        remediation: |
+          Reset to the OpenShift default configuration.
+        scored: true
+
+      - id: 5.4
+        text: "Verify the OpenShift default peer-cert-file and peer-key-file arguments for etcd"
+        audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep ETCD_PEER_KEY_FILE=/etc/etcd/peer.key /proc/1/environ; grep ETCD_PEER_CERT_FILE /etc/etcd/etcd.conf; grep ETCD_PEER_KEY_FILE /etc/etcd/etcd.conf'"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "Binary file /proc/1/environ matches"
+              compare:
+                op: has
+                value: "Binary file /proc/1/environ matches"
+              set: true
+            - flag: "ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt"
+              compare:
+                op: has
+                value: "ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt"
+              set: true
+            - flag: "ETCD_PEER_KEY_FILE=/etc/etcd/peer.key"
+              compare:
+                op: has
+                value: "ETCD_PEER_KEY_FILE=/etc/etcd/peer.key"
+              set: true
+        remediation: |
+          Reset to the OpenShift default configuration.
+        scored: true
+
+      - id: 5.5
+        text: "Verify the OpenShift default configuration for the peer-client-cert-auth"
+        audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_PEER_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "Binary file /proc/1/environ matches"
+              compare:
+                op: has
+                value: "Binary file /proc/1/environ matches"
+              set: true
+            - flag: "ETCD_PEER_CLIENT_CERT_AUTH=true"
+              compare:
+                op: has
+                value: "ETCD_PEER_CLIENT_CERT_AUTH=true"
+              set: true
+        remediation: |
+          Reset to the OpenShift default configuration.
+        scored: true
+
+      - id: 5.6
+        text: "Verify the OpenShift default configuration for the peer-auto-tls argument"
+        audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_AUTO_TLS /proc/1/environ; grep ETCD_PEER_AUTO_TLS /etc/etcd/etcd.conf'"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "Binary file /proc/1/environ matches"
+              compare:
+                op: has
+                value: "Binary file /proc/1/environ matches"
+              set: true
+            - flag: "#ETCD_PEER_AUTO_TLS=false"
+              compare:
+                op: has
+                value: "#ETCD_PEER_AUTO_TLS=false"
+              set: true
+        remediation: |
+          Reset to the OpenShift default configuration.
+        scored: true
+
+      - id: 5.7
+        text: "Optionally modify the wal-dir argument"
+        type: "skip"
+        scored: true
+
+      - id: 5.8
+        text: "Optionally modify the max-wals argument"
+        type: "skip"
+        scored: true
+
+      - id: 5.9
+        text: "Verify the OpenShift default configuration for the etcd Certificate Authority"
+        audit: "openssl x509 -in /etc/origin/master/master.etcd-ca.crt -subject -issuer -noout | sed 's/@/ /'"
+        tests:
+          test_items:
+            - flag: "issuer= /CN=etcd-signer"
+              compare:
+                op: has
+                value: "issuer= /CN=etcd-signer"
+              set: true
+        remediation: |
+          Reset to the OpenShift default configuration.
+        scored: false
+
+
+  - id: 6
+    text: "General Security Primitives"
+    checks:
+      - id: 6.1
+        text: "Ensure that the cluster-admin role is only used where required"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Review users, groups, serviceaccounts bound to cluster-admin:
+          oc get clusterrolebindings | grep cluster-admin
+
+          Review users and groups bound to cluster-admin and decide whether they require
+          such access. Consider creating least-privilege roles for users and service accounts
+        scored: false
+
+      - id: 6.2
+        text: "Verify Security Context Constraints as in use"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Review Security Context Constraints:
+          oc get scc
+
+          Use OpenShift's Security Context Constraint feature, which has been contributed
+          to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10.
+          OpenShift ships with two SCCs: restricted and privileged.
+
+          The two default SCCs will be created when the master is started. The restricted
+          SCC is granted to all authenticated users by default.
+
+          https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html
+        scored: false
+
+      - id: 6.3
+        text: "Use OpenShift projects to maintain boundaries between resources"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Review projects:
+          oc get projects
+        scored: false
+
+      - id: 6.4
+        text: "Create network segmentation using the Multi-tenant plugin or Network Policies"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Verify on masters the plugin being used:
+          grep networkPluginName /etc/origin/master/master-config.yaml
+
+          OpenShift provides multi-tenant networking isolation (using Open vSwich and
+          vXLAN), to segregate network traffic between containers belonging to different
+          tenants (users or applications) while running on a shared cluster. Red Hat also
+          works with 3rd-party SDN vendors to provide the same level of capabilities
+          integrated with OpenShift. OpenShift SDN is included a part of OpenShift
+          subscription.
+
+          OpenShift supports Kubernetes NetworkPolicy. Administrator must configure
+          NetworkPolicies if desired.
+
+          https://docs.openshift.com/container-platform/3.10/architecture/networking/sdn.html#architecture-additional-concepts-sdn
+
+          Ansible Inventory variable: os_sdn_network_plugin_name:
+          https://docs.openshift.com/container-platform/3.10/install/configuring_inventory_file.html
+        scored: false
+
+      - id: 6.5
+        text: "Enable seccomp and configure custom Security Context Constraints"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Verify SCCs that have been configured with seccomp:
+          oc get scc -ocustom-columns=NAME:.metadata.name,SECCOMP-PROFILES:.seccompProfiles
+
+          OpenShift does not enable seccomp by default. To configure seccomp profiles that
+          are applied to pods run by the SCC, follow the instructions in the
+          documentation:
+
+          https://docs.openshift.com/container-platform/3.9/admin_guide/seccomp.html#admin-guide-seccomp
+        scored: false
+
+      - id: 6.6
+        text: "Review Security Context Constraints"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Review SCCs:
+          oc describe scc
+
+          Use OpenShift's Security Context Constraint feature, which has been contributed
+          to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10.
+
+          OpenShift ships with two SCCs: restricted and privileged. The two default SCCs
+          will be created when the master is started. The restricted SCC is granted to
+          all authenticated users by default.
+
+          All pods are run under the restricted SCC by default. Running a pod under any
+          other SCC requires an account with cluster admin capabilities to grant access
+          for the service account.
+
+          SecurityContextConstraints limit what securityContext is applied to pods and
+          containers.
+
+          https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html
+        scored: false
+
+      - id: 6.7
+        text: "Manage Image Provenance using ImagePolicyWebhook admission controller"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          Review imagePolicyConfig in /etc/origin/master/master-config.yaml.
+        scored: false
+
+      - id: 6.8
+        text: "Configure Network policies as appropriate"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          If ovs-networkplugin is used, review network policies:
+          oc get networkpolicies
+
+          OpenShift supports Kubernetes NetworkPolicy via ovs-networkpolicy plugin.
+          If choosing ovs-multitenant plugin, each namespace is isolated in its own
+          netnamespace by default.
+        scored: false
+
+      - id: 6.9
+        text: "Use Security Context Constraints as compensating controls for privileged containers"
+        type: "manual"
+        remediation: |
+          [Manual test]
+          1) Determine all sccs allowing privileged containers:
+             oc get scc -ocustom-columns=NAME:.metadata.name,ALLOWS_PRIVILEGED:.allowPrivilegedContainer
+          2) Review users and groups assigned to sccs allowing privileged containers:
+             oc describe sccs <from (1)>
+
+          Use OpenShift's Security Context Constraint feature, which has been contributed
+          to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10.
+
+          OpenShift ships with two SCCs: restricted and privileged. The two default SCCs
+          will be created when the master is started. The restricted SCC is granted to all
+          authenticated users by default.
+
+          Similar scenarios are documented in the SCC
+          documentation, which outlines granting SCC access to specific serviceaccounts.
+          Administrators may create least-restrictive SCCs based on individual container
+          needs.
+
+          For example, if a container only requires running as the root user, the anyuid
+          SCC can be used, which will not expose additional access granted by running
+          privileged containers.
+
+          https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html
+        scored: false
diff --git a/cfg/rh-0.7/node.yaml b/cfg/rh-0.7/node.yaml
index 996965d..9e0f0f4 100644
--- a/cfg/rh-0.7/node.yaml
+++ b/cfg/rh-0.7/node.yaml
@@ -1,376 +1,376 @@
----
-controls:
-id: 2
-text: "Worker Node Security Configuration"
-type: "node"
-groups:
-- id: 7
-  text: "Kubelet"
-  checks:
-  - id: 7.1
-    text: "Use Security Context Constraints to manage privileged containers as needed"
-    type: "skip"
-    scored: true
-
-  - id: 7.2
-    text: "Ensure anonymous-auth is not disabled"
-    type: "skip"
-    scored: true
-
-  - id: 7.3
-    text: "Verify that the --authorization-mode argument is set to WebHook"
-    audit: "grep -A1 authorization-mode /etc/origin/node/node-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "authorization-mode"
-        set: false
-      - flag: "authorization-mode"
-        compare:
-          op: has
-          value: "Webhook"
-        set: true
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove authorization-mode under
-      kubeletArguments in /etc/origin/node/node-config.yaml or set it to "Webhook".
-    scored: true
-
-  - id: 7.4
-    text: "Verify the OpenShift default for the client-ca-file argument"
-    audit: "grep -A1 client-ca-file /etc/origin/node/node-config.yaml"
-    tests:
-      test_items:
-      - flag: "client-ca-file"
-        set: false
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove any configuration returned by the following:
-      grep -A1 client-ca-file /etc/origin/node/node-config.yaml
-
-      Reset to the OpenShift default. 
-      See https://github.com/openshift/openshift-ansible/blob/release-3.10/roles/openshift_node_group/templates/node-config.yaml.j2#L65
-      The config file does not have this defined in kubeletArgument, but in PodManifestConfig.
-    scored: true
-
-  - id: 7.5
-    text: "Verify the OpenShift default setting for the read-only-port argument"
-    audit: "grep -A1 read-only-port /etc/origin/node/node-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "read-only-port"
-        set: false
-      - flag: "read-only-port"
-        compare:
-          op: has
-          value: "0"
-        set: true
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml and removed so that the OpenShift default is applied.
-    scored: true
-
-  - id: 7.6
-    text: "Adjust the streaming-connection-idle-timeout argument"
-    audit: "grep -A1 streaming-connection-idle-timeout /etc/origin/node/node-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "streaming-connection-idle-timeout"
-        set: false
-      - flag: "5m"
-        set: false
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the streaming-connection-timeout
-      value like the following in node-config.yaml.
-      
-      kubeletArguments:
-        streaming-connection-idle-timeout:
-           - "5m"
-    scored: true
-
-  - id: 7.7
-    text: "Verify the OpenShift defaults for the protect-kernel-defaults argument"
-    type: "skip"
-    scored: true
-
-  - id: 7.8
-    text: "Verify the OpenShift default value of true for the make-iptables-util-chains argument"
-    audit: "grep -A1 make-iptables-util-chains /etc/origin/node/node-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "make-iptables-util-chains"
-        set: false
-      - flag: "make-iptables-util-chains"
-        compare:
-          op: has
-          value: "true"
-        set: true
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml and reset make-iptables-util-chains to the OpenShift
-      default value of true. 
-    scored: true
-
-  - id: 7.9
-    text: "Verify that the --keep-terminated-pod-volumes argument is set to false"
-    audit: "grep -A1 keep-terminated-pod-volumes /etc/origin/node/node-config.yaml"
-    tests:
-      test_items:
-      - flag: "keep-terminated-pod-volumes"
-        compare:
-          op: has
-          value: "false"
-        set: true
-    remediation: |
-      Reset to the OpenShift defaults
-    scored: true
-
-  - id: 7.10
-    text: "Verify the OpenShift defaults for the hostname-override argument"
-    type: "skip"
-    scored: true
-
-  - id: 7.11
-    text: "Set the --event-qps argument to 0"
-    audit: "grep -A1 event-qps /etc/origin/node/node-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "event-qps"
-        set: false
-      - flag: "event-qps"
-        compare:
-          op: has
-          value: "0"
-        set: true
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml set the event-qps argument to 0 in
-      the kubeletArguments section of.
-    scored: true
-
-  - id: 7.12
-    text: "Verify the OpenShift cert-dir flag for HTTPS traffic"
-    audit: "grep -A1 cert-dir /etc/origin/node/node-config.yaml"
-    tests:
-      test_items:
-      - flag: "/etc/origin/node/certificates"
-        compare:
-          op: has
-          value: "/etc/origin/node/certificates"
-        set: true
-    remediation: |
-      Reset to the OpenShift default values.
-    scored: true
-
-  - id: 7.13
-    text: "Verify the OpenShift default of 0 for the cadvisor-port argument"
-    audit: "grep -A1 cadvisor-port /etc/origin/node/node-config.yaml"
-    tests:
-      bin_op: or
-      test_items:
-      - flag: "cadvisor-port"
-        set: false
-      - flag: "cadvisor-port"
-        compare:
-          op: has
-          value: "0"
-        set: true
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove the cadvisor-port flag 
-      if it is set in the  kubeletArguments section.
-    scored: true
-
-  - id: 7.14
-    text: "Verify that the RotateKubeletClientCertificate argument is set to true"
-    audit: "grep -B1 RotateKubeletClientCertificate=true /etc/origin/node/node-config.yaml"
-    tests:
-      test_items:
-      - flag: "RotateKubeletClientCertificate=true"
-        compare:
-          op: has
-          value: "true"
-        set: true
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletClientCertificate to true.
-    scored: true
-
-  - id: 7.15
-    text: "Verify that the RotateKubeletServerCertificate argument is set to true"
-    audit: "grep -B1 RotateKubeletServerCertificate=true /etc/origin/node/node-config.yaml"
-    tests:
-      test_items:
-      - flag: "RotateKubeletServerCertificate=true"
-        compare:
-          op: has
-          value: "true"
-        set: true
-    remediation: |
-      Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletServerCertificate to true.
-    scored: true
-
-
-- id: 8
-  text: "Configuration Files"
-  checks:
-  - id: 8.1
-    text: "Verify the OpenShift default permissions for the kubelet.conf file"
-    audit: "stat -c %a  /etc/origin/node/node.kubeconfig"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command on each worker node.
-      chmod 644 /etc/origin/node/node.kubeconfig
-    scored: true
-
-  - id: 8.2
-    text: "Verify the kubeconfig file ownership of root:root"
-    audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
-    tests:
-      test_items:
-        - flag: "root:root"
-          compare:
-            op: eq
-            value: root:root
-          set: true
-      remediation: |
-        Run the below command on each worker node.
-        chown root:root /etc/origin/node/node.kubeconfig
-      scored: true
-
-  - id: 8.3
-    text: "Verify the kubelet service file permissions of 644"
-    audit: "stat -c %a $nodesvc"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command on each worker node.
-      chmod 644 $nodesvc
-    scored: true
-
-  - id: 8.4
-    text: "Verify the kubelet service file ownership of root:root"
-    audit: "stat -c %U:%G $nodesvc"
-    tests:
-      test_items:
-        - flag: "root:root"
-          compare:
-            op: eq
-            value: root:root
-          set: true
-      remediation: |
-        Run the below command on each worker node.
-        chown root:root $nodesvc
-      scored: true
-
-  - id: 8.5
-    text: "Verify the OpenShift default permissions for the proxy kubeconfig file"
-    audit: "stat -c %a /etc/origin/node/node.kubeconfig"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command on each worker node.
-      chmod 644 /etc/origin/node/node.kubeconfig
-    scored: true
-
-  - id: 8.6
-    text: "Verify the proxy kubeconfig file ownership of root:root"
-    audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
-    tests:
-      test_items:
-        - flag: "root:root"
-          compare:
-            op: eq
-            value: root:root
-          set: true
-      remediation: |
-        Run the below command on each worker node.
-        chown root:root /etc/origin/node/node.kubeconfig
-      scored: true
-
-  - id: 8.7
-    text: "Verify the OpenShift default permissions for the certificate authorities file."
-    audit: "stat -c %a /etc/origin/node/client-ca.crt"
-    tests:
-      bin_op: or
-      test_items:
-        - flag: "644"
-          compare:
-            op: eq
-            value: "644"
-          set: true
-        - flag: "640"
-          compare:
-            op: eq
-            value: "640"
-          set: true
-        - flag: "600"
-          compare:
-            op: eq
-            value: "600"
-          set: true
-    remediation: |
-      Run the below command on each worker node.
-      chmod 644 /etc/origin/node/client-ca.crt
-    scored: true
-
-  - id: 8.8
-    text: "Verify the client certificate authorities file ownership of root:root"
-    audit: "stat -c %U:%G /etc/origin/node/client-ca.crt"
-    tests:
-      test_items:
-        - flag: "root:root"
-          compare:
-            op: eq
-            value: root:root
-          set: true
-      remediation: |
-        Run the below command on each worker node.
-        chown root:root /etc/origin/node/client-ca.crt
-      scored: true
+---
+controls:
+id: 2
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 7
+    text: "Kubelet"
+    checks:
+      - id: 7.1
+        text: "Use Security Context Constraints to manage privileged containers as needed"
+        type: "skip"
+        scored: true
+
+      - id: 7.2
+        text: "Ensure anonymous-auth is not disabled"
+        type: "skip"
+        scored: true
+
+      - id: 7.3
+        text: "Verify that the --authorization-mode argument is set to WebHook"
+        audit: "grep -A1 authorization-mode /etc/origin/node/node-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "authorization-mode"
+              set: false
+            - flag: "authorization-mode"
+              compare:
+                op: has
+                value: "Webhook"
+              set: true
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove authorization-mode under
+          kubeletArguments in /etc/origin/node/node-config.yaml or set it to "Webhook".
+        scored: true
+
+      - id: 7.4
+        text: "Verify the OpenShift default for the client-ca-file argument"
+        audit: "grep -A1 client-ca-file /etc/origin/node/node-config.yaml"
+        tests:
+          test_items:
+            - flag: "client-ca-file"
+              set: false
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove any configuration returned by the following:
+          grep -A1 client-ca-file /etc/origin/node/node-config.yaml
+
+          Reset to the OpenShift default.
+          See https://github.com/openshift/openshift-ansible/blob/release-3.10/roles/openshift_node_group/templates/node-config.yaml.j2#L65
+          The config file does not have this defined in kubeletArgument, but in PodManifestConfig.
+        scored: true
+
+      - id: 7.5
+        text: "Verify the OpenShift default setting for the read-only-port argument"
+        audit: "grep -A1 read-only-port /etc/origin/node/node-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "read-only-port"
+              set: false
+            - flag: "read-only-port"
+              compare:
+                op: has
+                value: "0"
+              set: true
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and removed so that the OpenShift default is applied.
+        scored: true
+
+      - id: 7.6
+        text: "Adjust the streaming-connection-idle-timeout argument"
+        audit: "grep -A1 streaming-connection-idle-timeout /etc/origin/node/node-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "streaming-connection-idle-timeout"
+              set: false
+            - flag: "5m"
+              set: false
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the streaming-connection-timeout
+          value like the following in node-config.yaml.
+
+          kubeletArguments:
+            streaming-connection-idle-timeout:
+               - "5m"
+        scored: true
+
+      - id: 7.7
+        text: "Verify the OpenShift defaults for the protect-kernel-defaults argument"
+        type: "skip"
+        scored: true
+
+      - id: 7.8
+        text: "Verify the OpenShift default value of true for the make-iptables-util-chains argument"
+        audit: "grep -A1 make-iptables-util-chains /etc/origin/node/node-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "make-iptables-util-chains"
+              set: false
+            - flag: "make-iptables-util-chains"
+              compare:
+                op: has
+                value: "true"
+              set: true
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and reset make-iptables-util-chains to the OpenShift
+          default value of true.
+        scored: true
+
+      - id: 7.9
+        text: "Verify that the --keep-terminated-pod-volumes argument is set to false"
+        audit: "grep -A1 keep-terminated-pod-volumes /etc/origin/node/node-config.yaml"
+        tests:
+          test_items:
+            - flag: "keep-terminated-pod-volumes"
+              compare:
+                op: has
+                value: "false"
+              set: true
+        remediation: |
+          Reset to the OpenShift defaults
+        scored: true
+
+      - id: 7.10
+        text: "Verify the OpenShift defaults for the hostname-override argument"
+        type: "skip"
+        scored: true
+
+      - id: 7.11
+        text: "Set the --event-qps argument to 0"
+        audit: "grep -A1 event-qps /etc/origin/node/node-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "event-qps"
+              set: false
+            - flag: "event-qps"
+              compare:
+                op: has
+                value: "0"
+              set: true
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the event-qps argument to 0 in
+          the kubeletArguments section.
+        scored: true
+
+      - id: 7.12
+        text: "Verify the OpenShift cert-dir flag for HTTPS traffic"
+        audit: "grep -A1 cert-dir /etc/origin/node/node-config.yaml"
+        tests:
+          test_items:
+            - flag: "/etc/origin/node/certificates"
+              compare:
+                op: has
+                value: "/etc/origin/node/certificates"
+              set: true
+        remediation: |
+          Reset to the OpenShift default values.
+        scored: true
+
+      - id: 7.13
+        text: "Verify the OpenShift default of 0 for the cadvisor-port argument"
+        audit: "grep -A1 cadvisor-port /etc/origin/node/node-config.yaml"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "cadvisor-port"
+              set: false
+            - flag: "cadvisor-port"
+              compare:
+                op: has
+                value: "0"
+              set: true
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove the cadvisor-port flag
+          if it is set in the kubeletArguments section.
+        scored: true
+
+      - id: 7.14
+        text: "Verify that the RotateKubeletClientCertificate argument is set to true"
+        audit: "grep -B1 RotateKubeletClientCertificate=true /etc/origin/node/node-config.yaml"
+        tests:
+          test_items:
+            - flag: "RotateKubeletClientCertificate=true"
+              compare:
+                op: has
+                value: "true"
+              set: true
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletClientCertificate to true.
+        scored: true
+
+      - id: 7.15
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true"
+        audit: "grep -B1 RotateKubeletServerCertificate=true /etc/origin/node/node-config.yaml"
+        tests:
+          test_items:
+            - flag: "RotateKubeletServerCertificate=true"
+              compare:
+                op: has
+                value: "true"
+              set: true
+        remediation: |
+          Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletServerCertificate to true.
+        scored: true
+
+
+  - id: 8
+    text: "Configuration Files"
+    checks:
+      - id: 8.1
+        text: "Verify the OpenShift default permissions for the kubelet.conf file"
+        audit: "stat -c %a  /etc/origin/node/node.kubeconfig"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command on each worker node.
+          chmod 644 /etc/origin/node/node.kubeconfig
+        scored: true
+
+      - id: 8.2
+        text: "Verify the kubeconfig file ownership of root:root"
+        audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: root:root
+              set: true
+        remediation: |
+          Run the below command on each worker node.
+          chown root:root /etc/origin/node/node.kubeconfig
+        scored: true
+
+      - id: 8.3
+        text: "Verify the kubelet service file permissions of 644"
+        audit: "stat -c %a $nodesvc"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command on each worker node.
+          chmod 644 $nodesvc
+        scored: true
+
+      - id: 8.4
+        text: "Verify the kubelet service file ownership of root:root"
+        audit: "stat -c %U:%G $nodesvc"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: root:root
+              set: true
+        remediation: |
+          Run the below command on each worker node.
+          chown root:root $nodesvc
+        scored: true
+
+      - id: 8.5
+        text: "Verify the OpenShift default permissions for the proxy kubeconfig file"
+        audit: "stat -c %a /etc/origin/node/node.kubeconfig"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command on each worker node.
+          chmod 644 /etc/origin/node/node.kubeconfig
+        scored: true
+
+      - id: 8.6
+        text: "Verify the proxy kubeconfig file ownership of root:root"
+        audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: root:root
+              set: true
+        remediation: |
+          Run the below command on each worker node.
+          chown root:root /etc/origin/node/node.kubeconfig
+        scored: true
+
+      - id: 8.7
+        text: "Verify the OpenShift default permissions for the certificate authorities file."
+        audit: "stat -c %a /etc/origin/node/client-ca.crt"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+            - flag: "640"
+              compare:
+                op: eq
+                value: "640"
+              set: true
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command on each worker node.
+          chmod 644 /etc/origin/node/client-ca.crt
+        scored: true
+
+      - id: 8.8
+        text: "Verify the client certificate authorities file ownership of root:root"
+        audit: "stat -c %U:%G /etc/origin/node/client-ca.crt"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: root:root
+              set: true
+        remediation: |
+          Run the below command on each worker node.
+          chown root:root /etc/origin/node/client-ca.crt
+        scored: true
diff --git a/hack/debug.yaml b/hack/debug.yaml
index e26e447..9d6a998 100644
--- a/hack/debug.yaml
+++ b/hack/debug.yaml
@@ -1,3 +1,4 @@
+---
 # use this pod with: kubectl run ubuntu -it --pid=host  -- /bin/bash
 # this allows you to debug what is running on the host.
 apiVersion: v1
@@ -7,40 +8,40 @@ metadata:
 spec:
   hostPID: true
   containers:
-  - name: ubuntu
-    image: ubuntu
-    command: [ "/bin/bash", "-c", "--" ]
-    args: [ "while true; do sleep 30; done;" ]
-    volumeMounts:
+    - name: ubuntu
+      image: ubuntu
+      command: ["/bin/bash", "-c", "--"]
+      args: ["while true; do sleep 30; done;"]
+      volumeMounts:
+        - name: var-lib-kubelet
+          mountPath: /var/lib/kubelet
+        - name: etc-systemd
+          mountPath: /etc/systemd
+        - name: etc-kubernetes
+          mountPath: /etc/kubernetes
+          # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
+          # You can omit this mount if you specify --version as part of the command.
+        - name: usr-bin
+          mountPath: /usr/bin
+        - name: kind-bin
+          mountPath: /kind/bin
+      resources:
+        limits:
+          memory: "128Mi"
+          cpu: "500m"
+  volumes:
     - name: var-lib-kubelet
-      mountPath: /var/lib/kubelet
+      hostPath:
+        path: "/var/lib/kubelet"
     - name: etc-systemd
-      mountPath: /etc/systemd
+      hostPath:
+        path: "/etc/systemd"
     - name: etc-kubernetes
-      mountPath: /etc/kubernetes
-      # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. 
-      # You can omit this mount if you specify --version as part of the command.           
+      hostPath:
+        path: "/etc/kubernetes"
     - name: usr-bin
-      mountPath: /usr/bin
+      hostPath:
+        path: "/usr/bin"
     - name: kind-bin
-      mountPath: /kind/bin
-    resources:
-      limits:
-        memory: "128Mi"
-        cpu: "500m"
-  volumes:
-  - name: var-lib-kubelet
-    hostPath:
-      path: "/var/lib/kubelet"
-  - name: etc-systemd
-    hostPath:
-      path: "/etc/systemd"
-  - name: etc-kubernetes
-    hostPath:
-      path: "/etc/kubernetes"
-  - name: usr-bin
-    hostPath:
-      path: "/usr/bin"
-  - name: kind-bin
-    hostPath:
-      path: "/kind/bin"
+      hostPath:
+        path: "/kind/bin"
diff --git a/hack/kind.yaml b/hack/kind.yaml
index b58811a..ca06a87 100644
--- a/hack/kind.yaml
+++ b/hack/kind.yaml
@@ -1,3 +1,4 @@
+---
 apiVersion: batch/v1
 kind: Job
 metadata:
@@ -10,41 +11,41 @@ spec:
     spec:
       hostPID: true
       containers:
-      - name: kube-bench
-        image: aquasec/kube-bench:${VERSION}
-        command: ["kube-bench"]
-        volumeMounts:
+        - name: kube-bench
+          image: aquasec/kube-bench:${VERSION}
+          command: ["kube-bench"]
+          volumeMounts:
+            - name: var-lib-etcd
+              mountPath: /var/lib/etcd
+            - name: var-lib-kubelet
+              mountPath: /var/lib/kubelet
+            - name: etc-systemd
+              mountPath: /etc/systemd
+            - name: etc-kubernetes
+              mountPath: /etc/kubernetes
+              # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
+              # You can omit this mount if you specify --version as part of the command.
+            - name: usr-bin
+              mountPath: /usr/bin
+            - name: kind-bin
+              mountPath: /kind/bin
+      restartPolicy: Never
+      volumes:
         - name: var-lib-etcd
-          mountPath: /var/lib/etcd
+          hostPath:
+            path: "/var/lib/etcd"
         - name: var-lib-kubelet
-          mountPath: /var/lib/kubelet
+          hostPath:
+            path: "/var/lib/kubelet"
         - name: etc-systemd
-          mountPath: /etc/systemd
+          hostPath:
+            path: "/etc/systemd"
         - name: etc-kubernetes
-          mountPath: /etc/kubernetes
-          # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. 
-          # You can omit this mount if you specify --version as part of the command.           
+          hostPath:
+            path: "/etc/kubernetes"
         - name: usr-bin
-          mountPath: /usr/bin
+          hostPath:
+            path: "/usr/bin"
         - name: kind-bin
-          mountPath: /kind/bin
-      restartPolicy: Never
-      volumes:
-      - name: var-lib-etcd
-        hostPath:
-          path: "/var/lib/etcd"
-      - name: var-lib-kubelet
-        hostPath:
-          path: "/var/lib/kubelet"
-      - name: etc-systemd
-        hostPath:
-          path: "/etc/systemd"
-      - name: etc-kubernetes
-        hostPath:
-          path: "/etc/kubernetes"
-      - name: usr-bin
-        hostPath:
-          path: "/usr/bin"
-      - name: kind-bin
-        hostPath:
-          path: "/kind/bin"
+          hostPath:
+            path: "/kind/bin"
diff --git a/integration/testdata/add-tls-kind-k8s114.yaml b/integration/testdata/add-tls-kind-k8s114.yaml
index 8a18973..11d5c69 100644
--- a/integration/testdata/add-tls-kind-k8s114.yaml
+++ b/integration/testdata/add-tls-kind-k8s114.yaml
@@ -1,19 +1,19 @@
+---
 apiVersion: kind.sigs.k8s.io/v1alpha3
 kind: Cluster
 networking:
   apiServerAddress: "0.0.0.0"
 
 kubeadmConfigPatchesJson6902:
-- group: kubelet.config.k8s.io
-  version: v1beta1
-  kind: KubeletConfiguration
-  patch: |
-    - op: add
-      path: /tlsCipherSuites
-      value: ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"]
+  - group: kubelet.config.k8s.io
+    version: v1beta1
+    kind: KubeletConfiguration
+    patch: |
+      - op: add
+        path: /tlsCipherSuites
+        value: ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"]
 
 nodes:
-# the control plane node config
-- role: control-plane
-  image: "kindest/node:v1.14.6"
-  
\ No newline at end of file
+  # the control plane node config
+  - role: control-plane
+    image: "kindest/node:v1.14.6"
diff --git a/job-eks.yaml b/job-eks.yaml
index 06035cf..6f82c31 100644
--- a/job-eks.yaml
+++ b/job-eks.yaml
@@ -1,3 +1,4 @@
+---
 apiVersion: batch/v1
 kind: Job
 metadata:
@@ -7,26 +8,25 @@ spec:
     spec:
       hostPID: true
       containers:
-      - name: kube-bench
-        # Push the image to your ECR and then refer to it here
-        image: <ID.dkr.ecr.region.amazonaws.com/aquasec/kube-bench:ref>
-        command: ["kube-bench", "--version", "1.11"]
-        volumeMounts:
+        - name: kube-bench
+          # Push the image to your ECR and then refer to it here
+          image: <ID.dkr.ecr.region.amazonaws.com/aquasec/kube-bench:ref>
+          command: ["kube-bench", "--version", "1.11"]
+          volumeMounts:
+            - name: var-lib-kubelet
+              mountPath: /var/lib/kubelet
+            - name: etc-systemd
+              mountPath: /etc/systemd
+            - name: etc-kubernetes
+              mountPath: /etc/kubernetes
+      restartPolicy: Never
+      volumes:
         - name: var-lib-kubelet
-          mountPath: /var/lib/kubelet
+          hostPath:
+            path: "/var/lib/kubelet"
         - name: etc-systemd
-          mountPath: /etc/systemd
+          hostPath:
+            path: "/etc/systemd"
         - name: etc-kubernetes
-          mountPath: /etc/kubernetes
-      restartPolicy: Never
-      volumes:
-      - name: var-lib-kubelet
-        hostPath:
-          path: "/var/lib/kubelet"
-      - name: etc-systemd
-        hostPath:
-          path: "/etc/systemd"
-      - name: etc-kubernetes
-        hostPath:
-          path: "/etc/kubernetes"
-
+          hostPath:
+            path: "/etc/kubernetes"
diff --git a/job-iks.yaml b/job-iks.yaml
index 24528ba..8a5fd38 100644
--- a/job-iks.yaml
+++ b/job-iks.yaml
@@ -1,3 +1,4 @@
+---
 apiVersion: batch/v1
 kind: Job
 metadata:
@@ -7,27 +8,27 @@ spec:
     spec:
       hostPID: true
       containers:
-      - name: kube-bench
-        image: aquasec/kube-bench:latest
-        command: ["kube-bench", "--version", "1.13", "node"]
-        volumeMounts:
+        - name: kube-bench
+          image: aquasec/kube-bench:latest
+          command: ["kube-bench", "--version", "1.13", "node"]
+          volumeMounts:
+            - name: var-lib-kubelet
+              mountPath: /var/lib/kubelet
+            - name: etc-systemd
+              mountPath: /etc/systemd
+            - name: etc-kubernetes
+              mountPath: /etc/kubernetes
+      restartPolicy: Never
+      volumes:
         - name: var-lib-kubelet
-          mountPath: /var/lib/kubelet
+          hostPath:
+            path: "/var/lib/kubelet"
         - name: etc-systemd
-          mountPath: /etc/systemd
+          hostPath:
+            path: "/lib/systemd"
         - name: etc-kubernetes
-          mountPath: /etc/kubernetes
-      restartPolicy: Never
-      volumes:
-      - name: var-lib-kubelet
-        hostPath:
-          path: "/var/lib/kubelet"
-      - name: etc-systemd
-        hostPath:
-          path: "/lib/systemd"
-      - name: etc-kubernetes
-        hostPath:
-          path: "/etc/kubernetes"
-      - name: usr-bin
-        hostPath:
-          path: "/usr/bin"
+          hostPath:
+            path: "/etc/kubernetes"
+        - name: usr-bin
+          hostPath:
+            path: "/usr/bin"
diff --git a/job-master.yaml b/job-master.yaml
index 7219af8..5896bd3 100644
--- a/job-master.yaml
+++ b/job-master.yaml
@@ -1,3 +1,4 @@
+---
 apiVersion: batch/v1
 kind: Job
 metadata:
@@ -6,33 +7,33 @@ spec:
   template:
     spec:
       hostPID: true
-      nodeSelector: 
-        node-role.kubernetes.io/master: "" 
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
       tolerations:
-      - key: node-role.kubernetes.io/master
-        operator: Exists
-        effect: NoSchedule
+        - key: node-role.kubernetes.io/master
+          operator: Exists
+          effect: NoSchedule
       containers:
-      - name: kube-bench
-        image: aquasec/kube-bench:latest
-        command: ["kube-bench","master"]
-        volumeMounts:
+        - name: kube-bench
+          image: aquasec/kube-bench:latest
+          command: ["kube-bench", "master"]
+          volumeMounts:
+            - name: var-lib-etcd
+              mountPath: /var/lib/etcd
+            - name: etc-kubernetes
+              mountPath: /etc/kubernetes
+              # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
+              # You can omit this mount if you specify --version as part of the command.
+            - name: usr-bin
+              mountPath: /usr/bin
+      restartPolicy: Never
+      volumes:
         - name: var-lib-etcd
-          mountPath: /var/lib/etcd
+          hostPath:
+            path: "/var/lib/etcd"
         - name: etc-kubernetes
-          mountPath: /etc/kubernetes
-          # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. 
-          # You can omit this mount if you specify --version as part of the command.
+          hostPath:
+            path: "/etc/kubernetes"
         - name: usr-bin
-          mountPath: /usr/bin
-      restartPolicy: Never
-      volumes:
-      - name: var-lib-etcd
-        hostPath:
-          path: "/var/lib/etcd"
-      - name: etc-kubernetes
-        hostPath:
-          path: "/etc/kubernetes"
-      - name: usr-bin
-        hostPath:
-          path: "/usr/bin"
+          hostPath:
+            path: "/usr/bin"
diff --git a/job-node.yaml b/job-node.yaml
index 81767bb..0bdc2a1 100644
--- a/job-node.yaml
+++ b/job-node.yaml
@@ -1,3 +1,4 @@
+---
 apiVersion: batch/v1
 kind: Job
 metadata:
@@ -7,31 +8,31 @@ spec:
     spec:
       hostPID: true
       containers:
-      - name: kube-bench
-        image: aquasec/kube-bench:latest
-        command: ["kube-bench","node"]
-        volumeMounts:
+        - name: kube-bench
+          image: aquasec/kube-bench:latest
+          command: ["kube-bench", "node"]
+          volumeMounts:
+            - name: var-lib-kubelet
+              mountPath: /var/lib/kubelet
+            - name: etc-systemd
+              mountPath: /etc/systemd
+            - name: etc-kubernetes
+              mountPath: /etc/kubernetes
+              # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
+              # You can omit this mount if you specify --version as part of the command.
+            - name: usr-bin
+              mountPath: /usr/bin
+      restartPolicy: Never
+      volumes:
         - name: var-lib-kubelet
-          mountPath: /var/lib/kubelet
+          hostPath:
+            path: "/var/lib/kubelet"
         - name: etc-systemd
-          mountPath: /etc/systemd
+          hostPath:
+            path: "/etc/systemd"
         - name: etc-kubernetes
-          mountPath: /etc/kubernetes
-          # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. 
-          # You can omit this mount if you specify --version as part of the command.           
+          hostPath:
+            path: "/etc/kubernetes"
         - name: usr-bin
-          mountPath: /usr/bin
-      restartPolicy: Never
-      volumes:
-      - name: var-lib-kubelet
-        hostPath:
-          path: "/var/lib/kubelet"
-      - name: etc-systemd
-        hostPath:
-          path: "/etc/systemd"
-      - name: etc-kubernetes
-        hostPath:
-          path: "/etc/kubernetes"
-      - name: usr-bin
-        hostPath:
-          path: "/usr/bin"
+          hostPath:
+            path: "/usr/bin"
diff --git a/job.yaml b/job.yaml
index 3fae9e1..411c164 100644
--- a/job.yaml
+++ b/job.yaml
@@ -1,3 +1,4 @@
+---
 apiVersion: batch/v1
 kind: Job
 metadata:
@@ -10,36 +11,36 @@ spec:
     spec:
       hostPID: true
       containers:
-      - name: kube-bench
-        image: aquasec/kube-bench:latest
-        command: ["kube-bench"]
-        volumeMounts:
+        - name: kube-bench
+          image: aquasec/kube-bench:latest
+          command: ["kube-bench"]
+          volumeMounts:
+            - name: var-lib-etcd
+              mountPath: /var/lib/etcd
+            - name: var-lib-kubelet
+              mountPath: /var/lib/kubelet
+            - name: etc-systemd
+              mountPath: /etc/systemd
+            - name: etc-kubernetes
+              mountPath: /etc/kubernetes
+              # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
+              # You can omit this mount if you specify --version as part of the command.
+            - name: usr-bin
+              mountPath: /usr/bin
+      restartPolicy: Never
+      volumes:
         - name: var-lib-etcd
-          mountPath: /var/lib/etcd
+          hostPath:
+            path: "/var/lib/etcd"
         - name: var-lib-kubelet
-          mountPath: /var/lib/kubelet
+          hostPath:
+            path: "/var/lib/kubelet"
         - name: etc-systemd
-          mountPath: /etc/systemd
+          hostPath:
+            path: "/etc/systemd"
         - name: etc-kubernetes
-          mountPath: /etc/kubernetes
-          # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. 
-          # You can omit this mount if you specify --version as part of the command.           
+          hostPath:
+            path: "/etc/kubernetes"
         - name: usr-bin
-          mountPath: /usr/bin
-      restartPolicy: Never
-      volumes:
-      - name: var-lib-etcd
-        hostPath:
-          path: "/var/lib/etcd"
-      - name: var-lib-kubelet
-        hostPath:
-          path: "/var/lib/kubelet"
-      - name: etc-systemd
-        hostPath:
-          path: "/etc/systemd"
-      - name: etc-kubernetes
-        hostPath:
-          path: "/etc/kubernetes"
-      - name: usr-bin
-        hostPath:
-          path: "/usr/bin"
+          hostPath:
+            path: "/usr/bin"
-- 
GitLab