From 4d3144ca21c0a57fe8bfe02da14907f456a3eb9c Mon Sep 17 00:00:00 2001
From: Florent Delannoy <florent.delannoy@gmail.com>
Date: Mon, 11 Mar 2019 18:05:33 +0000
Subject: [PATCH] Support JSON and YAML configuration

Support new ways of supplying the configuration to be tested, in
addition to the existing command-line-flag checks:
- JSON file through `jsonpath`
- YAML file through `yamlpath`

These new options are fully backwards-compatible with the existing
tests.

Added a new profile, 1.11-json, that expects a JSON kubelet
configuration file and scores accordingly. This profile is compatible
with EKS.
---
 Gopkg.lock                |   11 +
 README.md                 |   25 +-
 cfg/1.11-json/config.yaml |   30 +
 cfg/1.11-json/master.yaml | 1446 +++++++++++++++++++++++++++++++++++++
 cfg/1.11-json/node.yaml   |  508 +++++++++++++
 check/controls_test.go    |   33 +-
 check/data                |  129 ++++
 check/test.go             |  105 ++-
 check/test_test.go        |   60 ++
 9 files changed, 2306 insertions(+), 41 deletions(-)
 create mode 100644 cfg/1.11-json/config.yaml
 create mode 100644 cfg/1.11-json/master.yaml
 create mode 100644 cfg/1.11-json/node.yaml

diff --git a/Gopkg.lock b/Gopkg.lock
index 4f431e3..e74b52e 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -189,6 +189,17 @@
   pruneopts = "UT"
   revision = "c95af922eae69f190717a0b7148960af8c55a072"
 
+[[projects]]
+  digest = "1:e8e3acc03397f71fad44385631e665c639a8d55bd187bcfa6e70b695e3705edd"
+  name = "k8s.io/client-go"
+  packages = [
+    "third_party/forked/golang/template",
+    "util/jsonpath",
+  ]
+  pruneopts = "UT"
+  revision = "e64494209f554a6723674bd494d69445fb76a1d4"
+  version = "v10.0.0"
+
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
diff --git a/README.md b/README.md
index 1bfe745..230a550 100644
--- a/README.md
+++ b/README.md
@@ -149,7 +149,7 @@ These groups are further organized under `controls` which can be of the type `ma
 ## Tests
 Tests are the items we actually look for to determine if a check is successful or not. Checks can have multiple tests, which must all be successful for the check to pass.
 
-The syntax for tests:
+The syntax for tests operating on a flag:
 ```
 tests:
 - flag:
@@ -159,6 +159,29 @@ tests:
     value:
 ...
 ```
+
+If using a JSON config file, the syntax is:
+```
+tests:
+- jsonpath:
+  set:
+  compare:
+    op:
+    value:
+...
+```
+
+And for a YAML config file:
+```
+tests:
+- yamlpath:
+  set:
+  compare:
+    op:
+    value:
+...
+```
+
 Tests have various `operations` which are used to compare the output of audit commands for success.
 These operations are:
 
diff --git a/cfg/1.11-json/config.yaml b/cfg/1.11-json/config.yaml
new file mode 100644
index 0000000..9e923de
--- /dev/null
+++ b/cfg/1.11-json/config.yaml
@@ -0,0 +1,30 @@
+---
+## Controls Files.
+# These are YAML files that hold all the details for running checks.
+#
+## Uncomment to use different control file paths.
+# masterControls: ./cfg/master.yaml
+# nodeControls: ./cfg/node.yaml
+# federatedControls: ./cfg/federated.yaml
+
+# Master nodes are controlled by EKS and not user-accessible
+master:
+  components: []
+
+node:
+  kubernetes:
+    confs:
+      - "/var/lib/kubelet/kubeconfig"
+    kubeconfig:
+      - "/var/lib/kubelet/kubeconfig"
+
+  kubelet:
+    bins:
+      - "hyperkube kubelet"
+      - "kubelet"
+    defaultconf: "/etc/kubernetes/kubelet/kubelet-config.json"
+    defaultsvc: "/etc/systemd/system/kubelet.service"
+    defaultkubeconfig: "/var/lib/kubelet/kubeconfig"
+
+  proxy:
+    defaultkubeconfig: "/var/lib/kubelet/kubeconfig"
diff --git a/cfg/1.11-json/master.yaml b/cfg/1.11-json/master.yaml
new file mode 100644
index 0000000..0456578
--- /dev/null
+++ b/cfg/1.11-json/master.yaml
@@ -0,0 +1,1446 @@
+---
+controls:
+version: 1.11
+id: 1
+text: "Master Node Security Configuration"
+type: "master"
+groups:
+- id: 1.1
+  text: "API Server"
+  checks:
+  - id: 1.1.1
+    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--anonymous-auth"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the below parameter.
+      --anonymous-auth=false
+    scored: true
+
+  - id: 1.1.2
+    text: "Ensure that the --basic-auth-file argument is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--basic-auth-file"
+        set: false
+    remediation: |
+      Follow the documentation and configure alternate mechanisms for authentication. Then,
+      edit the API server pod specification file $apiserverconf
+      on the master node and remove the --basic-auth-file=<filename>
+      parameter.
+    scored: true
+
+  - id: 1.1.3
+    text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag:  "--insecure-allow-any-token"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and remove the --insecure-allow-any-token
+      parameter.
+    scored: true
+
+  - id: 1.1.4
+    text: "Ensure that the --kubelet-https argument is set to true (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--kubelet-https"
+        compare:
+          op: eq
+          value: true
+        set: true
+      - flag: "--kubelet-https"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and remove the --kubelet-https parameter.
+    scored: true
+
+  - id: 1.1.5
+    text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-bind-address"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and remove the --insecure-bind-address
+      parameter.
+    scored: true
+
+  - id: 1.1.6
+    text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-port"
+        compare:
+          op: eq
+          value: 0
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      apiserver.yaml on the master node and set the below parameter.
+      --insecure-port=0
+    scored: true
+
+  - id: 1.1.7
+    text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+        - flag:  "--secure-port"
+          compare:
+            op: gt
+            value: 0
+          set: true
+        - flag: "--secure-port"
+          set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and either remove the --secure-port parameter or
+      set it to a different (non-zero) desired port.
+    scored: true
+
+  - id: 1.1.8
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the below parameter.
+      --profiling=false
+    scored: true
+
+  - id: 1.1.9
+    text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--repair-malformed-updates"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the below parameter.
+      --repair-malformed-updates=false
+    scored: true
+
+  - id: 1.1.10
+    text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: nothave
+          value: AlwaysAdmit
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins parameter to a
+      value that does not include AlwaysAdmit.
+    scored: true
+
+  - id: 1.1.11
+    text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "AlwaysPullImages"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins to
+      include AlwaysPullImages.
+      --enable-admission-plugins=...,AlwaysPullImages,...
+    scored: true
+
+  - id: 1.1.12
+    text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "DenyEscalatingExec"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins parameter to a
+      value that includes DenyEscalatingExec.
+      --enable-admission-plugins=...,DenyEscalatingExec,...
+    scored: true
+
+  - id: 1.1.13
+    text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "SecurityContextDeny"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins parameter to
+      include SecurityContextDeny.
+      --enable-admission-plugins=...,SecurityContextDeny,...
+    scored: true
+
+  - id: 1.1.14
+    text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--disable-admission-plugins"
+        compare:
+          op: nothave
+          value: "NamespaceLifecycle"
+        set: true
+      - flag: "--disable-admission-plugins"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --disable-admission-plugins parameter to
+      ensure it does not include NamespaceLifecycle.
+      --disable-admission-plugins=...,NamespaceLifecycle,...
+    scored: true
+
+  - id: 1.1.15
+    text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-path"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --audit-log-path parameter to a suitable
+      path and file where you would like audit logs to be written, for example:
+      --audit-log-path=/var/log/apiserver/audit.log
+    scored: true
+
+  - id: 1.1.16
+    text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxage"
+        compare:
+          op: gte
+          value: 30
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --audit-log-maxage parameter to 30 or
+      as an appropriate number of days: --audit-log-maxage=30
+    scored: true
+
+  - id: 1.1.17
+    text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxbackup"
+        compare:
+          op: gte
+          value: 10
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --audit-log-maxbackup parameter to 10
+      or to an appropriate value.
+      --audit-log-maxbackup=10
+    scored: true
+
+  - id: 1.1.18
+    text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxsize"
+        compare:
+          op: gte
+          value: 100
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --audit-log-maxsize parameter to an
+      appropriate size in MB. For example, to set it as 100 MB:
+      --audit-log-maxsize=100
+    scored: true
+
+  - id: 1.1.19
+    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: nothave
+          value: "AlwaysAllow"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --authorization-mode parameter to
+      values other than AlwaysAllow. One such example could be as below.
+      --authorization-mode=RBAC
+    scored: true
+
+  - id: 1.1.20
+    text: "Ensure that the --token-auth-file parameter is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--token-auth-file"
+        set: false
+    remediation: |
+      Follow the documentation and configure alternate mechanisms for authentication. Then,
+      edit the API server pod specification file $apiserverconf
+      on the master node and remove the --token-auth-file=<filename>
+      parameter.
+    scored: true
+
+  - id: 1.1.21
+    text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--kubelet-certificate-authority"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and setup the TLS connection between the
+      apiserver and kubelets. Then, edit the API server pod specification file
+      $apiserverconf on the master node and set the --kubelet-certificate-authority
+      parameter to the path to the cert file for the certificate authority.
+      --kubelet-certificate-authority=<ca-string>
+    scored: true
+
+  - id: 1.1.22
+    text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--kubelet-client-certificate"
+        set: true
+      - flag: "--kubelet-client-key"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the
+      apiserver and kubelets. Then, edit API server pod specification file
+      $apiserverconf on the master node and set the
+      kubelet client certificate and key parameters as below.
+      --kubelet-client-certificate=<path/to/client-certificate-file>
+      --kubelet-client-key=<path/to/client-key-file>
+    scored: true
+
+  - id: 1.1.23
+    text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-lookup"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the below parameter.
+      --service-account-lookup=true
+    scored: true
+
+  - id: 1.1.24
+    text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "PodSecurityPolicy"
+        set: true
+    remediation: |
+      Follow the documentation and create Pod Security Policy objects as per your environment.
+      Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins parameter to a
+      value that includes PodSecurityPolicy :
+      --enable-admission-plugins=...,PodSecurityPolicy,...
+      Then restart the API Server.
+    scored: true
+
+  - id: 1.1.25
+    text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-key-file"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --service-account-key-file parameter
+      to the public key file for service accounts:
+      --service-account-key-file=<filename>
+    scored: true
+
+  - id: 1.1.26
+    text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as
+      appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--etcd-certfile"
+        set: true
+      - flag: "--etcd-keyfile"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the
+      apiserver and etcd. Then, edit the API server pod specification file
+      $apiserverconf on the master node and set the etcd
+      certificate and key file parameters.
+      --etcd-certfile=<path/to/client-certificate-file>
+      --etcd-keyfile=<path/to/client-key-file>
+    scored: true
+
+  - id: 1.1.27
+    text: "Ensure that the admission control plugin ServiceAccount is set(Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "ServiceAccount"
+        set: true
+    remediation: |
+      Follow the documentation and create ServiceAccount objects as per your environment.
+      Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins parameter to a
+      value that includes ServiceAccount.
+      --enable-admission-plugins=...,ServiceAccount,...
+    scored: true
+
+  - id: 1.1.28
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
+    as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--tls-cert-file"
+        set: true
+      - flag: "--tls-private-key-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+      Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the TLS certificate and private key file
+      parameters.
+      --tls-cert-file=<path/to/tls-certificate-file>
+      --tls-private-key-file=<path/to/tls-key-file>
+    scored: true
+
+  - id: 1.1.29
+    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--client-ca-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+      Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the client certificate authority file.
+      --client-ca-file=<path/to/client-ca-file>
+    scored: true
+
+  - id: 1.1.30
+    text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--tls-cipher-suites"
+        compare:
+          op: has
+          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the below parameter.
+      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+    scored: false
+
+  - id: 1.1.31
+    text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--etcd-cafile"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the
+      apiserver and etcd. Then, edit the API server pod specification file
+      $apiserverconf on the master node and set the etcd
+      certificate authority file parameter.
+      --etcd-cafile=<path/to/ca-file>
+    scored: true
+
+  - id: 1.1.32
+    text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: has
+          value: "Node"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --authorization-mode parameter to a
+      value that includes Node.
+      --authorization-mode=Node,RBAC
+    scored: true
+
+  - id: 1.1.33
+    text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "NodeRestriction"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and configure NodeRestriction plug-in on
+      kubelets. Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins parameter to a
+      value that includes NodeRestriction.
+      --enable-admission-plugins=...,NodeRestriction,...
+    scored: true
+
+  - id: 1.1.34
+    text: "Ensure that the --experimental-encryption-provider-config argument is
+    set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--experimental-encryption-provider-config"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and configure a EncryptionConfig file.
+      Then, edit the API server pod specification file $apiserverconf on the
+      master node and set the --experimental-encryption-provider-config parameter
+      to the path of that file:
+      --experimental-encryption-provider-config=</path/to/EncryptionConfig/File>
+    scored: true
+
+  - id: 1.1.35
+    text: "Ensure that the encryption provider is set to aescbc (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file,
+      choose aescbc as the encryption provider.
+      For example,
+      kind: EncryptionConfig
+      apiVersion: v1
+      resources:
+        - resources:
+          - secrets
+            providers:
+            - aescbc:
+                keys:
+                - name: key1
+                  secret: <32-byte base64-encoded secret>
+    scored: true
+
+  - id: 1.1.36
+    text: "Ensure that the admission control plugin EventRateLimit is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "EventRateLimit"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set the desired limits in a
+      configuration file. Then, edit the API server pod specification file
+      $apiserverconf and set the below parameters.
+      --enable-admission-plugins=...,EventRateLimit,...
+      --admission-control-config-file=<path/to/configuration/file>
+    scored: true
+
+  - id: 1.1.37
+    text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--feature-gates"
+        compare:
+          op: nothave
+          value: "AdvancedAuditing=false"
+        set: true
+      - flag: "--feature-gates"
+        set: false
+    remediation: |
+      Follow the Kubernetes documentation and set the desired audit policy in the
+      /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
+      and set the below parameters.
+      --audit-policy-file=/etc/kubernetes/audit-policy.yaml
+    scored: true
+
+  - id: 1.1.38
+    text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--request-timeout"
+        set: false
+      - flag: "--request-timeout"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      and set the below parameter as appropriate and if needed. For example,
+      --request-timeout=300s
+    scored: true
+
+  - id: 1.1.39
+    text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers ( Not Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--tls-cipher-suites"
+        compare:
+          op: eq
+          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+        set: true
+    remediation: |
+      Edit the API server pod specification file /etc/kubernetes/manifests
+      kube-apiserver.yaml on the master node and set the below parameter.
+      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+    scored: false
+
+- id: 1.2
+  text: "Scheduler"
+  checks:
+  - id: 1.2.1
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $schedulerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the Scheduler pod specification file $schedulerconf
+      file on the master node and set the below parameter.
+      --profiling=false
+    scored: true
+
+  - id: 1.2.2
+    text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
+    audit: "ps -ef | grep $schedulerbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--address"
+        compare:
+          op: eq
+          value: "127.0.0.1"
+        set: true
+      - flag: "--address"
+        set: false
+    remediation: |
+      Edit the Scheduler pod specification file $schedulerconf
+      file on the master node and ensure the correct value for the
+      --address parameter.
+    scored: true
+
+- id: 1.3
+  text: "Controller Manager"
+  checks:
+  - id: 1.3.1
+    text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--terminated-pod-gc-threshold"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
+      --terminated-pod-gc-threshold=10
+    scored: true
+
+  - id: 1.3.2
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node and set the below parameter.
+      --profiling=false
+    scored: true
+
+  - id: 1.3.3
+    text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--use-service-account-credentials"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node to set the below parameter.
+      --use-service-account-credentials=true
+    scored: true
+
+  - id: 1.3.4
+    text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-private-key-file"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node and set the --service-account-private-
+      key-file parameter to the private key file for service accounts.
+      --service-account-private-key-file=<filename>
+    scored: true
+
+  - id: 1.3.5
+    text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--root-ca-file"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node and set the --root-ca-file parameter to
+      the certificate bundle file.
+      --root-ca-file=<path/to/file>
+    scored: true
+
+  - id: 1.3.6
+    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--feature-gates"
+        compare:
+          op: eq
+          value: "RotateKubeletServerCertificate=true"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node and set the --feature-gates parameter to
+      include RotateKubeletServerCertificate=true.
+      --feature-gates=RotateKubeletServerCertificate=true
+    scored: true
+
+  - id: 1.3.7
+    text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--address"
+        compare:
+          op: eq
+          value: "127.0.0.1"
+        set: true
+      - flag: "--address"
+        set: false
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node and ensure the correct value
+      for the --address parameter.
+    scored: true
+
+- id: 1.4
+  text: "Configuration Files"
+  checks:
+  - id: 1.4.1
+    text: "Ensure that the API server pod specification file permissions are
+    set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+      - flag: "640"
+        compare:
+          op: eq
+          value: "640"
+        set: true
+      - flag: "600"
+        compare:
+          op: eq
+          value: "600"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $apiserverconf
+    scored: true
+
+  - id: 1.4.2
+    text: "Ensure that the API server pod specification file ownership is set to
+    root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $apiserverconf
+    scored: true
+
+  - id: 1.4.3
+    text: "Ensure that the controller manager pod specification file
+    permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+      - flag: "640"
+        compare:
+          op: eq
+          value: "640"
+        set: true
+      - flag: "600"
+        compare:
+          op: eq
+          value: "600"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $controllermanagerconf
+    scored: true
+
+  - id: 1.4.4
+    text: "Ensure that the controller manager pod specification file
+    ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $controllermanagerconf
+    scored: true
+
+  - id: 1.4.5
+    text: "Ensure that the scheduler pod specification file permissions are set
+    to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $schedulerconf
+    scored: true
+
+  - id: 1.4.6
+    text: "Ensure that the scheduler pod specification file ownership is set to
+    root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+    tests:
+      test_items:
+        - flag: "root:root"
+          compare:
+            op: eq
+            value: "root:root"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $schedulerconf
+    scored: true
+
+  - id: 1.4.7
+    text: "Ensure that the etcd pod specification file permissions are set to
+    644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $etcdconf
+    scored: true
+
+  - id: 1.4.8
+    text: "Ensure that the etcd pod specification file ownership is set to
+    root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $etcdconf
+    scored: true
+
+  - id: 1.4.9
+    text: "Ensure that the Container Network Interface file permissions are
+    set to 644 or more restrictive (Not Scored)"
+    audit: "stat -c %a <path/to/cni/files>"
+    type: manual
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 <path/to/cni/files>
+    scored: false
+
+  - id: 1.4.10
+    text: "Ensure that the Container Network Interface file ownership is set
+    to root:root (Not Scored)"
+    audit: "stat -c %U:%G <path/to/cni/files>"
+    type: manual
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root <path/to/cni/files>
+    scored: false
+
+  - id: 1.4.11
+    text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
+    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
+    tests:
+      test_items:
+      - flag: "700"
+        compare:
+          op: eq
+          value: "700"
+        set: true
+    remediation: |
+      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
+      from the below command:
+      ps -ef | grep $etcdbin
+      Run the below command (based on the etcd data directory found above). For example,
+      chmod 700 /var/lib/etcd
+    scored: true
+
+  - id: 1.4.12
+    text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
+    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
+    tests:
+      test_items:
+      - flag: "etcd:etcd"
+        set: true
+    remediation: |
+      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
+      from the below command:
+      ps -ef | grep $etcdbin
+      Run the below command (based on the etcd data directory found above). For example,
+      chown etcd:etcd /var/lib/etcd
+    scored: true
+
+  - id: 1.4.13
+    text: "Ensure that the admin.conf file permissions are set to 644 or
+    more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 /etc/kubernetes/admin.conf
+    scored: true
+
+  - id: 1.4.14
+    text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root /etc/kubernetes/admin.conf
+    scored: true
+
+  - id: 1.4.15
+    text: "Ensure that the scheduler.conf file permissions are set to 644 or
+    more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the
+      master node. For example, chmod 644 /etc/kubernetes/scheduler.conf
+    scored: true
+
+  - id: 1.4.16
+    text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the
+      master node. For example, chown root:root /etc/kubernetes/scheduler.conf
+    scored: true
+
+  - id: 1.4.17
+    text: "Ensure that the controller-manager.conf file permissions are set
+    to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the
+      master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf
+    scored: true
+
+  - id: 1.4.18
+    text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the
+      master node. For example, chown root:root /etc/kubernetes/controller-manager.conf
+    scored: true
+
+- id: 1.5
+  text: "etcd"
+  checks:
+  - id: 1.5.1
+    text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--cert-file"
+        set: true
+      - flag:  "--key-file"
+        set: true
+    remediation: |
+      Follow the etcd service documentation and configure TLS encryption.
+      Then, edit the etcd pod specification file $etcdconf on the
+      master node and set the below parameters.
+      --cert-file=</path/to/cert-file>
+      --key-file=</path/to/key-file>
+    scored: true
+
+  - id: 1.5.2
+    text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--client-cert-auth"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdconf on the master
+      node and set the below parameter.
+      --client-cert-auth="true"
+    scored: true
+
+  - id: 1.5.3
+    text: "Ensure that the --auto-tls argument is not set to true (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--auto-tls"
+        set: false
+      - flag: "--auto-tls"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdconf on the master
+      node and either remove the --auto-tls parameter or set it to false.
+        --auto-tls=false
+    scored: true
+
+  - id: 1.5.4
+    text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+    set as appropriate (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    type: "manual"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--peer-cert-file"
+        set: true
+      - flag: "--peer-key-file"
+        set: true
+    remediation: |
+      Follow the etcd service documentation and configure peer TLS encryption as appropriate
+      for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
+      master node and set the below parameters.
+      --peer-cert-file=</path/to/peer-cert-file>
+      --peer-key-file=</path/to/peer-key-file>
+    scored: true
+
+  - id: 1.5.5
+    text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    type: "manual"
+    tests:
+      test_items:
+      - flag: "--peer-client-cert-auth"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdconf on the master
+      node and set the below parameter.
+      --peer-client-cert-auth=true
+    scored: true
+
+  - id: 1.5.6
+    text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    type: "manual"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--peer-auto-tls"
+        set: false
+      - flag: "--peer-auto-tls"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdconf on the master
+      node and either remove the --peer-auto-tls parameter or set it to false.
+      --peer-auto-tls=false
+    scored: true
+
+  - id: 1.5.7
+    text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    type: "manual"
+    tests:
+      test_items:
+      - flag: "--trusted-ca-file"
+        set: true
+    remediation: |
+      Follow the etcd documentation and create a dedicated certificate authority setup for the
+      etcd service.
+      Then, edit the etcd pod specification file $etcdconf on the
+      master node and set the below parameter.
+      --trusted-ca-file=</path/to/ca-file>
+    scored: false
+
+- id: 1.6
+  text: "General Security Primitives"
+  checks:
+  - id: 1.6.1
+    text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
+    type: "manual"
+    remediation: |
+      Remove any unneeded clusterrolebindings :
+      kubectl delete clusterrolebinding [name]
+    scored: false
+
+  - id: 1.6.2
+    text: "Create administrative boundaries between resources using namespaces (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the documentation and create namespaces for objects in your deployment as you
+      need them.
+    scored: false
+
+  - id: 1.6.3
+    text: "Create network segmentation using Network Policies (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the documentation and create NetworkPolicy objects as you need them.
+    scored: false
+
+  - id: 1.6.4
+    text: "Ensure that the seccomp profile is set to docker/default in your pod
+    definitions (Not Scored)"
+    type: "manual"
+    remediation: |
+      Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+      would need to enable alpha features in the apiserver by passing the
+      "--feature-gates=AllAlpha=true" argument.
+      Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
+      parameter to "--feature-gates=AllAlpha=true"
+      KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+      Based on your system, restart the kube-apiserver service. For example:
+      systemctl restart kube-apiserver.service
+      Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+      example is as below:
+      apiVersion: v1
+      kind: Pod
+      metadata:
+        name: trustworthy-pod
+        annotations:
+          seccomp.security.alpha.kubernetes.io/pod: docker/default
+      spec:
+        containers:
+          - name: trustworthy-container
+            image: sotrustworthy:latest
+    scored: false
+
+  - id: 1.6.5
+    text: "Apply Security Context to Your Pods and Containers (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and apply security contexts to your pods. For a
+      suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+      Containers.
+    scored: false
+
+  - id: 1.6.6
+    text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and setup image provenance.
+    scored: false
+
+  - id: 1.6.7
+    text: "Configure Network policies as appropriate (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and setup network policies as appropriate.
+      For example, you could create a "default" isolation policy for a Namespace by creating a
+      NetworkPolicy that selects all pods but does not allow any traffic:
+      apiVersion: networking.k8s.io/v1
+      kind: NetworkPolicy
+      metadata:
+        name: default-deny
+      spec:
+        podSelector:
+    scored: false
+
+  - id: 1.6.8
+    text: "Place compensating controls in the form of PSP and RBAC for
+    privileged containers usage (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster.
+    scored: false
+
+- id: 1.7
+  text: "PodSecurityPolicies"
+  checks:
+  - id: 1.7.1
+    text: "Do not admit privileged containers (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
+    scored: false
+
+  - id: 1.7.2
+    text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
+    scored: false
+
+  - id: 1.7.3
+    text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
+    scored: false
+
+  - id: 1.7.4
+    text: "Do not admit containers wishing to share the host network namespace (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
+    scored: false
+
+  - id: 1.7.5
+    text: "Do not admit containers with allowPrivilegeEscalation (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
+    scored: false
+
+  - id: 1.7.6
+    text: "Do not admit root containers (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
+    scored: false
+
+  - id: 1.7.7
+    text: "Do not admit containers with dangerous capabilities (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+    scored: false
diff --git a/cfg/1.11-json/node.yaml b/cfg/1.11-json/node.yaml
new file mode 100644
index 0000000..3bd277b
--- /dev/null
+++ b/cfg/1.11-json/node.yaml
@@ -0,0 +1,508 @@
+---
+controls:
+version: 1.11
+id: 2
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+- id: 2.1
+  text: "Kubelet"
+  checks:
+  - id: 2.1.1
+    text: "Ensure that the --allow-privileged argument is set to false (Scored)"
+    audit: "ps -fC $kubeletbin"
+    tests:
+      test_items:
+      - flag: "--allow-privileged"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletsvc
+      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --allow-privileged=false
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.2
+    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      test_items:
+      - jsonpath: "{.authentication.anonymous.enabled}"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+      false .
+      If using executable arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --anonymous-auth=false
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.3
+    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      test_items:
+      - jsonpath: "{.authorization.mode}"
+        compare:
+          op: noteq
+          value: "AlwaysAllow"
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
+      If using executable arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameter in KUBELET_AUTHZ_ARGS variable.
+      --authorization-mode=Webhook
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.4
+    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      test_items:
+      - jsonpath: "{.authentication.x509.clientCAFile}"
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+      the location of the client CA file.
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameter in KUBELET_AUTHZ_ARGS variable.
+      --client-ca-file=<path/to/client-ca-file>
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.5
+    text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      bin_op: or
+      test_items:
+      - jsonpath: "{.readOnlyPort}"
+        set: false
+      - jsonpath: "{.readOnlyPort}"
+        compare:
+          op: eq
+          value: "0"
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set readOnlyPort to 0 .
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --read-only-port=0
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.6
+    text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      bin_op: or
+      test_items:
+      - jsonpath: "{.streamingConnectionIdleTimeout}"
+        set: false
+      - jsonpath: "{.streamingConnectionIdleTimeout}"
+        compare:
+          op: noteq
+          value: 0
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
+      value other than 0.
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --streaming-connection-idle-timeout=5m
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.7
+    text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      test_items:
+      - jsonpath: "{.protectKernelDefaults}"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set protectKernelDefaults: true .
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --protect-kernel-defaults=true
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.8
+    text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      bin_op: or
+      test_items:
+      - jsonpath: "{.makeIPTablesUtilChains}"
+        set: false
+      - jsonpath: "{.makeIPTablesUtilChains}"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true .
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      remove the --make-iptables-util-chains argument from the
+      KUBELET_SYSTEM_PODS_ARGS variable.
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.9
+    text: "Ensure that the --hostname-override argument is not set (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      test_items:
+      - jsonpath: "{.hostnameOverride}"
+        set: false
+    remediation: |
+      Edit the kubelet service file $kubeletsvc
+      on each worker node and remove the --hostname-override argument from the
+      KUBELET_SYSTEM_PODS_ARGS variable.
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.10
+    text: "Ensure that the --event-qps argument is set to 0 (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      test_items:
+      - jsonpath: "{.eventRecordQPS}"
+        compare:
+          op: eq
+          value: 0
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set eventRecordQPS: 0 .
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --event-qps=0
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.11
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      bin_op: and
+      test_items:
+      - jsonpath: "{.tlsCertFile}"
+        set: true
+      - jsonpath: "{.tlsPrivateKeyFile}"
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
+      file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
+      corresponding private key file.
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+      --tls-cert-file=<path/to/tls-certificate-file>
+      --tls-private-key-file=<path/to/tls-key-file>
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.12
+    text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      bin_op: or
+      test_items:
+      - jsonpath: "{.cadvisorPort}"
+        compare:
+          op: eq
+          value: 0
+        set: true
+      - jsonpath: "{.cadvisorPort}"
+        set: false
+    remediation: |
+      Edit the kubelet service file $kubeletsvc
+      on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
+      --cadvisor-port=0
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.13
+    text: "Ensure that the --rotate-certificates argument is not set to false (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      bin_op: or
+      test_items:
+      - jsonpath: "{.rotateCertificates}"
+        set: false
+      - jsonpath: "{.rotateCertificates}"
+        compare:
+          op: noteq
+          value: "false"
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
+      If using command line arguments, edit the kubelet service file $kubeletsvc 
+      on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.14
+    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      test_items:
+      - jsonpath: "{.featureGates.RotateKubeletServerCertificate}"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletsvc
+      on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+      --feature-gates=RotateKubeletServerCertificate=true
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.15
+    text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)"
+    audit: "cat $kubeletconf"
+    tests:
+      test_items:
+      - jsonpath: "{.tlsCipherSuites}"
+        compare:
+          op: eq
+          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+      If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
+      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+    scored: false
+
+- id: 2.2
+  text: "Configuration Files"
+  checks:
+    - id: 2.2.1
+      text: "Ensure that the kubelet.conf file permissions are set to 644 or
+      more restrictive (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'"
+      tests:
+        bin_op: or
+        test_items:
+          - flag: "644"
+            compare:
+              op: eq
+              value: "644"
+            set: true
+          - flag: "640"
+            compare:
+              op: eq
+              value: "640"
+            set: true
+          - flag: "600"
+            compare:
+              op: eq
+              value: "600"
+            set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on the each worker
+        node. For example,
+        chmod 644 $kubeletkubeconfig
+      scored: true
+
+    - id: 2.2.2
+      text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'"
+      tests:
+        test_items:
+          - flag: "root:root"
+            compare:
+              op: eq
+              value: root:root
+            set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on the each worker
+        node. For example,
+        chown root:root $kubeletkubeconfig
+      scored: true
+
+    - id: 2.2.3
+      text: "Ensure that the kubelet service file permissions are set to 644 or
+      more restrictive (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'"
+      tests:
+        bin_op: or
+        test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: 644
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on the each worker
+        node. For example,
+        chmod 644 $kubeletsvc
+      scored: true
+
+    - id: 2.2.4
+      text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'"
+      tests:
+        test_items:
+        - flag: "root:root"
+          set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on the each worker
+        node. For example,
+        chown root:root $kubeletsvc
+      scored: true
+
+    - id: 2.2.5
+      text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
+      audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'"
+      tests:
+        bin_op: or
+        test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on the each worker
+        node. For example,
+        chmod 644 $proxykubeconfig
+      scored: true
+
+    - id: 2.2.6
+      text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
+      audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'"
+      tests:
+        test_items:
+        - flag: "root:root"
+          set: true
+      remediation: |
+          Run the below command (based on the file location on your system) on the each worker
+          node. For example,
+          chown root:root $proxykubeconfig
+      scored: true
+
+    - id: 2.2.7
+      text: "Ensure that the certificate authorities file permissions are set to
+      644 or more restrictive (Scored)"
+      type: manual
+      remediation: |
+        Run the following command to modify the file permissions of the --client-ca-file
+        chmod 644 <filename>
+      scored: true
+
+    - id: 2.2.8
+      text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
+      audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'"
+      type: manual
+      remediation: |
+        Run the following command to modify the ownership of the --client-ca-file .
+        chown root:root <filename>
+      scored: true
+
+    - id: 2.2.9
+      text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'"
+      tests:
+        test_items:
+        - flag: "root:root"
+          set: true
+      remediation: |
+        Run the following command (using the config file location identified in the Audit step)
+        chown root:root $kubeletconf
+      scored: true
+
+    - id: 2.2.10
+      text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'"
+      tests:
+        bin_op: or
+        test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+      remediation: |
+        Run the following command (using the config file location identified in the Audit step)
+        chmod 644 $kubeletconf
+      scored: true
diff --git a/check/controls_test.go b/check/controls_test.go
index 3cf9b60..17c62e5 100644
--- a/check/controls_test.go
+++ b/check/controls_test.go
@@ -2,6 +2,8 @@ package check
 
 import (
 	"io/ioutil"
+	"os"
+	"path/filepath"
 	"testing"
 
 	yaml "gopkg.in/yaml.v2"
@@ -11,31 +13,28 @@ const cfgDir = "../cfg/"
 
 // validate that the files we're shipping are valid YAML
 func TestYamlFiles(t *testing.T) {
-	// TODO: make this list dynamic
-	dirs := []string{"1.6/", "1.7/"}
-
-	for _, dir := range dirs {
-		dir = cfgDir + dir
-
-		files, err := ioutil.ReadDir(dir)
+	err := filepath.Walk(cfgDir, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
-			t.Fatalf("error reading %s directory: %v", dir, err)
+			t.Fatalf("failure accessing path %q: %v\n", path, err)
 		}
-
-		for _, file := range files {
-
-			fileName := file.Name()
-			in, err := ioutil.ReadFile(dir + fileName)
+		if !info.IsDir() {
+			t.Logf("reading file: %s", path)
+			in, err := ioutil.ReadFile(path)
 			if err != nil {
-				t.Fatalf("error opening file %s: %v", fileName, err)
+				t.Fatalf("error opening file %s: %v", path, err)
 			}
 
 			c := new(Controls)
-
 			err = yaml.Unmarshal(in, c)
-			if err != nil {
-				t.Fatalf("failed to load YAML from %s: %v", fileName, err)
+			if err == nil {
+				t.Logf("YAML file successfully unmarshalled: %s", path)
+			} else {
+				t.Fatalf("failed to load YAML from %s: %v", path, err)
 			}
 		}
+		return nil
+	})
+	if err != nil {
+		t.Fatalf("failure walking cfg dir: %v\n", err)
 	}
 }
diff --git a/check/data b/check/data
index 88bdc85..b3a4cfe 100644
--- a/check/data
+++ b/check/data
@@ -157,4 +157,133 @@ groups:
               value: Something
             set: true
 
+    - id: 14
+      text: "jsonpath correct value on field"
+      tests:
+        test_items:
+          - jsonpath: "{.readOnlyPort}"
+            compare:
+              op: eq
+              value: 15000
+            set: true
+          - jsonpath: "{.readOnlyPort}"
+            compare:
+              op: gte
+              value: 15000
+            set: true
+          - jsonpath: "{.readOnlyPort}"
+            compare:
+              op: lte
+              value: 15000
+            set: true
+
+    - id: 15
+      text: "jsonpath correct case-sensitive value on string field"
+      tests:
+        test_items:
+          - jsonpath: "{.stringValue}"
+            compare:
+              op: noteq
+              value: "None"
+            set: true
+          - jsonpath: "{.stringValue}"
+            compare:
+              op: noteq
+              value: "webhook,Something,RBAC"
+            set: true
+          - jsonpath: "{.stringValue}"
+            compare:
+              op: eq
+              value: "WebHook,Something,RBAC"
+            set: true
+
+    - id: 16
+      text: "jsonpath correct value on boolean field"
+      tests:
+        test_items:
+          - jsonpath: "{.trueValue}"
+            compare:
+              op: noteq
+              value: somethingElse
+            set: true
+          - jsonpath: "{.trueValue}"
+            compare:
+              op: noteq
+              value: false
+            set: true
+          - jsonpath: "{.trueValue}"
+            compare:
+              op: eq
+              value: true
+            set: true
+
+    - id: 17
+      text: "jsonpath field absent"
+      tests:
+        test_items:
+          - jsonpath: "{.notARealField}"
+            set: false
+
+    - id: 18
+      text: "jsonpath correct value on nested field"
+      tests:
+        test_items:
+          - jsonpath: "{.authentication.anonymous.enabled}"
+            compare:
+              op: eq
+              value: "false"
+            set: true
+
+    - id: 19
+      text: "yamlpath correct value on field"
+      tests:
+        test_items:
+          - yamlpath: "{.readOnlyPort}"
+            compare:
+              op: gt
+              value: 14999
+            set: true
+
+    - id: 20
+      text: "yamlpath field absent"
+      tests:
+        test_items:
+          - yamlpath: "{.fieldThatIsUnset}"
+            set: false
 
+    - id: 21
+      text: "yamlpath correct value on nested field"
+      tests:
+        test_items:
+          - yamlpath: "{.authentication.anonymous.enabled}"
+            compare:
+              op: eq
+              value: "false"
+            set: true
+
+    - id: 22
+      text: "jsonpath on invalid json"
+      tests:
+        test_items:
+          - jsonpath: "{.authentication.anonymous.enabled}"
+            compare:
+              op: eq
+              value: "false"
+            set: true
+
+    - id: 23
+      text: "jsonpath with broken expression"
+      tests:
+        test_items:
+          - jsonpath: "{.missingClosingBrace"
+            set: true
+
+    - id: 24
+      text: "yamlpath on invalid yaml"
+      tests:
+        test_items:
+          - yamlpath: "{.authentication.anonymous.enabled}"
+            compare:
+              op: eq
+              value: "false"
+            set: true
diff --git a/check/test.go b/check/test.go
index 7a74634..6ac8d0a 100644
--- a/check/test.go
+++ b/check/test.go
@@ -15,11 +15,16 @@
 package check
 
 import (
+	"bytes"
+	"encoding/json"
 	"fmt"
 	"os"
 	"regexp"
 	"strconv"
 	"strings"
+
+	yaml "gopkg.in/yaml.v2"
+	"k8s.io/client-go/util/jsonpath"
 )
 
 // test:
@@ -37,11 +42,13 @@ const (
 )
 
 type testItem struct {
-	Flag    string
-	Output  string
-	Value   string
-	Set     bool
-	Compare compare
+	Flag     string
+	Jsonpath string
+	Yamlpath string
+	Output   string
+	Value    string
+	Set      bool
+	Compare  compare
 }
 
 type compare struct {
@@ -54,33 +61,85 @@ type testOutput struct {
 	actualResult string
 }
 
+func failTestItem(s string) *testOutput {
+	return &testOutput{testResult: false, actualResult: s}
+}
+
 func (t *testItem) execute(s string) *testOutput {
 	result := &testOutput{}
-	match := strings.Contains(s, t.Flag)
+	var match bool
+	var flagVal string
+
+	if t.Flag != "" {
+		// Flag comparison: check if the flag is present in the input
+		match = strings.Contains(s, t.Flag)
+	} else {
+		// Means either t.Jsonpath != "" or t.Yamlpath != ""
+		// Find out and convert the input as needed
+		buf := new(bytes.Buffer)
+		var jsonInterface interface{}
+		var pathExpression string
+
+		if t.Yamlpath != "" {
+			pathExpression = t.Yamlpath
+			err := yaml.Unmarshal([]byte(s), &jsonInterface)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "failed to load YAML from provided input \"%s\": %v\n", s, err)
+				return failTestItem("failed to load YAML")
+			}
+		} else if t.Jsonpath != "" {
+			pathExpression = t.Jsonpath
+			err := json.Unmarshal([]byte(s), &jsonInterface)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "failed to load JSON from provided input: \"%s\": %v\n", s, err)
+				return failTestItem("failed to load JSON")
+			}
+		}
+
+		// Parse the jsonpath/yamlpath expression...
+		j := jsonpath.New("jsonpath")
+		j.AllowMissingKeys(true)
+		err := j.Parse(pathExpression)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "unable to parse path expression \"%s\": %v\n", pathExpression, err)
+			return failTestItem("unable to parse path expression")
+		}
+
+		err = j.Execute(buf, jsonInterface)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "error executing path expression \"%s\": %v\n", pathExpression, err)
+			return failTestItem("error executing path expression")
+		}
+
+		jsonpathResult := fmt.Sprintf("%s", buf)
+		match = (jsonpathResult != "")
+		flagVal = jsonpathResult
+	}
 
 	if t.Set {
-		var flagVal string
 		isset := match
 
 		if isset && t.Compare.Op != "" {
-			// Expects flags in the form;
-			// --flag=somevalue
-			// --flag
-			// somevalue
-			//pttn := `(` + t.Flag + `)(=)*([^\s,]*) *`
-			pttn := `(` + t.Flag + `)(=)*([^\s]*) *`
-			flagRe := regexp.MustCompile(pttn)
-			vals := flagRe.FindStringSubmatch(s)
-
-			if len(vals) > 0 {
-				if vals[3] != "" {
-					flagVal = vals[3]
+			if t.Flag != "" {
+				// Expects flags in the form;
+				// --flag=somevalue
+				// --flag
+				// somevalue
+				//pttn := `(` + t.Flag + `)(=)*([^\s,]*) *`
+				pttn := `(` + t.Flag + `)(=)*([^\s]*) *`
+				flagRe := regexp.MustCompile(pttn)
+				vals := flagRe.FindStringSubmatch(s)
+
+				if len(vals) > 0 {
+					if vals[3] != "" {
+						flagVal = vals[3]
+					} else {
+						flagVal = vals[1]
+					}
 				} else {
-					flagVal = vals[1]
+					fmt.Fprintf(os.Stderr, "invalid flag in testitem definition")
+					os.Exit(1)
 				}
-			} else {
-				fmt.Fprintf(os.Stderr, "invalid flag in testitem definition")
-				os.Exit(1)
 			}
 
 			result.actualResult = strings.ToLower(flagVal)
diff --git a/check/test_test.go b/check/test_test.go
index 4b96e07..24ba757 100644
--- a/check/test_test.go
+++ b/check/test_test.go
@@ -110,6 +110,38 @@ func TestTestExecute(t *testing.T) {
 			controls.Groups[0].Checks[13],
 			"2:45 ../kubernetes/kube-apiserver --option --admission-control=Something ---audit-log-maxage=40",
 		},
+		{
+			controls.Groups[0].Checks[14],
+			"{\"readOnlyPort\": 15000}",
+		},
+		{
+			controls.Groups[0].Checks[15],
+			"{\"stringValue\": \"WebHook,Something,RBAC\"}",
+		},
+		{
+			controls.Groups[0].Checks[16],
+			"{\"trueValue\": true}",
+		},
+		{
+			controls.Groups[0].Checks[17],
+			"{\"readOnlyPort\": 15000}",
+		},
+		{
+			controls.Groups[0].Checks[18],
+			"{\"authentication\": { \"anonymous\": {\"enabled\": false}}}",
+		},
+		{
+			controls.Groups[0].Checks[19],
+			"readOnlyPort: 15000",
+		},
+		{
+			controls.Groups[0].Checks[20],
+			"readOnlyPort: 15000",
+		},
+		{
+			controls.Groups[0].Checks[21],
+			"authentication:\n  anonymous:\n    enabled: false",
+		},
 	}
 
 	for _, c := range cases {
@@ -119,3 +151,31 @@ func TestTestExecute(t *testing.T) {
 		}
 	}
 }
+
+func TestTestExecuteExceptions(t *testing.T) {
+
+	cases := []struct {
+		*Check
+		str string
+	}{
+		{
+			controls.Groups[0].Checks[22],
+			"this is not valid json {} at all",
+		},
+		{
+			controls.Groups[0].Checks[23],
+			"{\"key\": \"value\"}",
+		},
+		{
+			controls.Groups[0].Checks[24],
+			"broken } yaml\nenabled: true",
+		},
+	}
+
+	for _, c := range cases {
+		res := c.Tests.execute(c.str).testResult
+		if res {
+			t.Errorf("%s, expected:%v, got:%v\n", c.Text, false, res)
+		}
+	}
+}
-- 
GitLab