diff --git a/cfg/1.8/federated.yaml b/cfg/1.8/federated.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b324c508e10283f81697408ef4b6aa19dfec28cd
--- /dev/null
+++ b/cfg/1.8/federated.yaml
@@ -0,0 +1,327 @@
+---
+controls:
+version: 1.8
+id: 3
+text: "Federated Deployments"
+type: "federated"
+groups:
+- id: 3.1
+  text: "Federation API Server"
+  checks:
+  - id: 3.1.1
+    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--anonymous-auth"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the deployment specs and set --anonymous-auth=false .
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
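+  # Schema note (reader's summary, not part of the benchmark text): "audit" is the command whose
+  # output is inspected, "flag" is the argument searched for in that output, "set: true" requires
+  # the flag to be present (set: false requires its absence), and "compare" applies "op" to the
+  # flag's value. Names such as $fedapiserverbin appear to be placeholders substituted from the
+  # tool's configuration at run time.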
+
+  - id: 3.1.2
+    text: "Ensure that the --basic-auth-file argument is not set (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--basic-auth-file"
+        set: false
+    remediation: |
+      Follow the documentation and configure alternate mechanisms for authentication. Then,
+      edit the deployment specs and remove "--basic-auth-file=<filename>" .
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.3
+    text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-allow-any-token"
+        set: false
+    remediation: |
+      Edit the deployment specs and remove --insecure-allow-any-token .
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.4
+    text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-bind-address"
+        set: false
+    remediation: |
+      Edit the deployment specs and remove --insecure-bind-address .
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.5
+    text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-port"
+        compare:
+          op: eq
+          value: 0
+        set: true
+    remediation: |
+      Edit the deployment specs and set --insecure-port=0 .
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.6
+    text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests: 
+      bin_op: or
+      test_items:
+      - flag:  "--secure-port"
+        compare:
+          op: gt
+          value: 0
+        set: true
+      - flag: "--secure-port"
+        set: false
+    remediation: |
+      Edit the deployment specs and set the --secure-port argument to the desired port.
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
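+  # "bin_op: or" above makes the check pass if either test_item holds: --secure-port present with
+  # a value greater than 0, or --secure-port not set at all. Checks that use "bin_op: and" instead
+  # require every test_item to hold.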
+
+  - id: 3.1.7
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the deployment specs and set "--profiling=false" :
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.8
+    text: "Ensure that the admission control policy is not set to AlwaysAdmit (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: nothave
+          value: AlwaysAdmit 
+        set: true
+    remediation: |
+      Edit the deployment specs and set --admission-control argument to a value that does not
+      include AlwaysAdmit .
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.9
+    text: "Ensure that the admission control policy is set to NamespaceLifecycle (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "admission-control"
+        compare:
+          op: has
+          value: "NamespaceLifecycle"
+        set: true
+    remediation: |
+      Edit the deployment specs and set --admission-control argument to a value that includes
+      NamespaceLifecycle .
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.10
+    text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-path"
+        set: true
+    remediation: "Edit the deployment specs and set --audit-log-path argument as appropriate.\n
+            kubectl edit deployments federation-apiserver-deployment --namespace=federation-system"
+    scored: true
+
+  - id: 3.1.11
+    text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxage"
+        compare:
+          op: gte
+          value: 30
+        set: true
+    remediation: |
+      Edit the deployment specs and set --audit-log-maxage to 30 or as appropriate.
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+            
+  - id: 3.1.12
+    text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxbackup"
+        compare:
+          op: gte
+          value: 10
+        set: true
+    remediation: |
+      Edit the deployment specs and set --audit-log-maxbackup to 10 or as appropriate.
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.13
+    text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxsize"
+        compare:
+          op: gte
+          value: 100
+        set: true
+    remediation: |
+      Edit the deployment specs and set --audit-log-maxsize to 100 or as appropriate.
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.14
+    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: nothave
+          value: "AlwaysAllow"
+        set: true
+    remediation: |
+      Edit the deployment specs and set --authorization-mode argument to a value other than
+      AlwaysAllow
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.15
+    text: "Ensure that the --token-auth-file parameter is not set (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--token-auth-file"
+        set: false
+    remediation: |
+      Follow the documentation and configure alternate mechanisms for authentication. Then,
+      edit the deployment specs and remove the --token-auth-file=<filename> argument.
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.16
+    text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-lookup"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the deployment specs and set "--service-account-lookup=true" .
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.17
+    text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-key-file"
+        set: true
+    remediation: |
+      Edit the deployment specs and set --service-account-key-file argument as appropriate.
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.18
+    text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as 
+    appropriate (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--etcd-certfile"
+        set: true
+      - flag: "--etcd-keyfile"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the
+      federation apiserver and etcd. Then, edit the deployment specs and set the
+      "--etcd-certfile=<path/to/client-certificate-file>" and
+      "--etcd-keyfile=<path/to/client-key-file>" arguments.
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+  - id: 3.1.19
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as 
+    appropriate (Scored)"
+    audit: "ps -ef | grep $fedapiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--tls-cert-file"
+        set: true
+      - flag: "--tls-private-key-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the federation
+      apiserver. Then, edit the deployment specs and set
+      "--tls-cert-file=<path/to/tls-certificate-file>" and
+      "--tls-private-key-file=<path/to/tls-key-file>":
+      kubectl edit deployments federation-apiserver-deployment --namespace=federation-system
+    scored: true
+
+- id: 3.2
+  text: "Federation Controller Manager"
+  checks:
+  - id: 3.2.1
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $fedcontrollermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the deployment specs and set "--profiling=false" :
+      kubectl edit deployments federation-controller-manager-deployment --namespace=federation-system
+    scored: true
diff --git a/cfg/1.8/master.yaml b/cfg/1.8/master.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..92918a9fe8cf1b82ad40f491c0e044ca29460282
--- /dev/null
+++ b/cfg/1.8/master.yaml
@@ -0,0 +1,1328 @@
+---
+controls:
+version: 1.8
+id: 1
+text: "Master Node Security Configuration"
+type: "master"
+groups:
+- id: 1.1
+  text: "API Server"
+  checks:
+  - id: 1.1.1
+    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--anonymous-auth"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: | 
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the below parameter.
+      --anonymous-auth=false
+
+    scored: true
+
+  - id: 1.1.2
+    text: "Ensure that the --basic-auth-file argument is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--basic-auth-file"
+        set: false
+    remediation: |
+      Follow the documentation and configure alternate mechanisms for authentication. Then,
+      edit the API server pod specification file $apiserverpodspec
+      on the master node and remove the --basic-auth-file=<filename>
+      parameter.
+    scored: true
+
+  - id: 1.1.3
+    text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag:  "--insecure-allow-any-token"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and remove the --insecure-allow-any-token
+      parameter.
+    scored: true
+
+  - id: 1.1.4
+    text: "Ensure that the --kubelet-https argument is set to true (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests: 
+      bin_op: or
+      test_items:
+      - flag: "--kubelet-https"
+        compare:
+          op: eq
+          value: true
+        set: true
+      - flag: "--kubelet-https"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and remove the --kubelet-https parameter.
+    scored: true
+
+  - id: 1.1.5
+    text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-bind-address"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and remove the --insecure-bind-address
+      parameter.
+    scored: true
+
+  - id: 1.1.6
+    text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-port"
+        compare:
+          op: eq
+          value: 0
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the below parameter.
+      --insecure-port=0
+    scored: true
+
+  - id: 1.1.7
+    text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests: 
+      bin_op: or
+      test_items:
+        - flag:  "--secure-port"
+          compare:
+            op: gt
+            value: 0
+          set: true
+        - flag: "--secure-port"
+          set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and either remove the --secure-port parameter or
+      set it to a different (non-zero) desired port.
+    scored: true
+
+  - id: 1.1.8
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the below parameter.
+      --profiling=false
+    scored: true
+
+  - id: 1.1.9
+    text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--repair-malformed-updates"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the below parameter.
+      --repair-malformed-updates=false
+    scored: true
+
+  - id: 1.1.10
+    text: "Ensure that the admission control policy is not set to AlwaysAdmit (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: nothave
+          value: AlwaysAdmit 
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --admission-control parameter to a
+      value that does not include AlwaysAdmit .
+    scored: true
+
+  - id: 1.1.11
+    text: "Ensure that the admission control policy is set to AlwaysPullImages (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: has
+          value: "AlwaysPullImages"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --admission-control parameter to
+      include AlwaysPullImages .
+      --admission-control=...,AlwaysPullImages,...
+    scored: true
+
+  - id: 1.1.12
+    text: "Ensure that the admission control policy is set to DenyEscalatingExec (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: has
+          value: "DenyEscalatingExec"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --admission-control parameter to a
+      value that includes DenyEscalatingExec .
+      --admission-control=...,DenyEscalatingExec,...
+    scored: true
+
+  - id: 1.1.13
+    text: "Ensure that the admission control policy is set to SecurityContextDeny (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: has
+          value: "SecurityContextDeny"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --admission-control parameter to
+      include SecurityContextDeny .
+      --admission-control=...,SecurityContextDeny,...
+    scored: true
+
+  - id: 1.1.14
+    text: "Ensure that the admission control policy is set to NamespaceLifecycle (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "admission-control"
+        compare:
+          op: has
+          value: "NamespaceLifecycle"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --admission-control parameter to
+      include NamespaceLifecycle .
+      --admission-control=...,NamespaceLifecycle,...
+    scored: true
+
+  - id: 1.1.15
+    text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-path"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --audit-log-path parameter to a suitable
+      path and file where you would like audit logs to be written, for example:
+      --audit-log-path=/var/log/apiserver/audit.log
+    scored: true
+
+  - id: 1.1.16
+    text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxage"
+        compare:
+          op: gte
+          value: 30
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --audit-log-maxage parameter to 30 or
+      as an appropriate number of days:
+      --audit-log-maxage=30
+    scored: true
+
+  - id: 1.1.17
+    text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxbackup"
+        compare:
+          op: gte
+          value: 10
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --audit-log-maxbackup parameter to 10
+      or to an appropriate value.
+      --audit-log-maxbackup=10
+    scored: true
+
+  - id: 1.1.18
+    text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxsize"
+        compare:
+          op: gte
+          value: 100
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --audit-log-maxsize parameter to an
+      appropriate size in MB. For example, to set it as 100 MB:
+      --audit-log-maxsize=100
+    scored: true
+
+  - id: 1.1.19
+    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: nothave
+          value: "AlwaysAllow"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --authorization-mode parameter to
+      values other than AlwaysAllow . One such example could be as below.
+      --authorization-mode=RBAC
+    scored: true
+
+  - id: 1.1.20
+    text: "Ensure that the --token-auth-file parameter is not set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--token-auth-file"
+        set: false
+    remediation: |
+      Follow the documentation and configure alternate mechanisms for authentication. Then,
+      edit the API server pod specification file $apiserverpodspec
+      on the master node and remove the --token-auth-file=<filename>
+      parameter.
+    scored: true
+
+  - id: 1.1.21
+    text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--kubelet-certificate-authority"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and setup the TLS connection between the apiserver
+      and kubelets. Then, edit the API server pod specification file
+      $apiserverpodspec on the master node and set the
+      --kubelet-certificate-authority parameter to the path to the cert file for the
+      certificate authority.
+      --kubelet-certificate-authority=<ca-string>
+    scored: true
+
+  - id: 1.1.22
+    text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are
+      set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--kubelet-client-certificate"
+        set: true
+      - flag: "--kubelet-client-key"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the
+      apiserver and kubelets. Then, edit API server pod specification file
+      $apiserverpodspec on the master node and set the
+      kubelet client certificate and key parameters as below.
+      --kubelet-client-certificate=<path/to/client-certificate-file>
+      --kubelet-client-key=<path/to/client-key-file>
+    scored: true
+
+  - id: 1.1.23
+    text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-lookup"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the below parameter.
+      --service-account-lookup=true
+    scored: true
+
+  - id: 1.1.24
+    text: "Ensure that the admission control policy is set to PodSecurityPolicy (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: has
+          value: "PodSecurityPolicy"
+        set: true
+    remediation: |
+      Follow the documentation and create Pod Security Policy objects as per your environment.
+      Then, edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --admission-control parameter to a
+      value that includes PodSecurityPolicy :
+      --admission-control=...,PodSecurityPolicy,...
+
+      Then restart the API Server.
+    scored: true
+
+  - id: 1.1.25
+    text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-key-file"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --service-account-key-file parameter
+      to the public key file for service accounts:
+      --service-account-key-file=<filename>
+    scored: true
+
+  - id: 1.1.26
+    text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as 
+      appropriate (Scored"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--etcd-certfile"
+        set: true
+      - flag: "--etcd-keyfile"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the
+      apiserver and etcd. Then, edit the API server pod specification file
+      $apiserverpodspec on the master node and set the etcd
+      certificate and key file parameters.
+      --etcd-certfile=<path/to/client-certificate-file>
+      --etcd-keyfile=<path/to/client-key-file>
+    scored: true
+
+  - id: 1.1.27
+    text: "Ensure that the admission control policy is set to ServiceAccount (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: has
+          value: "ServiceAccount"
+        set: true
+    remediation: |
+      Follow the documentation and create ServiceAccount objects as per your environment.
+      Then, edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --admission-control parameter to a
+      value that includes ServiceAccount .
+      --admission-control=...,ServiceAccount,...
+    scored: true
+
+  - id: 1.1.28
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
+    as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--tls-cert-file"
+        set: true
+      - flag: "--tls-private-key-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+      Then, edit the API server pod specification file $apiserverpodspec
+      on the master node and set the TLS certificate and private key file
+      parameters.
+      --tls-cert-file=<path/to/tls-certificate-file>
+      --tls-private-key-file=<path/to/tls-key-file>
+    scored: true
+
+  - id: 1.1.29
+    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--client-ca-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+      Then, edit the API server pod specification file $apiserverpodspec
+      on the master node and set the client certificate authority file.
+      --client-ca-file=<path/to/client-ca-file>
+    scored: true
+
+  - id: 1.1.30
+    text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--etcd-cafile"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the
+      apiserver and etcd. Then, edit the API server pod specification file
+      $apiserverpodspec on the master node and set the etcd
+      certificate authority file parameter.
+      --etcd-cafile=<path/to/ca-file>
+    scored: true
+
+  - id: 1.1.31
+    text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: has
+          value: "Node"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --authorization-mode parameter to a
+      value that includes Node .
+      --authorization-mode=Node,RBAC
+    scored: true
+
+  - id: 1.1.32
+    text: "Ensure that the admission control policy is set to NodeRestriction (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: has
+          value: "NodeRestriction"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
+      Then, edit the API server pod specification file $apiserverpodspec
+      on the master node and set the --admission-control parameter to a
+      value that includes NodeRestriction.
+      --admission-control=...,NodeRestriction,...
+    scored: true
+
+  - id: 1.1.33
+    text: "1.1.34 Ensure that the --experimental-encryption-provider-config argument is
+    set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--experimental-encryption-provider-config"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and configure an EncryptionConfig file. Then, edit
+      the API server pod specification file $apiserverpodspec
+      on the master node and set the --experimental-encryption-provider-config parameter
+      to the path of that file:
+      --experimental-encryption-provider-config=</path/to/EncryptionConfig/File>
+    scored: true
+
+  - id: 1.1.34
+    text: "Ensure that the encryption provider is set to aescbc (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and configure an EncryptionConfig file. In this file,
+      choose aescbc as the encryption provider.
+      For example,
+      kind: EncryptionConfig
+      apiVersion: v1
+      resources:
+        - resources:
+          - secrets
+          providers:
+          - aescbc:
+              keys:
+              - name: key1
+                secret: <32-byte base64-encoded secret>
+    scored: true
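+  # A 32-byte random key suitable for the aescbc provider can be generated with standard tools,
+  # for example (a suggestion, not part of the benchmark text):
+  #   head -c 32 /dev/urandom | base64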
+
+  - id: 1.1.35
+    text: "Ensure that the admission control policy is set to EventRateLimit (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--admission-control"
+        compare:
+          op: has
+          value: "EventRateLimit"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set the desired limits in a configuration file.
+      Then, edit the API server pod specification file $apiserverpodspec
+      and set the below parameters.
+      --admission-control=EventRateLimit
+      --admission-control-config-file=<path/to/configuration/file>
+    scored: true
+
+  - id: 1.1.36
+    text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and set the desired audit policy in the
+      /etc/kubernetes/audit-policy.yaml file.
+      Then, edit the API server pod specification file $apiserverpodspec
+      and set the below parameters.
+      --audit-policy-file=/etc/kubernetes/audit-policy.yaml
+    scored: true
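+  # Note: AdvancedAuditing is a beta feature gate enabled by default in Kubernetes 1.8, which is
+  # presumably why this is a manual check; it should only fail if the gate has been explicitly
+  # disabled, e.g. with --feature-gates=AdvancedAuditing=false.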
+
+  - id: 1.1.37
+    text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    type: "manual"
+    remediation: |
+      Edit the API server pod specification file $apiserverpodspec
+      and set the below parameter as appropriate and if needed. For example,
+      --request-timeout=300
+    scored: true
+
+- id: 1.2
+  text: "Scheduler"
+  checks:
+  - id: 1.2.1
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $schedulerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the Scheduler pod specification file $schedulerpodspec
+      on the master node and set the below parameter.
+      --profiling=false
+    scored: true
+
+- id: 1.3
+  text: "Controller Manager"
+  checks:
+  - id: 1.3.1
+    text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+        - flag: "--terminated-pod-gc-threshold"
+          set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerpodspec
+      on the master node and set the --terminated-pod-gc-threshold
+      parameter to an appropriate threshold, for example:
+      --terminated-pod-gc-threshold=10
+    scored: true
+
+  - id: 1.3.2
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerpodspec
+      on the master node and set the below parameter.
+      --profiling=false
+    scored: true
+
+  - id: 1.3.3
+    text: "Ensure that the --use-service-account-credentials argument is set"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--use-service-account-credentials"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerpodspec
+      on the master node to set the below parameter.
+      --use-service-account-credentials=true
+    scored: true
+
+  - id: 1.3.4
+    text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-private-key-file"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerpodspec
+      on the master node and set the --service-account-private-key-file
+      parameter to the private key file for service accounts.
+      --service-account-private-key-file=<filename>
+    scored: true
+
+  - id: 1.3.5
+    text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--root-ca-file"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerpodspec
+      on the master node and set the --root-ca-file parameter to
+      the certificate bundle file.
+      --root-ca-file=<path/to/file>
+    scored: true
+
+  - id: 1.3.6
+    text: "Apply Security Context to Your Pods and Containers (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and apply security contexts to your pods. For a
+      suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+      Containers.
+    scored: false
+
+  - id: 1.3.7
+    text: " Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "RotateKubeletServerCertificate"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerpodspec
+      on the master node and set the --feature-gates parameter to
+      include RotateKubeletServerCertificate=true.
+      --feature-gates=RotateKubeletServerCertificate=true
+    scored: true
+
+- id: 1.4
+  text: "Configuration Files"
+  checks:
+  - id: 1.4.1
+    text: "Ensure that the API server pod specification file permissions are
+    set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $apiserverpodspec; then stat -c %a $apiserverpodspec; fi'"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+      - flag: "640"
+        compare:
+          op: eq
+          value: "640"
+        set: true
+      - flag: "600"
+        compare:
+          op: eq
+          value: "600"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $apiserverpodspec
+    scored: true
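+  # The audit command prints the file's octal mode ("stat -c %a"); with the or-ed test_items the
+  # check passes when the reported mode is exactly 644, 640 or 600, the modes this benchmark
+  # treats as "644 or more restrictive". On a kubeadm-style layout the file would typically be
+  # /etc/kubernetes/manifests/kube-apiserver.yaml (path given only as an illustration).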
+
+  - id: 1.4.2
+    text: "Ensure that the API server pod specification file ownership is set to
+    root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $apiserverpodspec; then stat -c %U:%G $apiserverpodspec; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $apiserverpodspec
+    scored: true
+
+  - id: 1.4.3
+    text: "Ensure that the controller manager pod specification file
+    permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $controllermanagerpodspec; then stat -c %a $controllermanagerpodspec; fi'"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+      - flag: "640"
+        compare:
+          op: eq
+          value: "640"
+        set: true
+      - flag: "600"
+        compare:
+          op: eq
+          value: "600"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $controllermanagerpodspec
+    scored: true
+
+  - id: 1.4.4
+    text: "Ensure that the controller manager pod specification file
+    ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $controllermanagerpodspec; then stat -c %U:%G $controllermanagerpodspec; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $controllermanagerpodspec
+    scored: true
+
+  - id: 1.4.5
+    text: "Ensure that the scheduler pod specification file permissions are set
+    to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $schedulerpodspec; then stat -c %a $schedulerpodspec; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $schedulerpodspec
+    scored: true
+
+  - id: 1.4.6
+    text: "Ensure that the scheduler pod specification file ownership is set to
+    root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $schedulerpodspec; then stat -c %U:%G $schedulerpodspec; fi'"
+    tests:
+      test_items:
+        - flag: "root:root"
+          compare:
+            op: eq
+            value: "root:root"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $schedulerpodspec
+    scored: true
+
+  - id: 1.4.7
+    text: "Ensure that the etcd pod specification file permissions are set to
+    644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $etcdpodspec; then stat -c %a $etcdpodspec; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $etcdpodspec
+    scored: true
+
+  - id: 1.4.8
+    text: "Ensure that the etcd pod specification file ownership is set to
+    root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $etcdpodspec; then stat -c %U:%G $etcdpodspec; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $etcdpodspec
+    scored: true
+
+  - id: 1.4.9
+    text: "Ensure that the Container Network Interface file permissions are
+    set to 644 or more restrictive (Not Scored)"
+    audit: "stat -c %a <path/to/cni/files>"
+    type: manual
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 <path/to/cni/files>
+    scored: true
+
+  - id: 1.4.10
+    text: "Ensure that the Container Network Interface file ownership is set
+    to root:root (Not Scored)"
+    audit: "stat -c %U:%G <path/to/cni/files>"
+    type: manual
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root <path/to/cni/files>
+    scored: true
+
+  - id: 1.4.11
+    text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep | grep -o data-dir=.* | cut -d= -f2 | xargs stat -c %a"
+    tests:
+      test_items:
+      - flag: "700"
+        compare:
+          op: eq
+          value: "700"
+        set: true
+    remediation: |
+      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
+      from the below command:
+      ps -ef | grep $etcdbin
+      Run the below command (based on the etcd data directory found above). For example,
+      chmod 700 /var/lib/etcd
+    scored: true
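+  # Reading the audit pipeline above: "ps -ef | grep $etcdbin" finds the etcd process,
+  # "grep -o data-dir=.*" keeps the text from "data-dir=" onward, "cut -d= -f2" extracts the value
+  # after the first "=", and "xargs stat -c %a" prints that directory's octal permissions, which
+  # must equal 700 for the check to pass.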
+
+  - id: 1.4.12
+    text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep | grep -o data-dir=.* | cut -d= -f2 | xargs stat -c %U:%G"
+    tests:
+      test_items:
+      - flag: "etcd:etcd"
+        set: true
+    remediation: |
+      On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
+      from the below command:
+      ps -ef | grep $etcdbin
+      Run the below command (based on the etcd data directory found above). For example,
+      chown etcd:etcd /var/lib/etcd
+    scored: true
+
+  - id: 1.4.13
+    text: "Ensure that the admin.conf file permissions are set to 644 or
+    more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
+    tests:
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 /etc/kubernetes/admin.conf
+    scored: true
+
+  - id: 1.4.14
+    text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root /etc/kubernetes/admin.conf
+    scored: true
+
+  - id: 1.4.15
+    text: "Ensure that the scheduler.conf file permissions are set to 644 or
+    more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $schedulerconf then stat -c %a $schedulerconf; fi'"
+    tests:
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $schedulerconf
+    scored: true
+
+  - id: 1.4.16
+    text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $schedulerconf
+    scored: true
+
+  - id: 1.4.17
+    text: "Ensure that the controller-manager.conf file permissions are set
+    to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $controllermanagerconf then stat -c %a $controllermanagerconf; fi'"
+    tests:
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chmod 644 $controllermanagerconf
+    scored: true
+
+  - id: 1.4.18
+    text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example,
+      chown root:root $controllermanagerconf
+    scored: true
+
+- id: 1.5
+  text: "etcd"
+  checks:
+  - id: 1.5.1
+    text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--cert-file"
+        set: true
+      - flag:  "--key-file"
+        set: true
+    remediation: |
+      Follow the etcd service documentation and configure TLS encryption.
+      Then, edit the etcd pod specification file $etcdpodspec on the
+      master node and set the below parameters.
+      --cert-file=</path/to/cert-file>
+      --key-file=</path/to/key-file>
+    scored: true
+
+  - id: 1.5.2
+    text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--client-cert-auth"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdpodspec on the master
+      node and set the below parameter.
+      --client-cert-auth="true"
+    scored: true
+
+  - id: 1.5.3
+    text: "Ensure that the --auto-tls argument is not set to true (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--auto-tls"
+        set: false
+      - flag: "--auto-tls"
+        compare:
+          op: neq
+          value: true
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdpodspec on the master
+      node and either remove the --auto-tls parameter or set it to false .
+        --auto-tls=false
+    scored: true
+
+  - id: 1.5.4
+    text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set
+    as appropriate (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--peer-cert-file"
+        set: true
+      - flag: "--peer-key-file"
+        set: true
+    remediation: |
+      Follow the etcd service documentation and configure peer TLS encryption as appropriate
+      for your etcd cluster.
+      Then, edit the etcd pod specification file $etcdpodspec on the
+      master node and set the below parameters.
+      --peer-cert-file=</path/to/peer-cert-file>
+      --peer-key-file=</path/to/peer-key-file>
+    scored: true
+
+  - id: 1.5.5
+    text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--peer-client-cert-auth"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdpodspec on the master
+      node and set the below parameter.
+      --peer-client-cert-auth=true
+    scored: true
+
+  - id: 1.5.6
+    text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--peer-auto-tls"
+        set: false
+      - flag: "--peer-auto-tls"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdpodspec on the master
+      node and either remove the --peer-auto-tls parameter or set it to false .
+      --peer-auto-tls=false
+    scored: true
+
+  - id: 1.5.7
+    text: "Ensure that the --wal-dir argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--wal-dir"
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdpodspec on the master
+      node and set the below parameter.
+      --wal-dir=</path/to/log/dir>
+    scored: true
+
+  - id: 1.5.8
+    text: "Ensure that the --max-wals argument is set to 0 (Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--max-wals"
+        compare:
+          op: eq
+          value: 0
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdpodspec on the master
+      node and set the below parameter.
+      --max-wals=0
+    scored: true
+
+  - id: 1.5.9
+    text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
+    audit: "ps -ef | grep $etcdbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--trusted-ca-file"
+        set: true
+    remediation: |
+      Follow the etcd documentation and create a dedicated certificate authority setup for the
+      etcd service.
+      Then, edit the etcd pod specification file $etcdpodspec on the
+      master node and set the below parameter.
+      --trusted-ca-file=</path/to/ca-file>
+    scored: false 
+
+- id: 1.6
+  text: "General Security Primitives"
+  checks:
+  - id: 1.6.1
+    text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
+    type: "manual"
+    remediation: |
+      Remove any unneeded clusterrolebindings :
+      kubectl delete clusterrolebinding [name]
+    scored: false
+
+  - id: 1.6.2
+    text: "Create Pod Security Policies for your cluster (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the documentation and create and enforce Pod Security Policies for your cluster.
+      Additionally, you could refer the "CIS Security Benchmark for Docker" and follow the
+      suggested Pod Security Policies for your environment.
+    scored: false
+
+  - id: 1.6.3
+    text: "Create administrative boundaries between resources using namespaces (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the documentation and create namespaces for objects in your deployment as you
+      need them.
+    scored: false
+
+  - id: 1.6.4
+    text: "Create network segmentation using Network Policies (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the documentation and create NetworkPolicy objects as you need them.
+    scored: false
+
+  - id: 1.6.5
+    text: "Ensure that the seccomp profile is set to docker/default in your pod 
+    definitions (Not Scored)"
+    type: "manual"
+    remediation: |
+      Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+      would need to enable alpha features in the apiserver by passing the
+      "--feature-gates=AllAlpha=true" argument.
+      Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
+      parameter to "--feature-gates=AllAlpha=true"
+      KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+      Based on your system, restart the kube-apiserver service. For example:
+      systemctl restart kube-apiserver.service
+      Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+      example is as below:
+      apiVersion: v1
+      kind: Pod
+      metadata:
+        name: trustworthy-pod
+        annotations:
+          seccomp.security.alpha.kubernetes.io/pod: docker/default
+      spec:
+        containers:
+          - name: trustworthy-container
+            image: sotrustworthy:latest
+    scored: false
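+  # The annotation in the example above applies the docker/default profile to every container in
+  # the pod; in this release a per-container profile could also be set with the
+  # container.seccomp.security.alpha.kubernetes.io/<container-name> annotation.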
+
+  - id: 1.6.6
+    text: "Apply Security Context to Your Pods and Containers (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and apply security contexts to your pods. For a
+      suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+      Containers.
+    scored: false
+
+  - id: 1.6.7
+    text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and setup image provenance.
+    scored: false
+
+  - id: 1.6.8
+    text: "Configure Network policies as appropriate (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and setup network policies as appropriate.
+      For example, you could create a "default" isolation policy for a Namespace by creating a
+      NetworkPolicy that selects all pods but does not allow any traffic:
+      apiVersion: networking.k8s.io/v1
+      kind: NetworkPolicy
+      metadata:
+        name: default-deny
+      spec:
+        podSelector: {}
+    scored: false
+
+  - id: 1.6.9
+    text: "Place compensating controls in the form of PSP and RBAC for
+    privileged containers usage (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster.
+    scored: false
diff --git a/cfg/1.8/node.yaml b/cfg/1.8/node.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c8c51f24e18510e0f57456fb65bbbec23a37cce7
--- /dev/null
+++ b/cfg/1.8/node.yaml
@@ -0,0 +1,440 @@
+---
+controls:
+version: 1.8
+id: 2
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+- id: 2.1
+  text: "Kubelet"
+  checks:
+  - id: 2.1.1
+    text: "Ensure that the --allow-privileged argument is set to false (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--allow-privileged"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --allow-privileged=false
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
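+    # For reference (illustrative; the drop-in path and variable split vary by distribution),
+    # with kubeadm the remediated line in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+    # typically ends up looking like:
+    #   Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=false"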
+
+  - id: 2.1.2
+    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--anonymous-auth"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --anonymous-auth=false
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.3
+    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: nothave
+          value: "AlwaysAllow"
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_AUTHZ_ARGS variable.
+      --authorization-mode=Webhook
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.4
+    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--client-ca-file"
+        set: true 
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_AUTHZ_ARGS variable.
+      --client-ca-file=<path/to/client-ca-file>
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.5
+    text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--read-only-port"
+        compare:
+          op: eq
+          value: 0
+        set: true 
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --read-only-port=0
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.6
+    text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--streaming-connection-idle-timeout"
+        compare:
+          op: noteq
+          value: 0
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --streaming-connection-idle-timeout=5m
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.7
+    text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--protect-kernel-defaults"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --protect-kernel-defaults=true
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.8
+    text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--make-iptables-util-chains"
+        compare:
+          op: eq
+          value: true
+        set: true
+      - flag: "--make-iptables-util-chains"
+        set: false
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and remove the --make-iptables-util-chains argument from the
+      KUBELET_SYSTEM_PODS_ARGS variable.
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.9
+    text: "Ensure that the --keep-terminated-pod-volumes argument is set to false (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--keep-terminated-pod-volumes"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --keep-terminated-pod-volumes=false
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.10
+    text: "Ensure that the --hostname-override argument is not set (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--hostname-override"
+        set: false
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and remove the --hostname-override argument from the
+      KUBELET_SYSTEM_PODS_ARGS variable.
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.11
+    text: "Ensure that the --event-qps argument is set to 0 (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--event-qps"
+        compare:
+          op: eq
+          value: 0
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      --event-qps=0
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.12
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--tls-cert-file"
+        set: true
+      - flag: "--tls-private-key-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the Kubelet.
+      Then edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+      --tls-cert-file=<path/to/tls-certificate-file>
+      --tls-private-key-file=<path/to/tls-key-file>
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.13
+    text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--cadvisor-port"
+        compare:
+          op: eq
+          value: 0
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
+      --cadvisor-port=0
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.14
+    text: "Ensure that the RotateKubeletClientCertificate argument is set to true"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "RotateKubeletClientCertificate"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and remove the --feature-gates=RotateKubeletClientCertificate=false
+      argument from the KUBELET_CERTIFICATE_ARGS variable.
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 2.1.15
+    text: "Ensure that the RotateKubeletServerCertificate argument is set to true"
+    audit: "ps -ef | grep $kubeletbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "RotateKubeletServerCertificate"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the kubelet service file $kubeletunitfile
+      on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+      --feature-gates=RotateKubeletServerCertificate=true
+      Based on your system, restart the kubelet service. For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+- id: 2.2
+  text: "Configuration Files"
+  checks:
+    - id: 2.2.1
+      text: "Ensure that the kubelet.conf file permissions are set to 644 or
+      more restrictive (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'"
+      tests:
+        bin_op: or
+        test_items:
+          - flag: "644"
+            compare:
+              op: eq
+              value: "644"
+            set: true
+          - flag: "640"
+            compare:
+              op: eq
+              value: "640"
+            set: true
+          - flag: "600"
+            compare:
+              op: eq
+              value: "600"
+            set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on each worker
+        node. For example,
+        chmod 644 $kubeletconf
+      scored: true
+
+    - id: 2.2.2
+      text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'"
+      tests:
+        test_items:
+          - flag: "root:root"
+            compare:
+              op: eq
+              value: root:root
+            set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on each worker
+        node. For example,
+        chown root:root $kubeletconf
+      scored: true
+
+    - id: 2.2.3
+      text: "Ensure that the kubelet service file permissions are set to 644 or
+      more restrictive (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletunitfile; then stat -c %a $kubeletunitfile; fi'"
+      tests:
+        bin_op: or
+        test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on each worker
+        node. For example,
+        chmod 644 $kubeletunitfile
+      scored: true
+
+    - id: 2.2.4
+      text: "Ensure that the kubelet service file permissions are set to 644 or
+      more restrictive (Scored)"
+      audit: "/bin/sh -c 'if test -e $kubeletunitfile; then stat -c %U:%G $kubeletunitfile; fi'"
+      tests:
+        test_items:
+        - flag: "root:root"
+          set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on each worker
+        node. For example,
+        chown root:root $kubeletunitfile
+      scored: true
+
+    - id: 2.2.5
+      text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more
+      restrictive (Scored)"
+      audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %a $proxyconf; fi'"
+      tests:
+        bin_op: or
+        test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on each worker
+        node. For example,
+        chmod 644 $proxyconf
+      scored: true
+
+    - id: 2.2.6
+      text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
+      tests:
+        test_items:
+        - flag: "root:root"
+          set: true
+      remediation: |
+          Run the below command (based on the file location on your system) on the each worker
+          node. For example,
+          chown root:root $proxyconf
+      scored: true
+
+    - id: 2.2.7
+      text: "Ensure that the certificate authorities file permissions are set to
+      644 or more restrictive (Scored)"
+      type: manual
+      remediation: |
+        Run the following command to modify the file permissions of the --client-ca-file .
+        chmod 644 <filename>
+      scored: true
+
+    - id: 2.2.8
+      text: "Ensure that the client certificate authorities file ownership is set to root:root"
+      audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'"
+      type: manual
+      remediation: |
+        Run the following command to modify the ownership of the --client-ca-file .
+        chown root:root <filename>
+      scored: true
diff --git a/cfg/config.yaml b/cfg/config.yaml
index 8ad2e89acf585f7482c8abc9ff0f793d3130e6f1..8512c75f888279ed710f6783da297a5c05c65da2 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -26,42 +26,54 @@ master:
       - "hyperkube apiserver"
       - "apiserver"
     confs:
-      - /etc/kubernetes/manifests/kube-apiserver.yaml
       - /etc/kubernetes/apiserver.conf
       - /etc/kubernetes/apiserver
     defaultconf: /etc/kubernetes/apiserver
 
+    podspecs:
+      - /etc/kubernetes/manifests/kube-apiserver.yaml
+    defaultpodspec: /etc/kubernetes/manifests/kube-apiserver.yaml
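+    # Podspec candidates are resolved the same way as confs: the first existing entry
+    # (or the defaultpodspec fallback) is substituted into the benchmark YAML as
+    # $<component>podspec, e.g. $etcdpodspec in cfg/1.8/master.yaml.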
+
   scheduler:
     bins:
       - "kube-scheduler"
       - "hyperkube scheduler"
       - "scheduler"
     confs: 
-      - /etc/kubernetes/manifests/kube-scheduler.yaml
       - /etc/kubernetes/scheduler.conf
       - /etc/kubernetes/scheduler
     defaultconf: /etc/kubernetes/scheduler
 
+    podspecs:
+      - /etc/kubernetes/manifests/kube-scheduler.yaml
+    defaultpodspec: /etc/kubernetes/manifests/kube-scheduler.yaml
+
   controllermanager:
     bins:
       - "kube-controller-manager"
       - "hyperkube controller-manager"
       - "controller-manager"
     confs:
-      - /etc/kubernetes/manifests/kube-controller-manager.yaml
       - /etc/kubernetes/controller-manager.conf
       - /etc/kubernetes/controller-manager
     defaultconf: /etc/kubernetes/controller-manager
 
+    podspecs:
+      - /etc/kubernetes/manifests/kube-controller-manager.yaml
+    defaultpodspec: /etc/kubernetes/manifests/kube-controller-manager.yaml
+
   etcd:
     optional: true
     bins:
       - "etcd"
     confs:
-      - /etc/kubernetes/manifests/etcd.yaml
       - /etc/etcd/etcd.conf
     defaultconf: /etc/etcd/etcd.conf
 
+    podspecs:
+      - /etc/kubernetes/manifests/etcd.yaml
+    defaultpodspec: /etc/kubernetes/manifests/etcd.yaml
+
   flanneld:
     optional: true
     bins:
@@ -87,6 +99,10 @@ node:
       - /etc/kubernetes/kubelet.conf
       - /etc/kubernetes/kubelet 
     defaultconf: "/etc/kubernetes/kubelet.conf"
+
+    unitfiles:
+     - /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+    defaultunitfile: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
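+    # The resolved unit file is substituted as $kubeletunitfile in the
+    # cfg/1.8/node.yaml audit and remediation text.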
   
   proxy:
     bins:
diff --git a/cmd/common.go b/cmd/common.go
index b618cff550dffaeb82a082c113eefe58af5753c6..553656d6daa7c6f51083b1c5f7c4df3061b08814 100644
--- a/cmd/common.go
+++ b/cmd/common.go
@@ -24,24 +24,6 @@ import (
 )
 
 var (
-	apiserverBin            string
-	apiserverConf           string
-	schedulerBin            string
-	schedulerConf           string
-	controllerManagerBin    string
-	controllerManagerConf   string
-	config                  string
-	etcdBin                 string
-	etcdConf                string
-	flanneldBin             string
-	flanneldConf            string
-	kubeletBin              string
-	kubeletConf             string
-	proxyBin                string
-	proxyConf               string
-	fedApiserverBin         string
-	fedControllerManagerBin string
-
 	errmsgs string
 )
 
@@ -66,7 +48,9 @@ func runChecks(t check.NodeType) {
 	// Get the set of exectuables and config files we care about on this type of node. This also
 	// checks that the executables we need for the node type are running.
 	binmap := getBinaries(typeConf)
-	confmap := getConfigFiles(typeConf)
+	confmap := getConfigFiles(typeConf, "conf")
+	podspecmap := getConfigFiles(typeConf, "podspec")
+	unitfilemap := getConfigFiles(typeConf, "unitfile")
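+	// These maps feed the makeSubstitutions calls below, which replace variables of the
+	// form $<component><type> (e.g. $kubeletconf, $etcdpodspec, $kubeletunitfile) in the
+	// benchmark definition files.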
 
 	switch t {
 	case check.MASTER:
@@ -88,6 +72,8 @@ func runChecks(t check.NodeType) {
 	s := string(in)
 	s = makeSubstitutions(s, "bin", binmap)
 	s = makeSubstitutions(s, "conf", confmap)
+	s = makeSubstitutions(s, "podspec", podspecmap)
+	s = makeSubstitutions(s, "unitfile", unitfilemap)
 
 	glog.V(1).Info(fmt.Sprintf("Using config file: %s\n", viper.ConfigFileUsed()))
 	glog.V(1).Info(fmt.Sprintf("Using benchmark file: %s\n", path))
diff --git a/cmd/util.go b/cmd/util.go
index 7524eaf595e6d0964c129f1a3c1c25a0ce3207aa..dfd8b235076bdcc214af022f0176a564e5a100e5 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -117,7 +117,9 @@ func getBinaries(v *viper.Viper) map[string]string {
 }
 
 // getConfigFiles finds which of the set of candidate config files exist
-func getConfigFiles(v *viper.Viper) map[string]string {
+// accepts a string 't' which indicates the type of config file: conf,
+// podspec or unitfile.
+func getConfigFiles(v *viper.Viper, t string) map[string]string {
 	confmap := make(map[string]string)
 
 	for _, component := range v.GetStringSlice("components") {
@@ -127,10 +129,10 @@ func getConfigFiles(v *viper.Viper) map[string]string {
 		}
 
 		// See if any of the candidate config files exist
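+		// 't' selects both the candidate list ("confs", "podspecs", "unitfiles")
+		// and the fallback key ("defaultconf", "defaultpodspec", "defaultunitfile").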
-		conf := findConfigFile(s.GetStringSlice("confs"))
+		conf := findConfigFile(s.GetStringSlice(t + "s"))
 		if conf == "" {
-			if s.IsSet("defaultconf") {
-				conf = s.GetString("defaultconf")
+			if s.IsSet("default" + t) {
+				conf = s.GetString("default" + t)
 				glog.V(2).Info(fmt.Sprintf("Using default config file name '%s' for component %s", conf, component))
 			} else {
 				// Default the config file name that we'll substitute to the name of the component
diff --git a/cmd/util_test.go b/cmd/util_test.go
index e9c6eac49373e16fe49dfcbc216ac7f01571a1a0..50044cf4d03823f987518e03a894a80438a70a05 100644
--- a/cmd/util_test.go
+++ b/cmd/util_test.go
@@ -282,7 +282,7 @@ func TestGetConfigFiles(t *testing.T) {
 			e = c.statResults
 			eIndex = 0
 
-			m := getConfigFiles(v)
+			m := getConfigFiles(v, "conf")
 			if !reflect.DeepEqual(m, c.exp) {
 				t.Fatalf("Got %v\nExpected %v", m, c.exp)
 			}