diff --git a/README.md b/README.md
index 61a33c9d49e4cda855cabed1b28835071c66d4f0..0c657692d81b47f54dd65b63052ce8f4b4c662ae 100644
--- a/README.md
+++ b/README.md
@@ -42,12 +42,13 @@ Table of Contents
       
 ## CIS Kubernetes Benchmark support
 
-kube-bench supports the tests for Kubernetes as defined in the CIS Benchmarks 1.3.0 to 1.4.1 respectively. 
+kube-bench supports the tests for Kubernetes as defined in CIS Benchmarks 1.3.0 through 1.5.0.
 
 | CIS Kubernetes Benchmark | kube-bench config | Kubernetes versions |
 |---|---|---|
 | 1.3.0| cis-1.3 | 1.11-1.12 |
-| 1.4.1| cis-1.4 | 1.13- |
+| 1.4.1 | cis-1.4 | 1.13-1.14 |
+| 1.5.0 | cis-1.5 | 1.15- |
 
 
 By default, kube-bench will determine the test set to run based on the Kubernetes version running on the machine.
@@ -97,6 +98,32 @@ Alternatively, you can specify `--benchmark` to run a specific CIS Benchmark ver
 kube-bench node --benchmark cis-1.4
 ```
 
+If you want to run checks only for specific CIS Benchmark targets (e.g. master, node, etcd),
+you can use the `run` subcommand with the `--targets` flag.
+```
+kube-bench --benchmark cis-1.4 run --targets master,node
+```
+or
+```
+kube-bench --benchmark cis-1.5 run --targets master,node,etcd,policies
+```
+
+The following table shows the valid targets based on the CIS Benchmark version.
+
+| CIS Benchmark | Targets |
+|---|---|
+| cis-1.3 | master, node |
+| cis-1.4 | master, node |
+| cis-1.5| master, controlplane, node, etcd, policies |
+
+If no targets are specified, `kube-bench` will determine the appropriate targets based on the CIS Benchmark version.
+
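+For example, based on the table above,
+```
+kube-bench --benchmark cis-1.5 run
+```
+should be equivalent to running `kube-bench --benchmark cis-1.5 run --targets master,controlplane,node,etcd,policies`.
+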
 `controls` for the various versions of CIS Benchmark can be found in directories
 with same name as the CIS Benchmark versions under `cfg/`, for example `cfg/cis-1.4`.
 
diff --git a/cfg/cis-1.5/config.yaml b/cfg/cis-1.5/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4d9b1b8c7cbcf3fb9f331aada0c059e4cdfa9418
--- /dev/null
+++ b/cfg/cis-1.5/config.yaml
@@ -0,0 +1,8 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
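+## For example (illustrative only -- these keys mirror cfg/config.yaml and are
+## commented out because cis-1.5 does not currently override anything):
+# master:
+#   apiserver:
+#     confs:
+#       - /etc/kubernetes/manifests/kube-apiserver.yaml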
diff --git a/cfg/cis-1.5/controlplane.yaml b/cfg/cis-1.5/controlplane.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e13a7ad66b217ea57329910170be856f5e9ba54a
--- /dev/null
+++ b/cfg/cis-1.5/controlplane.yaml
@@ -0,0 +1,41 @@
+---
+controls:
+version: 1.5
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+- id: 3.1
+  text: "Authentication and Authorization"
+  checks:
+  - id: 3.1.1
+    text: "Client certificate authentication should not be used for users (Not Scored) "
+    type: "manual"
+    remediation: |
+      Alternative mechanisms provided by Kubernetes such as the use of OIDC should be 
+      implemented in place of client certificates. 
+    scored: false
+
+- id: 3.2
+  text: "Logging"
+  checks:
+  - id: 3.2.1
+    text: "Ensure that a minimal audit policy is created (Scored) "
+    type: "manual"
+    remediation: |
+      Create an audit policy file for your cluster. 
+    scored: true
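+    # For illustration only (not taken from the benchmark), a minimal audit
+    # policy file might look like:
+    #   apiVersion: audit.k8s.io/v1
+    #   kind: Policy
+    #   rules:
+    #   - level: Metadata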
+
+  - id: 3.2.2
+    text: "Ensure that the audit policy covers key security concerns (Not Scored) "
+    type: "manual"
+    remediation: |
+      Consider modification of the audit policy in use on the cluster to include these items, at a 
+      minimum. 
+    scored: false
diff --git a/cfg/cis-1.5/etcd.yaml b/cfg/cis-1.5/etcd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c813da34ab9a88321dcc608d221c0e3ccd95dba8
--- /dev/null
+++ b/cfg/cis-1.5/etcd.yaml
@@ -0,0 +1,136 @@
+---
+controls:
+version: 1.5
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+- id: 2
+  text: "Etcd Node Configuration Files"
+  checks:
+  - id: 2.1
+    text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
+    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--cert-file"
+        set: true
+      - flag:  "--key-file"
+        set: true
+    remediation: |
+      Follow the etcd service documentation and configure TLS encryption.
+      Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml 
+      on the master node and set the below parameters.
+      --cert-file=</path/to/cert-file>
+      --key-file=</path/to/key-file>
+    scored: true
+    
+  - id: 2.2
+    text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
+    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+    tests:
+      test_items:
+      - flag: "--client-cert-auth"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdconf on the master
+      node and set the below parameter.
+      --client-cert-auth="true"
+    scored: true
+
+  - id: 2.3
+    text: "Ensure that the --auto-tls argument is not set to true (Scored)"
+    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--auto-tls"
+        set: false
+      - flag: "--auto-tls"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdconf on the master
+      node and either remove the --auto-tls parameter or set it to false.
+        --auto-tls=false
+    scored: true
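+    # With bin_op: or, this check passes when --auto-tls is either absent
+    # (set: false) or explicitly present with the value false; 2.6 applies
+    # the same pattern to --peer-auto-tls.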
+    
+  - id: 2.4
+    text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+    set as appropriate (Scored)"
+    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--peer-cert-file"
+        set: true
+      - flag: "--peer-key-file"
+        set: true
+    remediation: |
+      Follow the etcd service documentation and configure peer TLS encryption as appropriate
+      for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
+      master node and set the below parameters.
+      --peer-cert-file=</path/to/peer-cert-file>
+      --peer-key-file=</path/to/peer-key-file>
+    scored: true
+    
+  - id: 2.5
+    text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
+    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+    tests:
+      test_items:
+      - flag: "--peer-client-cert-auth"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdconf on the master
+      node and set the below parameter.
+      --peer-client-cert-auth=true
+    scored: true
+    
+  - id: 2.6
+    text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
+    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--peer-auto-tls"
+        set: false
+      - flag: "--peer-auto-tls"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the etcd pod specification file $etcdconf on the master
+      node and either remove the --peer-auto-tls parameter or set it to false.
+      --peer-auto-tls=false
+    scored: true
+    
+  - id: 2.7
+    text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
+    audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+    tests:
+      test_items:
+      - flag: "--trusted-ca-file"
+        set: true
+    remediation: |
+      [Manual test]
+      Follow the etcd documentation and create a dedicated certificate authority setup for the
+      etcd service.
+      Then, edit the etcd pod specification file $etcdconf on the
+      master node and set the below parameter.
+      --trusted-ca-file=</path/to/ca-file>
+    scored: false
diff --git a/cfg/cis-1.5/master.yaml b/cfg/cis-1.5/master.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83315dd36ddf7c77f162679d716e1013ede741aa
--- /dev/null
+++ b/cfg/cis-1.5/master.yaml
@@ -0,0 +1,1120 @@
+---
+controls:
+version: 1.5
+id: 1
+text: "Master Node Security Configuration"
+type: "master"
+groups:
+- id: 1.1
+  text: "Master Node Configuration Files "
+  checks:
+  - id: 1.1.1
+    text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+      - flag: "640"
+        compare:
+          op: eq
+          value: "640"
+        set: true
+      - flag: "600"
+        compare:
+          op: eq
+          value: "600"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the
+      master node.
+      For example, chmod 644 $apiserverconf 
+    scored: true
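+  # stat prints the file's permission mode; with bin_op: or the check passes
+  # when the mode is any one of the listed values (644, 640 or 600).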
+
+  - id: 1.1.2
+    text: "Ensure that the API server pod specification file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example, 
+      chown root:root $apiserverconf 
+    scored: true
+
+  - id: 1.1.3
+    text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "644"
+        compare:
+          op: eq
+          value: "644"
+        set: true
+      - flag: "640"
+        compare:
+          op: eq
+          value: "640"
+        set: true
+      - flag: "600"
+        compare:
+          op: eq
+          value: "600"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node.
+      For example, 
+      chmod 644 $controllermanagerconf 
+    scored: true
+
+  - id: 1.1.4
+    text: "Ensure that the controller manager pod specification file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chown root:root $controllermanagerconf 
+    scored: true
+
+  - id: 1.1.5
+    text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chmod 644 $schedulerconf 
+    scored: true
+
+  - id: 1.1.6
+    text: "Ensure that the scheduler pod specification file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+    tests:
+      test_items:
+        - flag: "root:root"
+          compare:
+            op: eq
+            value: "root:root"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chown root:root $schedulerconf 
+    scored: true
+
+  - id: 1.1.7
+    text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chmod 644 $etcdconf 
+    scored: true
+
+  - id: 1.1.8
+    text: "Ensure that the etcd pod specification file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chown root:root $etcdconf 
+    scored: true
+
+  - id: 1.1.9
+    text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Not Scored)"
+    audit: "stat -c %a <path/to/cni/files>"
+    type: "manual"
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chmod 644 <path/to/cni/files> 
+    scored: false
+
+  - id: 1.1.10
+    text: "Ensure that the Container Network Interface file ownership is set to root:root (Not Scored)"
+    audit: "stat -c %U:%G <path/to/cni/files>"
+    type: "manual"
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chown root:root <path/to/cni/files> 
+    scored: false
+
+  - id: 1.1.11
+    text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
+    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
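+    # The sed expression extracts the --data-dir value from the etcd process
+    # arguments; xargs then passes that directory to stat to read its mode.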
+    tests:
+      test_items:
+      - flag: "700"
+        compare:
+          op: eq
+          value: "700"
+        set: true
+    remediation: |
+      On the etcd server node, get the etcd data directory, passed as an argument --data-dir, 
+      from the below command: 
+      ps -ef | grep etcd
+      Run the below command (based on the etcd data directory found above). For example,
+      chmod 700 /var/lib/etcd 
+    scored: true
+
+  - id: 1.1.12
+    text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
+    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
+    tests:
+      test_items:
+      - flag: "etcd:etcd"
+        set: true
+    remediation: |
+      On the etcd server node, get the etcd data directory, passed as an argument --data-dir, 
+      from the below command: 
+      ps -ef | grep etcd 
+      Run the below command (based on the etcd data directory found above). 
+      For example, chown etcd:etcd /var/lib/etcd 
+    scored: true
+
+  - id: 1.1.13
+    text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chmod 644 /etc/kubernetes/admin.conf 
+    scored: true
+
+  - id: 1.1.14
+    text: "Ensure that the admin.conf file ownership is set to root:root (Scored) "
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chown root:root /etc/kubernetes/admin.conf 
+    scored: true
+
+  - id: 1.1.15
+    text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chmod 644 /etc/kubernetes/scheduler.conf 
+    scored: true
+
+  - id: 1.1.16
+    text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chown root:root /etc/kubernetes/scheduler.conf 
+    scored: true
+
+  - id: 1.1.17
+    text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
+    tests:
+      bin_op: or
+      test_items:
+        - flag: "644"
+          compare:
+            op: eq
+            value: "644"
+          set: true
+        - flag: "640"
+          compare:
+            op: eq
+            value: "640"
+          set: true
+        - flag: "600"
+          compare:
+            op: eq
+            value: "600"
+          set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chmod 644 /etc/kubernetes/controller-manager.conf 
+    scored: true
+
+  - id: 1.1.18
+    text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
+    audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chown root:root /etc/kubernetes/controller-manager.conf 
+    scored: true
+
+  - id: 1.1.19
+    text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)"
+    audit: "ls -laR /etc/kubernetes/pki/"
+    type: "manual"
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chown -R root:root /etc/kubernetes/pki/ 
+    scored: true
+
+  - id: 1.1.20
+    text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored) "
+    audit: "stat -c %n\ %a /etc/kubernetes/pki/*.crt"
+    type: "manual"
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chmod -R 644 /etc/kubernetes/pki/*.crt 
+    scored: true
+
+  - id: 1.1.21
+    text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored)"
+    audit: "stat -c %n\ %a /etc/kubernetes/pki/*.key"
+    type: "manual"
+    remediation: |
+      Run the below command (based on the file location on your system) on the master node. 
+      For example, 
+      chmod -R 600 /etc/kubernetes/pki/*.key 
+    scored: true
+
+- id: 1.2
+  text: "API Server"
+  checks:
+  - id: 1.2.1
+    text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--anonymous-auth"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the below parameter. 
+      --anonymous-auth=false 
+    scored: false
+
+  - id: 1.2.2
+    text: "Ensure that the --basic-auth-file argument is not set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--basic-auth-file"
+        set: false
+    remediation: |
+      Follow the documentation and configure alternate mechanisms for authentication. Then, 
+      edit the API server pod specification file $apiserverconf 
+      on the master node and remove the --basic-auth-file=<filename> parameter. 
+    scored: true
+
+  - id: 1.2.3
+    text: "Ensure that the --token-auth-file parameter is not set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--token-auth-file"
+        set: false
+    remediation: |
+      Follow the documentation and configure alternate mechanisms for authentication. Then, 
+      edit the API server pod specification file $apiserverconf 
+      on the master node and remove the --token-auth-file=<filename> parameter. 
+    scored: true
+
+  - id: 1.2.4
+    text: "Ensure that the --kubelet-https argument is set to true (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--kubelet-https"
+        compare:
+          op: eq
+          value: true
+        set: true
+      - flag: "--kubelet-https"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and remove the --kubelet-https parameter. 
+    scored: true
+
+  - id: 1.2.5
+    text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--kubelet-client-certificate"
+        set: true
+      - flag: "--kubelet-client-key"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the 
+      apiserver and kubelets. Then, edit API server pod specification file 
+      $apiserverconf on the master node and set the 
+      kubelet client certificate and key parameters as below. 
+      --kubelet-client-certificate=<path/to/client-certificate-file> 
+      --kubelet-client-key=<path/to/client-key-file> 
+    scored: true
+
+  - id: 1.2.6
+    text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--kubelet-certificate-authority"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and setup the TLS connection between 
+      the apiserver and kubelets. Then, edit the API server pod specification file 
+      $apiserverconf on the master node and set the 
+      --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. 
+      --kubelet-certificate-authority=<ca-string> 
+    scored: true
+
+  - id: 1.2.7
+    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: nothave
+          value: "AlwaysAllow"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. 
+      One such example could be as below. 
+      --authorization-mode=RBAC 
+    scored: true
+
+  - id: 1.2.8
+    text: "Ensure that the --authorization-mode argument includes Node (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: has
+          value: "Node"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --authorization-mode parameter to a value that includes Node. 
+      --authorization-mode=Node,RBAC 
+    scored: true
+
+  - id: 1.2.9
+    text: "Ensure that the --authorization-mode argument includes RBAC (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: has
+          value: "RBAC"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --authorization-mode parameter to a value that includes RBAC, 
+      for example: 
+      --authorization-mode=Node,RBAC 
+    scored: true
+
+  - id: 1.2.10
+    text: "Ensure that the admission control plugin EventRateLimit is set (Not Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "EventRateLimit"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set the desired limits in a configuration file. 
+      Then, edit the API server pod specification file $apiserverconf 
+      and set the below parameters. 
+      --enable-admission-plugins=...,EventRateLimit,... 
+      --admission-control-config-file=<path/to/configuration/file> 
+    scored: false
+
+  - id: 1.2.11
+    text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: nothave
+          value: AlwaysAdmit
+        set: true
+      - flag: "--enable-admission-plugins"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and either remove the --enable-admission-plugins parameter, or set it to a 
+      value that does not include AlwaysAdmit. 
+    scored: true
+
+  - id: 1.2.12
+    text: "Ensure that the admission control plugin AlwaysPullImages is set (Not Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "AlwaysPullImages"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --enable-admission-plugins parameter to include 
+      AlwaysPullImages. 
+      --enable-admission-plugins=...,AlwaysPullImages,... 
+    scored: false
+
+  - id: 1.2.13
+    text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Not Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "SecurityContextDeny"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --enable-admission-plugins parameter to include 
+      SecurityContextDeny, unless PodSecurityPolicy is already in place. 
+      --enable-admission-plugins=...,SecurityContextDeny,... 
+    scored: false
+
+  - id: 1.2.14
+    text: "Ensure that the admission control plugin ServiceAccount is set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "ServiceAccount"
+        set: true
+      - flag: "--enable-admission-plugins"
+        set: false
+    remediation: |
+      Follow the documentation and create ServiceAccount objects as per your environment. 
+      Then, edit the API server pod specification file $apiserverconf 
+      on the master node and ensure that the --disable-admission-plugins parameter is set to a 
+      value that does not include ServiceAccount.
+    scored: true
+
+  - id: 1.2.15
+    text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--disable-admission-plugins"
+        compare:
+          op: nothave
+          value: "NamespaceLifecycle"
+        set: true
+      - flag: "--disable-admission-plugins"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --disable-admission-plugins parameter to 
+      ensure it does not include NamespaceLifecycle. 
+    scored: true
+
+  - id: 1.2.16
+    text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "PodSecurityPolicy"
+        set: true
+    remediation: |
+      Follow the documentation and create Pod Security Policy objects as per your environment. 
+      Then, edit the API server pod specification file $apiserverconf 
+      on the master node and set the --enable-admission-plugins parameter to a 
+      value that includes PodSecurityPolicy: 
+      --enable-admission-plugins=...,PodSecurityPolicy,... 
+      Then restart the API Server.
+    scored: true
+
+  - id: 1.2.17
+    text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "NodeRestriction"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. 
+      Then, edit the API server pod specification file $apiserverconf 
+      on the master node and set the --enable-admission-plugins parameter to a 
+      value that includes NodeRestriction. 
+      --enable-admission-plugins=...,NodeRestriction,... 
+    scored: true
+
+  - id: 1.2.18
+    text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-bind-address"
+        set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and remove the --insecure-bind-address parameter. 
+    scored: true
+
+  - id: 1.2.19
+    text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--insecure-port"
+        compare:
+          op: eq
+          value: 0
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the below parameter. 
+      --insecure-port=0 
+    scored: true
+
+  - id: 1.2.20
+    text: "Ensure that the --secure-port argument is not set to 0 (Scored) "
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+        - flag:  "--secure-port"
+          compare:
+            op: gt
+            value: 0
+          set: true
+        - flag: "--secure-port"
+          set: false
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and either remove the --secure-port parameter or 
+      set it to a different (non-zero) desired port. 
+    scored: true
+
+  - id: 1.2.21
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "/bin/ps -ef | grep $apiserver | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the below parameter. 
+      --profiling=false 
+    scored: true
+
+  - id: 1.2.22
+    text: "Ensure that the --audit-log-path argument is set (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-path"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --audit-log-path parameter to a suitable path and 
+      file where you would like audit logs to be written, for example: 
+      --audit-log-path=/var/log/apiserver/audit.log 
+    scored: true
+
+  - id: 1.2.23
+    text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxage"
+        compare:
+          op: gte
+          value: 30
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days:
+      --audit-log-maxage=30 
+    scored: true
+
+  - id: 1.2.24
+    text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxbackup"
+        compare:
+          op: gte
+          value: 10
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate 
+      value. 
+      --audit-log-maxbackup=10 
+    scored: true
+
+  - id: 1.2.25
+    text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--audit-log-maxsize"
+        compare:
+          op: gte
+          value: 100
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. 
+      For example, to set it as 100 MB: 
+      --audit-log-maxsize=100 
+    scored: true
+
+  - id: 1.2.26
+    text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--request-timeout"
+        set: false
+      - flag: "--request-timeout"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      and set the below parameter as appropriate and if needed. 
+      For example, 
+      --request-timeout=300s 
+    scored: true
+
+  - id: 1.2.27
+    text: "Ensure that the --service-account-lookup argument is set to true (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--service-account-lookup"
+        set: false
+      - flag: "--service-account-lookup"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the below parameter. 
+      --service-account-lookup=true 
+      Alternatively, you can delete the --service-account-lookup parameter from this file so 
+      that the default takes effect. 
+    scored: true
+
+  - id: 1.2.28
+    text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-key-file"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the --service-account-key-file parameter 
+      to the public key file for service accounts: 
+      --service-account-key-file=<filename> 
+    scored: true
+
+  - id: 1.2.29
+    text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Scored) "
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--etcd-certfile"
+        set: true
+      - flag: "--etcd-keyfile"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
+      Then, edit the API server pod specification file $apiserverconf 
+      on the master node and set the etcd certificate and key file parameters. 
+      --etcd-certfile=<path/to/client-certificate-file>  
+      --etcd-keyfile=<path/to/client-key-file> 
+    scored: true
+
+  - id: 1.2.30
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--tls-cert-file"
+        set: true
+      - flag: "--tls-private-key-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
+      Then, edit the API server pod specification file $apiserverconf 
+      on the master node and set the TLS certificate and private key file parameters. 
+      --tls-cert-file=<path/to/tls-certificate-file>  
+      --tls-private-key-file=<path/to/tls-key-file> 
+    scored: true
+
+  - id: 1.2.31
+    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--client-ca-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
+      Then, edit the API server pod specification file $apiserverconf 
+      on the master node and set the client certificate authority file. 
+      --client-ca-file=<path/to/client-ca-file> 
+    scored: true
+
+  - id: 1.2.32
+    text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--etcd-cafile"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
+      Then, edit the API server pod specification file $apiserverconf 
+      on the master node and set the etcd certificate authority file parameter. 
+      --etcd-cafile=<path/to/ca-file> 
+    scored: true
+
+  - id: 1.2.33
+    text: "Ensure that the --encryption-provider-config argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--encryption-provider-config"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and configure an EncryptionConfig file.
+      Then, edit the API server pod specification file $apiserverconf 
+      on the master node and set the --encryption-provider-config parameter to the path of that file:
+      --encryption-provider-config=</path/to/EncryptionConfig/File>
+    scored: true
+
+  - id: 1.2.34
+    text: "Ensure that encryption providers are appropriately configured (Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and configure an EncryptionConfig file.
+      In this file, choose aescbc, kms or secretbox as the encryption provider. 
+    scored: true
+
+  - id: 1.2.35
+    text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
+    audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--tls-cipher-suites"
+        compare:
+          op: has
+          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf 
+      on the master node and set the below parameter. 
+      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+    scored: false
+
+- id: 1.3
+  text: "Controller Manager"
+  checks:
+  - id: 1.3.1
+    text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--terminated-pod-gc-threshold"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf 
+      on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, 
+      for example: 
+      --terminated-pod-gc-threshold=10 
+    scored: true
+
+  - id: 1.3.2
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf 
+      on the master node and set the below parameter. 
+      --profiling=false 
+    scored: true
+
+  - id: 1.3.3
+    text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
+    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--use-service-account-credentials"
+        compare:
+          op: noteq
+          value: false
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf 
+      on the master node to set the below parameter. 
+      --use-service-account-credentials=true 
+    scored: true
+
+  - id: 1.3.4
+    text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--service-account-private-key-file"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf 
+      on the master node and set the --service-account-private-key-file parameter 
+      to the private key file for service accounts. 
+      --service-account-private-key-file=<filename> 
+    scored: true
+
+  - id: 1.3.5
+    text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
+    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--root-ca-file"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf 
+      on the master node and set the --root-ca-file parameter to the certificate bundle file.
+      --root-ca-file=<path/to/file> 
+    scored: true
+
+  - id: 1.3.6
+    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--feature-gates"
+        compare:
+          op: eq
+          value: "RotateKubeletServerCertificate=true"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf 
+      on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. 
+      --feature-gates=RotateKubeletServerCertificate=true 
+    scored: true
+
+  - id: 1.3.7
+    text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Scored)"
+    audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--bind-address"
+        compare:
+          op: eq
+          value: "127.0.0.1"
+        set: true
+      - flag: "--bind-address"
+        set: false
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf 
+      on the master node and ensure the correct value for the --bind-address parameter.
+    scored: true
+
+- id: 1.4
+  text: "Scheduler"
+  checks:
+  - id: 1.4.1
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the Scheduler pod specification file $schedulerconf
+      on the master node and set the below parameter. 
+      --profiling=false 
+    scored: true
+
+  - id: 1.4.2
+    text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Scored) "
+    audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--bind-address"
+        compare:
+          op: eq
+          value: "127.0.0.1"
+        set: true
+      - flag: "--bind-address"
+        set: false
+    remediation: |
+      Edit the Scheduler pod specification file $schedulerconf 
+      on the master node and ensure the correct value for the --bind-address parameter.
+    scored: true
diff --git a/cfg/cis-1.5/node.yaml b/cfg/cis-1.5/node.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ba2a81dc86df12c53cfb187989df28397bd66503
--- /dev/null
+++ b/cfg/cis-1.5/node.yaml
@@ -0,0 +1,508 @@
+---
+controls:
+version: 1.5
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+- id: 4.1
+  text: "Worker Node Configuration Files"
+  checks:
+  - id: 4.1.1
+    text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)"
+    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'' '
+    tests:
+      test_items:
+      - flag: "644"
+        set: true
+        compare:
+          op: eq
+          value: "644"
+      - flag: "640"
+        set: true
+        compare:
+          op: eq
+          value: "640"
+      - flag: "600"
+        set: true
+        compare:
+          op: eq
+          value: "600"
+      bin_op: or
+    remediation: |
+      Run the below command (based on the file location on your system) on each worker node.
+      For example,
+      chmod 644 $kubeletsvc
+    scored: true
+
+  - id: 4.1.2
+    text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
+    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+    tests:
+      test_items:
+      - flag: root:root
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on each worker node.
+      For example, 
+      chown root:root $kubeletsvc 
+    scored: true
+
+  - id: 4.1.3
+    text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
+    audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'' '
+    tests:
+      test_items:
+      - flag: "644"
+        set: true
+        compare:
+          op: eq
+          value: "644"
+      - flag: "640"
+        set: true
+        compare:
+          op: eq
+          value: "640"
+      - flag: "600"
+        set: true
+        compare:
+          op: eq
+          value: "600"
+      bin_op: or
+    remediation: |
+      Run the below command (based on the file location on your system) on each worker node.
+      For example,
+      chmod 644 $proxykubeconfig
+    scored: true
+
+  - id: 4.1.4
+    text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
+    audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+    tests:
+      test_items:
+      - flag: root:root
+        set: true
+    remediation: |
+      Run the below command (based on the file location on your system) on each worker node.
+      For example, chown root:root $proxykubeconfig 
+    scored: true
+
+  - id: 4.1.5
+    text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)"
+    audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'' '
+    tests:
+      test_items:
+      - flag: "644"
+        set: true
+        compare:
+          op: eq
+          value: "644"
+      - flag: "640"
+        set: true
+        compare:
+          op: eq
+          value: "640"
+      - flag: "600"
+        set: true
+        compare:
+          op: eq
+          value: "600"
+      bin_op: or
+    remediation: |
+      Run the below command (based on the file location on your system) on each worker node.
+      For example, 
+      chmod 644 $kubeletkubeconfig 
+    scored: true
+
+  - id: 4.1.6
+    text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
+    audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+    tests:
+      test_items:
+      - flag: root:root
+        set: true
+        compare:
+          op: eq
+          value: root:root
+    remediation: |
+      Run the below command (based on the file location on your system) on each worker node.
+      For example, 
+      chown root:root $kubeletkubeconfig 
+    scored: true
+
+  - id: 4.1.7
+    text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)"
+    types: "manual"
+    remediation: |
+      Run the following command to modify the file permissions of the 
+      --client-ca-file chmod 644 <filename> 
+    scored: true
+
+  - id: 4.1.8
+    text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
+    audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c %U:%G $kubeletcafile; fi'' '
+    tests:
+      test_items:
+      - flag: root:root
+        set: true
+        compare:
+          op: eq
+          value: root:root
+    remediation: |
+      Run the following command to modify the ownership of the --client-ca-file. 
+      chown root:root <filename> 
+    scored: true
+
+  - id: 4.1.9
+    text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)"
+    audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'' '
+    tests:
+      test_items:
+      - flag: "644"
+        set: true
+        compare:
+          op: eq
+          value: "644"
+      - flag: "640"
+        set: true
+        compare:
+          op: eq
+          value: "640"
+      - flag: "600"
+        set: true
+        compare:
+          op: eq
+          value: "600"
+      bin_op: or
+    remediation: |
+      Run the following command (using the config file location identified in the Audit step)
+      chmod 644 $kubeletconf 
+    scored: true
+
+  - id: 4.1.10
+    text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)"
+    audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+    tests:
+      test_items:
+      - flag: root:root
+        set: true
+    remediation: |
+      Run the following command (using the config file location identified in the Audit step)
+      chown root:root $kubeletconf 
+    scored: true
+
+- id: 4.2
+  text: "Kubelet"
+  checks:
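+  # These checks inspect both the kubelet command line ("audit") and, via
+  # "audit_config", the kubelet config file; a test item's "path" is a JSONPath
+  # into that file, so a setting can satisfy the check from either source.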
+  - id: 4.2.1
+    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: "--anonymous-auth"
+        path: '{.authentication.anonymous.enabled}'
+        set: true
+        compare:
+          op: eq
+          value: false
+    remediation: |
+      If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+      false. 
+      If using executable arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
+      --anonymous-auth=false 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 4.2.2
+    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --authorization-mode
+        path: '{.authorization.mode}'
+        set: true
+        compare:
+          op: nothave
+          value: AlwaysAllow
+    remediation: |
+      If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If 
+      using executable arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the below parameter in KUBELET_AUTHZ_ARGS variable. 
+      --authorization-mode=Webhook 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service  
+    scored: true
+
+  - id: 4.2.3
+    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --client-ca-file
+        path: '{.authentication.x509.clientCAFile}'
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to 
+      the location of the client CA file. 
+      If using command line arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the below parameter in KUBELET_AUTHZ_ARGS variable. 
+      --client-ca-file=<path/to/client-ca-file> 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service 
+    scored: true
+
+  - id: 4.2.4
+    text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: "--read-only-port"
+        path: '{.readOnlyPort}'
+        set: true
+        compare:
+          op: eq
+          value: 0
+    remediation: |
+      If using a Kubelet config file, edit the file to set readOnlyPort to 0. 
+      If using command line arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
+      --read-only-port=0 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service 
+    scored: true
+
+  - id: 4.2.5
+    text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --streaming-connection-idle-timeout
+        path: '{.streamingConnectionIdleTimeout}'
+        set: true
+        compare:
+          op: noteq
+          value: 0
+      - flag: --streaming-connection-idle-timeout
+        path: '{.streamingConnectionIdleTimeout}'
+        set: false
+      bin_op: or
+    remediation: |
+      If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a 
+      value other than 0. 
+      If using command line arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
+      --streaming-connection-idle-timeout=5m 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service   
+    scored: true
+
+  - id: 4.2.6
+    text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --protect-kernel-defaults
+        path: '{.protectKernelDefaults}'
+        set: true
+        compare:
+          op: eq
+          value: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set protectKernelDefaults: true. 
+      If using command line arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
+      --protect-kernel-defaults=true 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service   
+    scored: true
+
+  - id: 4.2.7
+    text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored) "
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --make-iptables-util-chains
+        path: '{.makeIPTablesUtilChains}'
+        set: true
+        compare:
+          op: eq
+          value: true
+      - flag: --make-iptables-util-chains
+        path: '{.makeIPTablesUtilChains}'
+        set: false
+      bin_op: or
+    remediation: |
+      If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. 
+      If using command line arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      remove the --make-iptables-util-chains argument from the 
+      KUBELET_SYSTEM_PODS_ARGS variable. 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service   
+    scored: true
+
+  - id: 4.2.8
+    text: "Ensure that the --hostname-override argument is not set (Not Scored)"
+    # This is one of those properties that can only be set as a command line argument. 
+    # To check if the property is set as expected, we need to parse the kubelet command 
+    # instead of reading the Kubelet Configuration file.
+    audit: "/bin/ps -fC $kubeletbin "
+    tests:
+      test_items:
+      - flag: --hostname-override
+        set: false
+    remediation: |
+      Edit the kubelet service file $kubeletsvc 
+      on each worker node and remove the --hostname-override argument from the
+      KUBELET_SYSTEM_PODS_ARGS variable. 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service 
+    scored: false
+
+  - id: 4.2.9
+    text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Not Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --event-qps
+        path: '{.eventRecordQPS}'
+        set: true
+        compare:
+          op: eq
+          value: 0
+    remediation: |
+      If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+      If using command line arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the --event-qps parameter in the KUBELET_SYSTEM_PODS_ARGS variable to an appropriate level.
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service   
+    scored: false
+
+  - id: 4.2.10
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --tls-cert-file
+        path: '{.tlsCertFile}'
+        set: true
+      - flag: --tls-private-key-file
+        path: '{.tlsPrivateKeyFile}'
+        set: true
+    remediation: |
+      If using a Kubelet config file, edit the file to set tlsCertFile to the location 
+      of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile 
+      to the location of the corresponding private key file. 
+      If using command line arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the below parameters in KUBELET_CERTIFICATE_ARGS variable. 
+      --tls-cert-file=<path/to/tls-certificate-file>  
+      --tls-private-key-file=<path/to/tls-key-file> 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service   
+    scored: true
+
+  - id: 4.2.11
+    text: "Ensure that the --rotate-certificates argument is not set to false (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --rotate-certificates
+        path: '{.rotateCertificates}'
+        set: true
+        compare:
+          op: eq
+          value: true
+      - flag: --rotate-certificates
+        path: '{.rotateCertificates}'
+        set: false
+      bin_op: or
+    remediation: |
+      If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+      remove it altogether to use the default value. 
+      If using command line arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+      variable. 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service 
+    scored: true
+
+  - id: 4.2.12
+    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: RotateKubeletServerCertificate
+        path: '{.featureGates.RotateKubeletServerCertificate}'
+        set: true
+        compare:
+          op: eq
+          value: true
+    remediation: |
+      Edit the kubelet service file $kubeletsvc 
+      on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. 
+      --feature-gates=RotateKubeletServerCertificate=true 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service   
+    scored: true
+
+  - id: 4.2.13
+    text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)"
+    audit: "/bin/ps -fC $kubeletbin"      
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+      - flag: --tls-cipher-suites
+        path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+        set: true
+        compare:
+          op: valid_elements
+          value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+    remediation: |
+      If using a Kubelet config file, edit the file to set TLSCipherSuites: to 
+      TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+      or to a subset of these values. 
+      If using executable arguments, edit the kubelet service file 
+      $kubeletsvc on each worker node and 
+      set the --tls-cipher-suites parameter as follows, or to a subset of these values.  
+      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 
+      Based on your system, restart the kubelet service. For example: 
+      systemctl daemon-reload 
+      systemctl restart kubelet.service 
+    scored: false
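
A note on the 4.2.x checks above: each one audits the kubelet command line (`/bin/ps -fC $kubeletbin`) and falls back to `audit_config`, whose `path` fields are Kubernetes JSONPath expressions evaluated against the kubelet configuration. The following minimal sketch shows how such a `path` resolves against a config fragment, using `k8s.io/client-go/util/jsonpath`; the config fragment and the check name are hypothetical, and kube-bench's own evaluation may differ in detail.
```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// Hypothetical kubelet config fragment, shown as JSON for brevity.
	raw := []byte(`{"authentication": {"anonymous": {"enabled": false}}}`)
	var cfg interface{}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// The same expression check 4.2.1 uses for its audit_config path.
	jp := jsonpath.New("anonymous-auth")
	if err := jp.Parse("{.authentication.anonymous.enabled}"); err != nil {
		panic(err)
	}

	var out bytes.Buffer
	if err := jp.Execute(&out, cfg); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // "false", so the check's `eq false` comparison passes
}
```
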
diff --git a/cfg/cis-1.5/policies.yaml b/cfg/cis-1.5/policies.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5e24ec12f81320fd3c85daa6f3fb05e2b186e069
--- /dev/null
+++ b/cfg/cis-1.5/policies.yaml
@@ -0,0 +1,239 @@
+---
+controls:
+version: 1.5
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+- id: 5.1
+  text: "RBAC and Service Accounts"
+  checks:
+  - id: 5.1.1
+    text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
+    type: "manual"
+    remediation: |
+      Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+      if they need this role or if they could use a role with fewer privileges. 
+      Where possible, first bind users to a lower privileged role and then remove the
+      clusterrolebinding to the cluster-admin role : 
+      kubectl delete clusterrolebinding [name]   
+    scored: false
+
+  - id: 5.1.2
+    text: "Minimize access to secrets (Not Scored)"
+    type: "manual"
+    remediation: |
+      Where possible, remove get, list and watch access to secret objects in the cluster. 
+    scored: false
+
+  - id: 5.1.3
+    text: "Minimize wildcard use in Roles and ClusterRoles (Not Scored)"
+    type: "manual"
+    remediation: |
+      Where possible replace any use of wildcards in clusterroles and roles with specific
+      objects or actions.
+    scored: false
+
+  - id: 5.1.4
+    text: "Minimize access to create pods (Not Scored)"
+    type: "manual"
+    remediation: |
+      Where possible, remove create access to pod objects in the cluster. 
+    scored: false
+
+  - id: 5.1.5
+    text: "Ensure that default service accounts are not actively used. (Scored)"
+    type: "manual"
+    remediation: |
+      Create explicit service accounts wherever a Kubernetes workload requires specific access
+      to the Kubernetes API server. 
+      Modify the configuration of each default service account to include this value
+      automountServiceAccountToken: false 
+    scored: true
+
+  - id: 5.1.6
+    text: "Ensure that Service Account Tokens are only mounted where necessary (Not Scored)"
+    type: "manual"
+    remediation: |
+      Modify the definition of pods and service accounts which do not need to mount service
+      account tokens to disable it. 
+    scored: false
+
+- id: 5.2
+  text: "Pod Security Policies"
+  checks:
+  - id: 5.2.1
+    text: "Minimize the admission of privileged containers (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that
+      the .spec.privileged field is omitted or set to false. 
+    scored: false
+
+  - id: 5.2.2
+    text: "Minimize the admission of containers wishing to share the host process ID namespace (Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the 
+      .spec.hostPID field is omitted or set to false. 
+    scored: true
+
+  - id: 5.2.3
+    text: "Minimize the admission of containers wishing to share the host IPC namespace (Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the 
+      .spec.hostIPC field is omitted or set to false. 
+    scored: true
+
+  - id: 5.2.4
+    text: "Minimize the admission of containers wishing to share the host network namespace (Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the 
+      .spec.hostNetwork field is omitted or set to false. 
+    scored: true
+
+  - id: 5.2.5
+    text: "Minimize the admission of containers with allowPrivilegeEscalation (Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the 
+      .spec.allowPrivilegeEscalation field is omitted or set to false.
+    scored: true
+
+  - id: 5.2.6
+    text: "Minimize the admission of root containers (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the 
+      .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of 
+      UIDs not including 0.
+    scored: false
+
+  - id: 5.2.7
+    text: "Minimize the admission of containers with the NET_RAW capability (Not Scored)"
+    type: "manual"
+    remediation: |
+      Create a PSP as described in the Kubernetes documentation, ensuring that the 
+      .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+    scored: false
+
+  - id: 5.2.8
+    text: "Minimize the admission of containers with added capabilities (Not Scored)"
+    type: "manual"
+    remediation: |
+      Ensure that allowedCapabilities is not present in PSPs for the cluster unless 
+      it is set to an empty array. 
+    scored: false
+
+  - id: 5.2.9
+    text: "Minimize the admission of containers with capabilities assigned (Not Scored) "
+    type: "manual"
+    remediation: |
+      Review the use of capabilities in applications running on your cluster. Where a namespace
+      contains applications which do not require any Linux capabilities to operate, consider adding
+      a PSP which forbids the admission of containers which do not drop all capabilities.
+    scored: false
+
+- id: 5.3
+  text: "Network Policies and CNI"
+  checks:
+  - id: 5.3.1
+    text: "Ensure that the CNI in use supports Network Policies (Not Scored)"
+    type: "manual"
+    remediation: |
+      If the CNI plugin in use does not support network policies, consideration should be given to
+      making use of a different plugin, or finding an alternate mechanism for restricting traffic
+      in the Kubernetes cluster. 
+    scored: false
+
+  - id: 5.3.2
+    text: "Ensure that all Namespaces have Network Policies defined (Scored)"
+    type: "manual"
+    remediation: |
+      Follow the documentation and create NetworkPolicy objects as you need them. 
+    scored: true
+
+- id: 5.4
+  text: "Secrets Management"
+  checks:
+  - id: 5.4.1
+    text: "Prefer using secrets as files over secrets as environment variables (Not Scored)"
+    type: "manual"
+    remediation: |
+      If possible, rewrite application code to read secrets from mounted secret files, rather than
+      from environment variables. 
+    scored: false
+
+  - id: 5.4.2
+    text: "Consider external secret storage (Not Scored)"
+    type: "manual"
+    remediation: |
+      Refer to the secrets management options offered by your cloud provider or a third-party
+      secrets management solution. 
+    scored: false
+
+- id: 5.5
+  text: "Extensible Admission Control"
+  checks:
+  - id: 5.5.1
+    text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and setup image provenance. 
+    scored: false
+
+- id: 5.6
+  text: "General Policies"
+  checks:
+  - id: 5.6.1
+    text: "Create administrative boundaries between resources using namespaces (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the documentation and create namespaces for objects in your deployment as you need
+      them. 
+    scored: false
+
+  - id: 5.6.2
+    text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Not Scored)"
+    type: "manual"
+    remediation: |
+      Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+      would need to enable alpha features in the apiserver by passing the
+      "--feature-gates=AllAlpha=true" argument.
+      Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+      parameter to "--feature-gates=AllAlpha=true" 
+      KUBE_API_ARGS="--feature-gates=AllAlpha=true" 
+      Based on your system, restart the kube-apiserver service. For example: 
+      systemctl restart kube-apiserver.service 
+      Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+      example is as below: 
+      apiVersion: v1 
+      kind: Pod 
+      metadata: 
+        name: trustworthy-pod   
+        annotations:   
+          seccomp.security.alpha.kubernetes.io/pod: docker/default 
+      spec: 
+        containers:   
+          - name: trustworthy-container       
+            image: sotrustworthy:latest 
+    scored: false
+
+  - id: 5.6.3
+    text: "Apply Security Context to Your Pods and Containers (Not Scored)"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and apply security contexts to your pods. For a
+      suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+      Containers. 
+    scored: false
+
+  - id: 5.6.4
+    text: "The default namespace should not be used (Scored)"
+    type: "manual"
+    remediation: |
+      Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+      resources and that all new resources are created in a specific namespace. 
+    scored: true
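
Most of the 5.x checks above are manual. For 5.1.1, the enumeration step ("identify all clusterrolebindings to the cluster-admin role") can be scripted; a hedged client-go sketch, assuming a cluster reachable through the default kubeconfig and a client-go release whose List calls take a context:
```
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Report every ClusterRoleBinding bound to the cluster-admin role.
	crbs, err := client.RbacV1().ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, crb := range crbs.Items {
		if crb.RoleRef.Name == "cluster-admin" {
			fmt.Printf("%s -> %v\n", crb.Name, crb.Subjects)
		}
	}
}
```
Bindings that survive this review can then be removed with `kubectl delete clusterrolebinding [name]`, as the remediation describes.
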
diff --git a/cfg/config.yaml b/cfg/config.yaml
index a89dcc114f9e7dcc5abdad6c79f26b3c090eb329..71699cab6e7759601c5fe7ee5ca0e02cc67ec6e7 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -142,11 +142,34 @@ node:
     defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml
     defaultkubeconfig: "/etc/kubernetes/proxy.conf"
 
+etcd:
+  components:
+    - etcd
+
+  etcd:
+    bins:
+      - "etcd"
+    confs:
+      - /etc/kubernetes/manifests/etcd.yaml
+      - /etc/kubernetes/manifests/etcd.manifest
+      - /etc/etcd/etcd.conf
+      - /var/snap/etcd/common/etcd.conf.yml
+    defaultconf: /etc/kubernetes/manifests/etcd.yaml
+
+controlplane:
+  components: []
+
+policies:
+  components: []
+
+
 version_mapping:
   "1.11": "cis-1.3"
   "1.12": "cis-1.3"
   "1.13": "cis-1.4"
   "1.14": "cis-1.4"
-  "1.15": "cis-1.4"
+  "1.15": "cis-1.5"
+  "1.16": "cis-1.5"
+  "1.17": "cis-1.5"
   "ocp-3.10": "rh-0.7"
   "ocp-3.11": "rh-0.7"
\ No newline at end of file
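
The `version_mapping` table above is what ties a detected Kubernetes version to a benchmark directory under `cfg/`. A minimal sketch of the lookup; the real code in `cmd/common.go` reads the table from the merged viper config and also normalizes full versions such as "1.15.3" before the lookup, and `mapToBenchmark` here is an illustrative name:
```
package main

import (
	"fmt"
	"strings"
)

// Mirror of the version_mapping table above; illustrative only.
var versionMapping = map[string]string{
	"1.11": "cis-1.3", "1.12": "cis-1.3",
	"1.13": "cis-1.4", "1.14": "cis-1.4",
	"1.15": "cis-1.5", "1.16": "cis-1.5", "1.17": "cis-1.5",
}

// mapToBenchmark trims trailing version segments ("1.15.3" -> "1.15")
// until a mapping is found.
func mapToBenchmark(kubeVersion string) (string, error) {
	v := kubeVersion
	for v != "" {
		if benchmark, found := versionMapping[v]; found {
			return benchmark, nil
		}
		if i := strings.LastIndex(v, "."); i >= 0 {
			v = v[:i]
		} else {
			v = ""
		}
	}
	return "", fmt.Errorf("unable to find a matching Benchmark Version match for kubernetes version: %s", kubeVersion)
}

func main() {
	b, err := mapToBenchmark("1.16.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(b) // cis-1.5
}
```
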
diff --git a/check/check.go b/check/check.go
index bec5284820cc1801590d4223fe4ffcf641dc8297..23e8c760dcacd1998756e53bb3a553b67a6e2098 100644
--- a/check/check.go
+++ b/check/check.go
@@ -49,6 +49,13 @@ const (
 	// FEDERATED a federated deployment.
 	FEDERATED NodeType = "federated"
 
+	// ETCD an etcd node
+	ETCD NodeType = "etcd"
+	// CONTROLPLANE a control plane node
+	CONTROLPLANE NodeType = "controlplane"
+	// POLICIES a node to run policies from
+	POLICIES NodeType = "policies"
+
 	// MANUAL Check Type
 	MANUAL string = "manual"
 )
diff --git a/cmd/common.go b/cmd/common.go
index de3c6e81c8ba0fa3449b716e6ae1dedd5e247d95..a6a58bc48641b09b2068c24e3c5d29729adbf297 100644
--- a/cmd/common.go
+++ b/cmd/common.go
@@ -219,6 +219,12 @@ func loadConfig(nodetype check.NodeType) string {
 		file = masterFile
 	case check.NODE:
 		file = nodeFile
+	case check.CONTROLPLANE:
+		file = controlplaneFile
+	case check.ETCD:
+		file = etcdFile
+	case check.POLICIES:
+		file = policiesFile
 	}
 
 	benchmarkVersion, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
@@ -313,22 +319,32 @@ func getBenchmarkVersion(kubeVersion, benchmarkVersion string, v *viper.Viper) (
 
 // isMaster verify if master components are running on the node.
 func isMaster() bool {
-	glog.V(2).Info("Checking if the current node is running master components")
-	masterConf := viper.Sub(string(check.MASTER))
-	if masterConf == nil {
-		glog.V(2).Info("No master components found to be running")
+	return isThisNodeRunning(check.MASTER)
+}
+
+// isEtcd verify if etcd components are running on the node.
+func isEtcd() bool {
+	return isThisNodeRunning(check.ETCD)
+}
+
+func isThisNodeRunning(nodeType check.NodeType) bool {
+	glog.V(2).Infof("Checking if the current node is running %s components", nodeType)
+	nodeTypeConf := viper.Sub(string(nodeType))
+	if nodeTypeConf == nil {
+		glog.V(2).Infof("No %s components found to be running", nodeType)
 		return false
 	}
-	components, err := getBinariesFunc(masterConf, check.MASTER)
 
+	components, err := getBinariesFunc(nodeTypeConf, nodeType)
 	if err != nil {
 		glog.V(2).Info(err)
 		return false
 	}
 	if len(components) == 0 {
-		glog.V(2).Info("No master binaries specified")
+		glog.V(2).Infof("No %s binaries specified", nodeType)
 		return false
 	}
+
 	return true
 }
 
@@ -360,3 +376,34 @@ func PrintOutput(output string, outputFile string) {
 		}
 	}
 }
+
+var benchmarkVersionToTargetsMap = map[string][]string{
+	"cis-1.3": {string(check.MASTER), string(check.NODE)},
+	"cis-1.4": {string(check.MASTER), string(check.NODE)},
+	"cis-1.5": {string(check.MASTER), string(check.NODE), string(check.CONTROLPLANE), string(check.ETCD), string(check.POLICIES)},
+}
+
+// validTargets helps determine if the targets
+// are legitimate for the benchmarkVersion.
+func validTargets(benchmarkVersion string, targets []string) bool {
+	providedTargets, found := benchmarkVersionToTargetsMap[benchmarkVersion]
+	if !found {
+		return false
+	}
+
+	for _, pt := range targets {
+		f := false
+		for _, t := range providedTargets {
+			if pt == strings.ToLower(t) {
+				f = true
+				break
+			}
+		}
+
+		if !f {
+			return false
+		}
+	}
+
+	return true
+}
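
As an aside, `validTargets` does a nested linear scan, which is fine for lists this small. The same semantics (allowed names lowercased once, every provided target must match) can also be phrased as a set lookup; `validTargetsSet` below is a hypothetical alternative shown for reference, not part of this change:
```
package main

import (
	"fmt"
	"strings"
)

var benchmarkVersionToTargetsMap = map[string][]string{
	"cis-1.3": {"master", "node"},
	"cis-1.4": {"master", "node"},
	"cis-1.5": {"master", "node", "controlplane", "etcd", "policies"},
}

// validTargetsSet mirrors validTargets above with a set lookup: allowed
// names are lowercased into a set, then each provided target is checked.
func validTargetsSet(benchmarkVersion string, targets []string) bool {
	allowed, found := benchmarkVersionToTargetsMap[benchmarkVersion]
	if !found {
		return false
	}
	set := make(map[string]bool, len(allowed))
	for _, t := range allowed {
		set[strings.ToLower(t)] = true
	}
	for _, pt := range targets {
		if !set[pt] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validTargetsSet("cis-1.3", []string{"master", "etcd"})) // false
	fmt.Println(validTargetsSet("cis-1.5", []string{"master", "etcd"})) // true
}
```
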
diff --git a/cmd/common_test.go b/cmd/common_test.go
index 85e60a41a423cd5f1424b959eef92c5548ce8d79..f0dc131d8b06b3f80965abab9761e263ce5d4d19 100644
--- a/cmd/common_test.go
+++ b/cmd/common_test.go
@@ -192,7 +192,10 @@ func TestMapToCISVersion(t *testing.T) {
 		{kubeVersion: "1.11", succeed: true, exp: "cis-1.3"},
 		{kubeVersion: "1.12", succeed: true, exp: "cis-1.3"},
 		{kubeVersion: "1.13", succeed: true, exp: "cis-1.4"},
-		{kubeVersion: "1.16", succeed: true, exp: "cis-1.4"},
+		{kubeVersion: "1.14", succeed: true, exp: "cis-1.4"},
+		{kubeVersion: "1.15", succeed: true, exp: "cis-1.5"},
+		{kubeVersion: "1.16", succeed: true, exp: "cis-1.5"},
+		{kubeVersion: "1.17", succeed: true, exp: "cis-1.5"},
 		{kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
 		{kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
 		{kubeVersion: "unknown", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: unknown"},
@@ -340,6 +343,49 @@ func TestGetBenchmarkVersion(t *testing.T) {
 	}
 }
 
+func TestValidTargets(t *testing.T) {
+	cases := []struct {
+		name      string
+		benchmark string
+		targets   []string
+		expected  bool
+	}{
+		{
+			name:      "cis-1.3 no etcd",
+			benchmark: "cis-1.3",
+			targets:   []string{"master", "etcd"},
+			expected:  false,
+		},
+		{
+			name:      "cis-1.4 valid",
+			benchmark: "cis-1.4",
+			targets:   []string{"master", "node"},
+			expected:  true,
+		},
+		{
+			name:      "cis-1.5 no dummy",
+			benchmark: "cis-1.5",
+			targets:   []string{"master", "node", "controlplane", "etcd", "dummy"},
+			expected:  false,
+		},
+		{
+			name:      "cis-1.5 valid",
+			benchmark: "cis-1.5",
+			targets:   []string{"master", "node", "controlplane", "etcd", "policies"},
+			expected:  true,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			ret := validTargets(c.benchmark, c.targets)
+			if ret != c.expected {
+				t.Fatalf("Expected %t, got %t", c.expected, ret)
+			}
+		})
+	}
+}
+
 func loadConfigForTest() (*viper.Viper, error) {
 	viperWithData := viper.New()
 	viperWithData.SetConfigFile(filepath.Join("..", cfgDir, "config.yaml"))
diff --git a/cmd/root.go b/cmd/root.go
index a94faf1a886089da56e905293a6610eb51670dfe..543269f95d2be3bc80c8d76cc85bc22069e46fe3 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -44,6 +44,9 @@ var (
 	pgSQL              bool
 	masterFile         = "master.yaml"
 	nodeFile           = "node.yaml"
+	etcdFile           = "etcd.yaml"
+	controlplaneFile   = "controlplane.yaml"
+	policiesFile       = "policies.yaml"
 	noResults          bool
 	noSummary          bool
 	noRemediations     bool
@@ -59,14 +62,40 @@ var RootCmd = &cobra.Command{
 	Short: "Run CIS Benchmarks checks against a Kubernetes deployment",
 	Long:  `This tool runs the CIS Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`,
 	Run: func(cmd *cobra.Command, args []string) {
+		benchmarkVersion, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+		if err != nil {
+			exitWithError(err)
+		}
+
 		if isMaster() {
 			glog.V(1).Info("== Running master checks ==\n")
-			filename := loadConfig(check.MASTER)
-			runChecks(check.MASTER, filename)
+			runChecks(check.MASTER, loadConfig(check.MASTER))
+
+			// Control Plane is only valid for CIS 1.5 and later;
+			// this is a gatekeeper for previous versions.
+			if validTargets(benchmarkVersion, []string{string(check.CONTROLPLANE)}) {
+				glog.V(1).Info("== Running control plane checks ==\n")
+				runChecks(check.CONTROLPLANE, loadConfig(check.CONTROLPLANE))
+			}
+		}
+
+		// Etcd is only valid for CIS 1.5 and later;
+		// this is a gatekeeper for previous versions.
+		if isEtcd() && validTargets(benchmarkVersion, []string{string(check.ETCD)}) {
+			glog.V(1).Info("== Running etcd checks ==\n")
+			runChecks(check.ETCD, loadConfig(check.ETCD))
 		}
+
 		glog.V(1).Info("== Running node checks ==\n")
-		filename := loadConfig(check.NODE)
-		runChecks(check.NODE, filename)
+		runChecks(check.NODE, loadConfig(check.NODE))
+
+		// Policies is only valid for CIS 1.5 and later;
+		// this is a gatekeeper for previous versions.
+		if validTargets(benchmarkVersion, []string{string(check.POLICIES)}) {
+			glog.V(1).Info("== Running policies checks ==\n")
+			runChecks(check.POLICIES, loadConfig(check.POLICIES))
+		}
+
 	},
 }
 
diff --git a/cmd/run.go b/cmd/run.go
index 95663116129b4080d4ad22d9c72c79c1c107dba6..b95defac98ac4f4d0ddbfa1e27a93e2501050993 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -14,10 +14,10 @@ import (
 
 func init() {
 	RootCmd.AddCommand(runCmd)
-	runCmd.Flags().StringSliceP("sections", "s", []string{},
-		`Specify sections of the benchmark to run. These names need to match the filenames in the cfg/<version> directory.
-	For example, to run the tests specified in master.yaml and etcd.yaml, specify --sections=master,etcd 
-	If no sections are specified, run tests from all files in the cfg/<version> directory.
+	runCmd.Flags().StringSliceP("targets", "s", []string{},
+		`Specify targets of the benchmark to run. These names need to match the filenames in the cfg/<version> directory.
+	For example, to run the tests specified in master.yaml and etcd.yaml, specify --targets=master,etcd 
+	If no targets are specified, run tests from all files in the cfg/<version> directory.
 	`)
 }
 
@@ -27,7 +27,7 @@ var runCmd = &cobra.Command{
 	Short: "Run tests",
 	Long:  `Run tests. If no arguments are specified, runs tests from all files`,
 	Run: func(cmd *cobra.Command, args []string) {
-		sections, err := cmd.Flags().GetStringSlice("sections")
+		targets, err := cmd.Flags().GetStringSlice("targets")
 		if err != nil {
 			exitWithError(err)
 		}
@@ -37,20 +37,24 @@ var runCmd = &cobra.Command{
 			exitWithError(err)
 		}
 
+		glog.V(2).Infof("Checking targets %v for %v", targets, benchmarkVersion)
+		if len(targets) > 0 && !validTargets(benchmarkVersion, targets) {
+			exitWithError(fmt.Errorf("The specified --targets %q does not apply to the CIS Benchmark %s\nValid targets are %v", strings.Join(targets, ","), benchmarkVersion, benchmarkVersionToTargetsMap[benchmarkVersion]))
+		}
+
 		// Merge version-specific config if any.
 		path := filepath.Join(cfgDir, benchmarkVersion)
 		mergeConfig(path)
 
-		err = run(sections, benchmarkVersion)
+		err = run(targets, benchmarkVersion)
 		if err != nil {
 			fmt.Printf("Error in run: %v\n", err)
 		}
 	},
 }
 
-func run(sections []string, benchmarkVersion string) (err error) {
-
-	yamlFiles, err := getTestYamlFiles(sections, benchmarkVersion)
+func run(targets []string, benchmarkVersion string) (err error) {
+	yamlFiles, err := getTestYamlFiles(targets, benchmarkVersion)
 	if err != nil {
 		return err
 	}
@@ -66,12 +70,11 @@ func run(sections []string, benchmarkVersion string) (err error) {
 	return nil
 }
 
-func getTestYamlFiles(sections []string, benchmarkVersion string) (yamlFiles []string, err error) {
-
-	// Check that the specified sections have corresponding YAML files in the config directory
+func getTestYamlFiles(targets []string, benchmarkVersion string) (yamlFiles []string, err error) {
+	// Check that the specified targets have corresponding YAML files in the config directory
 	configFileDirectory := filepath.Join(cfgDir, benchmarkVersion)
-	for _, section := range sections {
-		filename := section + ".yaml"
+	for _, target := range targets {
+		filename := translate(target) + ".yaml"
 		file := filepath.Join(configFileDirectory, filename)
 		if _, err := os.Stat(file); err != nil {
 			return nil, fmt.Errorf("file %s not found for version %s", filename, benchmarkVersion)
@@ -79,7 +82,7 @@ func getTestYamlFiles(sections []string, benchmarkVersion string) (yamlFiles []s
 		yamlFiles = append(yamlFiles, file)
 	}
 
-	// If no sections were specified, we will run tests from all the files in the directory
+	// If no targets were specified, we will run tests from all the files in the directory
 	if len(yamlFiles) == 0 {
 		yamlFiles, err = getYamlFilesFromDir(configFileDirectory)
 		if err != nil {
@@ -89,3 +92,7 @@ func getTestYamlFiles(sections []string, benchmarkVersion string) (yamlFiles []s
 
 	return yamlFiles, err
 }
+
+func translate(target string) string {
+	return strings.Replace(strings.ToLower(target), "worker", "node", -1)
+}
diff --git a/cmd/run_test.go b/cmd/run_test.go
index dd330c1206f297c31c9d99cd83ef1eee3483133a..2be443f28edc172be947ff62e07a3a816fae1629 100644
--- a/cmd/run_test.go
+++ b/cmd/run_test.go
@@ -10,34 +10,34 @@ import (
 func TestGetTestYamlFiles(t *testing.T) {
 	cases := []struct {
 		name      string
-		sections  []string
+		targets   []string
 		benchmark string
 		succeed   bool
 		expCount  int
 	}{
 		{
-			name:      "Specify two sections",
-			sections:  []string{"one", "two"},
+			name:      "Specify two targets",
+			targets:   []string{"one", "two"},
 			benchmark: "benchmark",
 			succeed:   true,
 			expCount:  2,
 		},
 		{
-			name:      "Specify a section that doesn't exist",
-			sections:  []string{"one", "missing"},
+			name:      "Specify a target that doesn't exist",
+			targets:   []string{"one", "missing"},
 			benchmark: "benchmark",
 			succeed:   false,
 		},
 		{
-			name:      "No sections specified - should return everything except config.yaml",
-			sections:  []string{},
+			name:      "No targets specified - should return everything except config.yaml",
+			targets:   []string{},
 			benchmark: "benchmark",
 			succeed:   true,
 			expCount:  3,
 		},
 		{
 			name:      "Specify benchmark that doesn't exist",
-			sections:  []string{"one"},
+			targets:   []string{"one"},
 			benchmark: "missing",
 			succeed:   false,
 		},
@@ -67,7 +67,7 @@ func TestGetTestYamlFiles(t *testing.T) {
 
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			yamlFiles, err := getTestYamlFiles(c.sections, c.benchmark)
+			yamlFiles, err := getTestYamlFiles(c.targets, c.benchmark)
 			if err != nil && c.succeed {
 				t.Fatalf("Error %v", err)
 			}
@@ -82,3 +82,41 @@ func TestGetTestYamlFiles(t *testing.T) {
 		})
 	}
 }
+
+func TestTranslate(t *testing.T) {
+	cases := []struct {
+		name     string
+		original string
+		expected string
+	}{
+		{
+			name:     "keep",
+			original: "controlplane",
+			expected: "controlplane",
+		},
+		{
+			name:     "translate",
+			original: "worker",
+			expected: "node",
+		},
+		{
+			name:     "translateLower",
+			original: "Worker",
+			expected: "node",
+		},
+		{
+			name:     "Lower",
+			original: "ETCD",
+			expected: "etcd",
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			ret := translate(c.original)
+			if ret != c.expected {
+				t.Fatalf("Expected %q, got %q", c.expected, ret)
+			}
+		})
+	}
+}
diff --git a/cmd/util.go b/cmd/util.go
index 61ab7f82254a80850f716217cc73effbee2e24a1..b079a5e76849cfbfb527c10a22e11463ac2ec328 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -391,12 +391,18 @@ The following %q programs have been searched, but none of them have been found:
 These program names are provided in the config.yaml, section '%s.%s.bins'
 `
 
-	componentRoleName := "master node"
-	componentType := "master"
+	var componentRoleName, componentType string
+	switch nodetype {
 
-	if nodetype == check.NODE {
+	case check.NODE:
 		componentRoleName = "worker node"
 		componentType = "node"
+	case check.ETCD:
+		componentRoleName = "etcd node"
+		componentType = "etcd"
+	default:
+		componentRoleName = "master node"
+		componentType = "master"
 	}
 
 	binList := ""
diff --git a/integration/integration_test.go b/integration/integration_test.go
index b5c03527c04cde673ab580a9fc25e50b3fd4f7fd..ee9b76e7a2264c2dcf1f5a596fe9bb1dca599112 100644
--- a/integration/integration_test.go
+++ b/integration/integration_test.go
@@ -40,19 +40,19 @@ func TestRunWithKind(t *testing.T) {
 	}{
 		{
 			TestName:      "job",
-			KindCfg:       "./testdata/add-tls-kind.yaml",
+			KindCfg:       "./testdata/add-tls-kind-k8s114.yaml",
 			KubebenchYAML: "../job.yaml",
 			ExpectedFile:  "./testdata/job.data",
 		},
 		{
 			TestName:      "job-node",
-			KindCfg:       "./testdata/add-tls-kind.yaml",
+			KindCfg:       "./testdata/add-tls-kind-k8s114.yaml",
 			KubebenchYAML: "../job-node.yaml",
 			ExpectedFile:  "./testdata/job-node.data",
 		},
 		{
 			TestName:      "job-master",
-			KindCfg:       "./testdata/add-tls-kind.yaml",
+			KindCfg:       "./testdata/add-tls-kind-k8s114.yaml",
 			KubebenchYAML: "../job-master.yaml",
 			ExpectedFile:  "./testdata/job-master.data",
 		},
diff --git a/integration/testdata/add-tls-kind.yaml b/integration/testdata/add-tls-kind-k8s114.yaml
similarity index 71%
rename from integration/testdata/add-tls-kind.yaml
rename to integration/testdata/add-tls-kind-k8s114.yaml
index 3e385009b20058dda526533bf48e3727dc7d6e1f..8a189731ed0696110252cad8b14e5b8dc42d74bd 100644
--- a/integration/testdata/add-tls-kind.yaml
+++ b/integration/testdata/add-tls-kind-k8s114.yaml
@@ -10,4 +10,10 @@ kubeadmConfigPatchesJson6902:
   patch: |
     - op: add
       path: /tlsCipherSuites
-      value: ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"]
\ No newline at end of file
+      value: ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"]
+
+nodes:
+# the control plane node config
+- role: control-plane
+  image: "kindest/node:v1.14.6"
+
\ No newline at end of file