From 68c2ee2ebf9aa727302b9eca44185872b997d8a8 Mon Sep 17 00:00:00 2001
From: Yoav Rotem <yoavrotems97@gmail.com>
Date: Thu, 29 Apr 2021 17:08:41 +0300
Subject: [PATCH] Add support for Redhat openshift 4.0 cis 1.1.0 (#860)

---
 cfg/rh-1.0/config.yaml       |    2 +
 cfg/rh-1.0/controlplane.yaml |   62 ++
 cfg/rh-1.0/etcd.yaml         |  154 +++++
 cfg/rh-1.0/master.yaml       | 1262 ++++++++++++++++++++++++++++++++++
 cfg/rh-1.0/node.yaml         |  453 ++++++++++++
 cfg/rh-1.0/policies.yaml     |  283 ++++++++
 cmd/common.go                |    6 +-
 cmd/common_test.go           |   31 +-
 cmd/master.go                |    2 +-
 cmd/node.go                  |    2 +-
 cmd/root.go                  |    2 +-
 cmd/run.go                   |    2 +-
 cmd/util.go                  |   17 +-
 cmd/util_test.go             |   17 +-
 14 files changed, 2267 insertions(+), 28 deletions(-)
 create mode 100644 cfg/rh-1.0/config.yaml
 create mode 100644 cfg/rh-1.0/controlplane.yaml
 create mode 100644 cfg/rh-1.0/etcd.yaml
 create mode 100644 cfg/rh-1.0/master.yaml
 create mode 100644 cfg/rh-1.0/node.yaml
 create mode 100644 cfg/rh-1.0/policies.yaml

diff --git a/cfg/rh-1.0/config.yaml b/cfg/rh-1.0/config.yaml
new file mode 100644
index 0000000..b783945
--- /dev/null
+++ b/cfg/rh-1.0/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/rh-1.0/controlplane.yaml b/cfg/rh-1.0/controlplane.yaml
new file mode 100644
index 0000000..3922263
--- /dev/null
+++ b/cfg/rh-1.0/controlplane.yaml
@@ -0,0 +1,62 @@
+---
+controls:
+version: rh-1.0
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        audit: |
+          # To verify user authentication is enabled
+          oc describe authentication
+          # To verify that an identity provider is configured
+          oc get identity
+          # To verify that a custom cluster-admin user exists
+          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
+          # To verify that kubeadmin is removed, no results should be returned
+          oc get secrets kubeadmin -n kube-system
+        type: manual
+        remediation: |
+          Configure an identity provider for the OpenShift cluster.
+          Understanding identity provider configuration | Authentication | OpenShift
+          Container Platform 4.5. Once an identity provider has been defined,
+          you can use RBAC to define and apply permissions.
+          After you define an identity provider and create a new cluster-admin user,
+          remove the kubeadmin user to improve cluster security.
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Manual)"
+        audit: |
+          #To view kube apiserver log files
+          oc adm node-logs --role=master --path=kube-apiserver/
+          #To view openshift apiserver log files
+          oc adm node-logs --role=master --path=openshift-apiserver/
+          #To verify kube apiserver audit config
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]'
+          #To verify openshift apiserver audit config
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]'
+        type: manual
+        remediation: |
+          No remediation required.
+        scored: false
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        audit: |
+          #To verify openshift apiserver audit config
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]'
+          #To verify kube apiserver audit config
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]'
+        type: manual
+        remediation: |
+          In OpenShift 4.6 and higher, if appropriate for your needs,
+          modify the audit policy.
+        scored: false
diff --git a/cfg/rh-1.0/etcd.yaml b/cfg/rh-1.0/etcd.yaml
new file mode 100644
index 0000000..13cd3fd
--- /dev/null
+++ b/cfg/rh-1.0/etcd.yaml
@@ -0,0 +1,154 @@
+---
+controls:
+version: rh-1.0
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration Files"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
+        audit: |
+          # For --cert-file
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
+          done 2>/dev/null
+          # For --key-file
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "file"
+              compare:
+                op: regex
+                value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)'
+        remediation: |
+          OpenShift does not use the etcd-certfile or etcd-keyfile flags.
+          Certificates for etcd are managed by the etcd cluster operator.
+        scored: false
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
+        audit: |
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          This setting is managed by the cluster etcd operator. No remediation required.
+        scored: false
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Manual)"
+        audit: |
+          # Returns 0 if found, 1 if not found
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --auto-tls=true 2>&1>/dev/null ; echo exit_code=$?
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "exit_code"
+              compare:
+                op: eq
+                value: "1"
+        remediation: |
+          This setting is managed by the cluster etcd operator. No remediation required.
+        scored: false
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
+        audit: |
+          # For --peer-cert-file
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
+          done 2>/dev/null
+          # For --peer-key-file
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "file"
+              compare:
+                op: regex
+                value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)'
+        remediation: |
+          None. This configuration is managed by the etcd operator.
+        scored: false
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
+        audit: |
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          This setting is managed by the cluster etcd operator. No remediation required.
+        scored: false
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
+        audit: |
+          # Returns 0 if found, 1 if not found
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>&1>/dev/null ; echo exit_code=$?
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "exit_code"
+              compare:
+                op: eq
+                value: "1"
+        remediation: |
+          This setting is managed by the cluster etcd operator. No remediation required.
+        scored: false
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
+        audit: |
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
+          done 2>/dev/null
+          for i in $(oc get pods -oname -n openshift-etcd)
+          do
+            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "file"
+              compare:
+                op: regex
+                value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/etcd-(?:serving|peer-client)-ca\/ca-bundle\.(?:crt|key)'
+        remediation: |
+          None required. Certificates for etcd are managed by the OpenShift cluster etcd operator.
+        scored: false
diff --git a/cfg/rh-1.0/master.yaml b/cfg/rh-1.0/master.yaml
new file mode 100644
index 0000000..dfeb7ec
--- /dev/null
+++ b/cfg/rh-1.0/master.yaml
@@ -0,0 +1,1262 @@
+---
+controls:
+version: rh-1.0
+id: 1
+text: "Master Node Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Master Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name )
+          do
+            oc exec -n openshift-kube-apiserver $i --   stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml;
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)"
+        audit: |
+          for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name )
+          do
+            oc exec -n openshift-kube-apiserver $i -- \
+            stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager)
+          do
+            oc exec -n openshift-kube-controller-manager $i --   stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml;
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)"
+        audit: |
+          for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager)
+          do
+            oc exec -n openshift-kube-controller-manager $i -- \
+            stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.5
+        text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name )
+          do
+            oc exec -n openshift-kube-scheduler $i --   stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml;
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual)"
+        audit: |
+          for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name )
+          do
+            oc exec -n openshift-kube-scheduler $i -- \
+            stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd )
+          do
+            oc rsh -n openshift-etcd $i stat -c "$i %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
+        audit: |
+          for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd )
+          do
+            oc rsh -n openshift-etcd $i stat -c "$i %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: true
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # For CNI multus
+          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null
+          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null
+          # For SDN pods
+          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
+          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
+          # For OVS pods
+          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
+          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
+          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
+        audit: |
+          # For CNI multus
+          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n %U:%G\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null
+          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n %U:%G\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null
+          # For SDN pods
+          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
+          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
+          # For OVS pods in 4.5
+          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
+          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
+          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
+          # For OVS pods in 4.6 TBD
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)"
+        audit: |
+          for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n permissions=%a" /var/lib/etcd/member; done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "700"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)"
+        audit: |
+          for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n %U:%G" /var/lib/etcd/member; done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.13
+        text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $(oc get nodes -o name)
+          do
+            oc debug $i -- chroot /host stat -c "$i %n permissions=%a" /etc/kubernetes/kubeconfig
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.14
+        text: "Ensure that the admin.conf file ownership is set to root:root (Manual)"
+        audit: |
+          for i in $(oc get nodes -o name)
+          do
+            oc debug $i -- chroot /host stat -c "$i %n %U:%G" /etc/kubernetes/kubeconfig
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.15
+        text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name)
+          do
+            oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.16
+        text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)"
+        audit: |
+          for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name)
+          do
+            oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.17
+        text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name)
+          do
+            oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)"
+        audit: |
+          for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name)
+          do
+            oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
+        audit: |
+          # Should return root:root for all files and directories
+          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
+          do
+            # echo $i static-pod-certs
+            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
+            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
+            # echo $i static-pod-resources
+            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
+            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
+          do
+            # echo $i static-pod-certs
+            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$i %n permissions=%a" {} \;
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.21
+        text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)"
+        audit: |
+          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
+          do
+            # echo $i static-pod-certs
+            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$i %n permissions=%a" {} \;
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that anonymous requests are authorized (Manual)"
+        audit: |
+          # To verify that userGroups include system:unauthenticated
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]'
+          # To verify that userGroups include system:unauthenticated
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[].userGroups'
+          # To verify RBAC is enabled
+          oc get clusterrolebinding
+          oc get clusterrole
+          oc get rolebinding
+          oc get role
+        tests:
+          test_items:
+            - flag: "system:unauthenticated"
+        remediation: |
+          None required. The default configuration should not be modified.
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --basic-auth-file argument is not set (Manual)"
+        audit: |
+          oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "basic-auth"
+          oc -n openshift-apiserver get cm config -o yaml | grep --color "basic-auth"
+          # Add | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }; to create AVAILABLE = true/false form
+          oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "basic-auth-file"
+              set: false
+            - flag: "available"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          None required. --basic-auth-file cannot be configured on OpenShift.
+        scored: false
+
+      - id: 1.2.3
+        text: "Ensure that the --token-auth-file parameter is not set (Manual)"
+        audit: |
+          # Verify that the token-auth-file flag is not present
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
+          #Verify that the authentication operator is running
+          oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "token-auth-file"
+              set: false
+            - flag: "available"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          None is required.
+        scored: false
+
+      - id: 1.2.4
+        text: "Use https for kubelet connections (Manual)"
+        audit: |
+          #for 4.5
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
+          #for 4.6
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          #for both 4.5 and 4.6
+          oc -n openshift-apiserver describe secret serving-cert
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key"
+        remediation: |
+          No remediation is required.
+          OpenShift platform components use X.509 certificates for authentication.
+          OpenShift manages the CAs and certificates for platform components. This is not configurable.
+        scored: false
+
+      - id: 1.2.5
+        text: "Ensure that the kubelet uses certificates to authenticate (Manual)"
+        audit: |
+          #for 4.5
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
+          #for 4.6
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          #for both 4.5 and 4.6
+          oc -n openshift-apiserver describe secret serving-cert
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key"
+        remediation: |
+          No remediation is required.
+          OpenShift platform components use X.509 certificates for authentication.
+          OpenShift manages the CAs and certificates for platform components.
+          This is not configurable.
+        scored: false
+
+      - id: 1.2.6
+        text: "Verify that the kubelet certificate authority is set as appropriate (Manual)"
+        audit: |
+          # for 4.5
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
+          # for 4.6
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt"
+        remediation: |
+          No remediation is required.
+          OpenShift platform components use X.509 certificates for authentication.
+          OpenShift manages the CAs and certificates for platform components.
+          This is not configurable.
+        scored: false
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)"
+        audit: |
+          # To verify that the authorization-mode argument is not used
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          # To verify RBAC is configured:
+          oc get clusterrolebinding
+          oc get clusterrole
+          oc get rolebinding
+          oc get role
+        audit_config: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.authorization-mode}"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+            - path: "{.authorization-mode}"
+              flag: "authorization-mode"
+              set: false
+        remediation: |
+          None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode.
+        scored: false
+
+      - id: 1.2.8
+        text: "Verify that the Node authorizer is enabled (Manual)"
+        audit: |
+          # For OCP 4.5 and earlier verify that authorization-mode is not used
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
+          # Verify on each node that the kubelet is not configured with the authorization-mode flag
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode
+            oc debug node/${node} -- chroot /host ps -aux | grep kubelet | grep authorization-mode
+          done
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        audit_config: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.authorization-mode}"
+              compare:
+                op: has
+                value: "Node"
+            - path: "{.authorization-mode}"
+              flag: "authorization-mode"
+              set: false
+        remediation: |
+          No remediation is required.
+        scored: false
+
+      - id: 1.2.9
+        text: "Verify that RBAC is enabled (Manual)"
+        audit: |
+          # For 4.5 To verify that the authorization-mode argument is not used
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          # To verify RBAC is used
+          oc get clusterrolebinding
+          oc get clusterrole
+          oc get rolebinding
+          oc get role
+          # For 4.6, verify that the authorization-mode argument includes RBAC
+        audit_config: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.authorization-mode}"
+              compare:
+                op: has
+                value: "RBAC"
+            - path: "{.authorization-mode}"
+              flag: "authorization-mode"
+              set: false
+        remediation: |
+          None. It is not possible to disable RBAC.
+        scored: false
+
+      - id: 1.2.10
+        text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)"
+        audit: |
+          #Verify the APIPriorityAndFairness feature-gate
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "APIPriorityAndFairness=true"
+            - flag: "EventRateLimit"
+              set: false
+        remediation: |
+          No remediation is required
+        scored: false
+
+      - id: 1.2.11
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)"
+        audit: |
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          test_items:
+            - flag: "AlwaysAdmit"
+              set: false
+        remediation: |
+          No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift.
+        scored: false
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
+        audit: |
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          test_items:
+            - flag: "AlwaysPullImages"
+              set: false
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin SecurityContextDeny is not set (Manual)"
+        audit: |
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextDeny and SecurityContextConstraint compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          #Verify that SecurityContextConstraints are deployed
+          oc get scc
+          oc describe scc restricted
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "SecurityContextConstraint"
+              set: true
+            - flag: "anyuid"
+            - flag: "hostaccess"
+            - flag: "hostmount-anyuid"
+            - flag: "hostnetwork"
+            - flag: "node-exporter"
+            - flag: "nonroot"
+            - flag: "privileged"
+            - flag: "restricted"
+        remediation: |
+          None required. The Security Context Constraint admission controller cannot be disabled in OpenShift 4.
+        scored: false
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin ServiceAccount is set (Manual)"
+        audit: |
+          #Verify the list of admission controllers for 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has ServiceAccount compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          #Verify that Service Accounts are present
+          oc get sa -A
+        tests:
+          test_items:
+            - flag: "ServiceAccount"
+              set: true
+        remediation: |
+          None required. OpenShift is configured to use service accounts by default.
+        scored: false
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)"
+        audit: |
+          #Verify the list of admission controllers for 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          test_items:
+            - flag: "NamespaceLifecycle"
+        remediation: |
+          Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle.
+        scored: false
+
+      - id: 1.2.16
+        text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)"
+        audit: |
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          #Verify that SecurityContextConstraints are deployed
+          oc get scc
+          oc describe scc restricted
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "SecurityContextConstraint"
+            - flag: "anyuid"
+            - flag: "hostaccess"
+            - flag: "hostmount-anyuid"
+            - flag: "hostnetwork"
+            - flag: "node-exporter"
+            - flag: "nonroot"
+            - flag: "privileged"
+            - flag: "restricted"
+        remediation: |
+          None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled.
+        scored: false
+
+      - id: 1.2.17
+        text: "Ensure that the admission control plugin NodeRestriction is set (Manual)"
+        audit: |
+          # For 4.5, review the control plane manifest https://github.com/openshift/origin/blob/release-4.5/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go#L132
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          test_items:
+            - flag: "NodeRestriction"
+        remediation: |
+          The NodeRestriction plugin cannot be disabled.
+        scored: false
+
+      - id: 1.2.18
+        text: "Ensure that the --insecure-bind-address argument is not set (Manual)"
+        audit: |
+          # InsecureBindAddress=true should not be in the results
+          oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{range .spec.observedConfig.apiServerArguments.feature-gates[*]}{@}{"\n"}{end}'
+          # Result should be only 6443
+          oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
+          # Result should be only 8443
+          oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "insecure-bind-address"
+              set: false
+            - flag: 6443
+            - flag: 8443
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 1.2.19
+        text: "Ensure that the --insecure-port argument is set to 0 (Manual)"
+        audit: |
+          # Should return 6443
+          oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
+          # For OCP 4.6 and above
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]'
+          output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]')
+          [ "$output" == "null" ] && echo "ocp 4.5 has insecure-port set to \"0\" compiled" || echo $output
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "\"0\""
+            - flag: "6443"
+        remediation: |
+          None required. The configuration is managed by the API server operator.
+        scored: false
+
+      - id: 1.2.20
+        text: "Ensure that the --secure-port argument is not set to 0 (Manual)"
+        audit: |
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig'
+          # Should return only 6443
+          echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'`
+        tests:
+          bin_op: and
+          test_items:
+            - flag: '"bindAddress": "0.0.0.0:6443"'
+            - flag: "ports"
+              compare:
+                op: regex
+                value: '\s*(?:6443\s*){1,}$'
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 1.2.21
+        text: "Ensure that the healthz endpoint is protected by RBAC (Manual)"
+        type: manual
+        audit: |
+          # Verify endpoints
+          oc -n openshift-kube-apiserver describe endpoints
+          # Check config for ports, livenessProbe, readinessProbe, healthz
+          oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # Test to validate RBAC enabled on the apiserver endpoint; check with non-admin role
+          oc project openshift-kube-apiserver POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') PORT=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
+          # Following should return 403 Forbidden
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -k
+          # Create a service account to test RBAC
+          oc create -n openshift-kube-apiserver sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa -n openshift-kube-apiserver get-token permission-test-sa)
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete -n openshift-kube-apiserver sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          None required as profiling data is protected by RBAC.
+        scored: false
+
+      - id: 1.2.22
+        text: "Ensure that the --audit-log-path argument is set (Manual)"
+        audit: |
+          # Should return "/var/log/kube-apiserver/audit.log"
+          output=$(oc get configmap config -n openshift-kube-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null
+          # Should return 0
+          echo exit_code=$?
+          # Should return "/var/log/openshift-apiserver/audit.log"
+          output=$(oc get configmap config -n openshift-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null
+          # Should return 0
+          echo exit_code=$?
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "/var/log/kube-apiserver/audit.log"
+            - flag: "/var/log/openshift-apiserver/audit.log"
+            - flag: "exit_code=0"
+            - flag: "null"
+        remediation: |
+          None required. This is managed by the cluster apiserver operator.
+        scored: false
+
+      - id: 1.2.23
+        text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation for log forwarding. Forwarding logs to third party systems
+          https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html
+
+        scored: false
+
+      - id: 1.2.24
+        text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)"
+        audit: |
+          #NOTICE
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles)
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles)
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "maximumRetainedFiles"
+              compare:
+                op: gte
+                value: 10
+            - flag: "audit-log-maxbackup"
+              compare:
+                op: gte
+                value: 10
+        remediation: |
+          Set the maximumRetainedFiles parameter to 10 or as an appropriate number of files. maximumRetainedFiles: 10
+        scored: false
+
+      - id: 1.2.25
+        text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 or as appropriate (Manual)"
+        audit: |
+          #NOTICE
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes)
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes)
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "maximumFileSizeMegabytes"
+              compare:
+                op: gte
+                value: 100
+            - flag: "audit-log-maxsize"
+              compare:
+                op: gte
+                value: 100
+        remediation: |
+          Set the audit-log-maxsize parameter to 100 or as an appropriate number.
+          maximumFileSizeMegabytes: 100
+        scored: false
+
+      - id: 1.2.26
+        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
+        audit: |
+          echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.requestTimeoutSeconds`
+        tests:
+          test_items:
+            - flag: "requestTimeoutSeconds"
+        remediation: |
+          TBD
+        scored: false
+
+      - id: 1.2.27
+        text: "Ensure that the --service-account-lookup argument is set to true (Manual)"
+        audit: |
+          # For OCP 4.5
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep service-account-lookup
+          # For OCP 4.6 and above
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"]'
+          output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]')
+          [ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output
+        tests:
+          test_items:
+            - flag: "service-account-lookup=true"
+        remediation: |
+          TBD
+        scored: false
+
+      - id: 1.2.28
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[]
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs"
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs"
+        remediation: |
+          The OpenShift API server does not use the service-account-key-file argument.
+          The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles.
+          OpenShift does not reuse the apiserver TLS key. This is not configurable.
+        scored: false
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)"
+        audit: |
+          # etcd Certificate File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.certFile
+          # etcd Key File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.keyFile
+          # NOTICE 4.6 extension
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key"
+        remediation: |
+          OpenShift automatically manages TLS and client certificate authentication for etcd.
+          This is not configurable.
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: |
+          # TLS Cert File - openshift-kube-apiserver
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.certFile
+          # TLS Key File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.servingInfo.keyFile'
+          # NOTICE 4.6 extension
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"]'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"]'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API server
+          when serving content in order to enable clients to access the API server at a different host name or without
+          the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+          Follow the directions in the OpenShift documentation User-provided certificates for the API server
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.clientCA
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API
+          server when serving content in order to enable clients to access the API server at a different host name
+          or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+
+          User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace.
+          Update the API server cluster configuration,
+          the apiserver/cluster resource, to enable the use of the user-provided certificate.
+        scored: false
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)"
+        audit: |
+          #etcd CA File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.ca
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt"
+        remediation: |
+          None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA.
+        scored: false
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: |
+          # encrypt the etcd datastore
+          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the OpenShift documentation for Encrypting etcd data | Authentication | OpenShift Container Platform 4.5
+          https://docs.openshift.com/container-platform/4.5/security/encrypting-etcd.html
+        scored: false
+
+      - id: 1.2.34
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          # encrypt the etcd datastore
+          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the Kubernetes documentation and configure a EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: false
+
+      - id: 1.2.35
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        type: manual
+        remediation: |
+          Verify that the tlsSecurityProfile is set to the value you chose.
+          Note: The HAProxy Ingress controller image does not support TLS 1.3
+          and because the Modern profile requires TLS 1.3, it is not supported.
+          The Ingress Operator converts the Modern profile to Intermediate.
+          The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1,
+          and TLS 1.3 of a Custom profile to 1.2.
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that garbage collection is configured as appropriate (Manual)"
+        type: manual
+        remediation: |
+          To configure, follow the directions in Configuring garbage collection for containers and images
+          https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)"
+        type: manual
+        audit: |
+          # Verify configuration for ports, livenessProbe, readinessProbe, healthz
+          oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # Verify endpoints
+          oc -n openshift-kube-controller-manager describe endpoints
+          # Test to validate RBAC enabled on the controller endpoint; check with non-admin role
+          oc project openshift-kube-controller-manager
+          POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
+          PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
+          # Following should return 403 Forbidden
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k
+          # Create a service account to test RBAC
+          oc create -n openshift-kube-controller-manager sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa)
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete -n openshift-kube-controller-manager sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          None required; profiling is protected by RBAC.
+        scored: false
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)"
+        audit: |
+          echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'`
+        tests:
+          test_items:
+            - flag: "use-service-account-credentials"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager.
+          The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift.
+          This operator is configured via KubeControllerManager custom resource.
+        scored: false
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key"
+        remediation: |
+          None required.
+          OpenShift manages the service account credentials for the scheduler automatically.
+        scored: false
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt"
+        remediation: |
+          None required.
+          Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform.
+        scored: false
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: |
+          oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["feature-gates"][]'
+        tests:
+          test_items:
+            - flag: "RotateKubeletServerCertificate"
+              compare:
+                op: eq
+                value: "true"
+        remediation: |
+          None required.
+          Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform.
+        scored: false
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)"
+        audit: |
+          echo port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["port"][]'`
+          echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'`
+          #Following should fail with a http code 403
+          POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "secure-port"
+              compare:
+                op: eq
+                value: "\"10257\""
+            - flag: "port"
+              compare:
+                op: eq
+                value: "\"0\""
+            - flag: "\"code\": 403"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the master node and ensure the correct value for the --bind-address parameter
+        scored: false
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)"
+        type: manual
+        audit: |
+          # check configuration for ports, livenessProbe, readinessProbe, healthz
+          oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # Test to verify endpoints
+          oc -n openshift-kube-scheduler describe endpoints
+          # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role
+          oc project openshift-kube-scheduler
+          POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
+          PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
+          # Should return 403 Forbidden
+          oc rsh ${POD} curl http://localhost:${PORT}/metrics -k
+          # Create a service account to test RBAC
+          oc create sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa get-token permission-test-sa)
+          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          A fix to this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488 None required.
+          Profiling is protected by RBAC and cannot be disabled.
+        scored: false
+
+      - id: 1.4.2
+        text: "Verify that the scheduler API service is protected by authentication and authorization (Manual)"
+        type: manual
+        audit: |
+          # To verify endpoints
+          oc -n openshift-kube-scheduler describe endpoints
+          # To verify that bind-address is not used in the configuration and that port is set to 0
+          oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # To test for RBAC:
+          oc project openshift-kube-scheduler
+          POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
+          POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}')
+          PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
+          # Should return a 403
+          oc rsh ${POD} curl http://${POD_IP}:${PORT}/metrics
+          # Create a service account to test RBAC
+          oc create sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa get-token permission-test-sa)
+          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          By default, the --bind-address argument is not present,
+          the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0.
+          Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488
+        scored: false
diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml
new file mode 100644
index 0000000..80dbd68
--- /dev/null
+++ b/cfg/rh-1.0/node.yaml
@@ -0,0 +1,453 @@
+---
+controls:
+version: rh-1.0
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/systemd/system/kubelet.service
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          By default, the kubelet service file has permissions of 644.
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: |
+          # Should return root:root for each node
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/systemd/system/kubelet.service
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          By default, the kubelet service file has ownership of root:root.
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname)
+          do
+            oc exec -n openshift-sdn $i -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None needed.
+        scored: false
+
+      - id: 4.1.4
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
+        audit: |
+          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname)
+          do
+            oc exec -n openshift-sdn $i -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required. The configuration is managed by OpenShift operators.
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Check permissions
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet.conf
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet.conf
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet-ca.crt
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet-ca.crt
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required.
+        scored: true
+
+      - id: 4.1.9
+        text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /var/lib/kubelet/kubeconfig
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+        scored: true
+
+      - id: 4.1.10
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /var/lib/kubelet/kubeconfig
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required.
+        scored: true
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host grep -B4 -A1 anonymous: /etc/kubernetes/kubelet.conf
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "enabled: true"
+              set: false
+        remediation: |
+          Follow the instructions in the documentation to create a Kubelet config CRD
+          and set the anonymous-auth is set to false.
+        scored: true
+
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        type: manual
+        # NOTE: this audit takes a long time, as the connection must fail and time out
+        audit: |
+          POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
+          TOKEN=$(oc whoami -t)
+          for name in $(oc get nodes -ojsonpath='{.items[*].metadata.name}')
+          do
+            oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$name/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode'
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "Connection timed out"
+        remediation: |
+          None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes.
+        scored: true
+
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host grep clientCAFile: /etc/kubernetes/kubelet.conf
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "clientCAFile"
+              compare:
+                op: eq
+                value: "/etc/kubernetes/kubelet-ca.crt"
+        remediation: |
+          None required. Changing the clientCAFile value is unsupported.
+        scored: true
+
+      - id: 4.2.4
+        text: "Verify that the read only port is not used or is set to 0 (Automated)"
+        audit: |
+          echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null
+          echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "read-only-port"
+              compare:
+                op: has
+                value: "[\"0\"]"
+            - flag: "read-only-port"
+              set: false
+        remediation: |
+          In earlier versions of OpenShift 4, the read-only-port argument is not used.
+          Follow the instructions in the documentation to create a Kubelet config CRD
+          and set the --read-only-port is set to 0.
+        scored: true
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
+        audit: |
+          # Should return 1 for each node
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout
+            echo exit_code=$?
+          done 2>/dev/null
+          # Should return 1 for each node
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf
+            echo exit_code=$?
+          done 2>/dev/null
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              compare:
+                op: noteq
+                value: 0
+            - flag: "exit_code"
+              compare:
+                op: eq
+                value: 1
+        remediation: |
+          Follow the instructions in the documentation to create a Kubelet config CRD and set
+          the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0.
+        scored: true
+
+      - id: 4.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
+          do
+            oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf;
+          done
+        tests:
+          test_items:
+            - flag: protectKernelDefaults
+              set: false
+        remediation: |
+          None required. The OpenShift 4 kubelet modifies the system tunable;
+          using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables
+          don't match the kubelet configuration and the OpenShift node will fail to start.
+        scored: false
+
+      - id: 4.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)"
+        audit: |
+          /bin/bash
+          flag=make-iptables-util-chains
+          opt=makeIPTablesUtilChains
+          # look at each machineconfigpool
+          while read -r pool nodeconfig; do
+            # true by default
+            value='true'
+            # first look for the flag
+            oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }"
+            # if the above command exited with 100, the flag was false
+            [ $? == 100 ] && value='false'
+            # now look in the yaml KubeletConfig
+            yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done)
+            echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100"
+            [ $? == 100 ] && value='false'
+            echo "Pool $pool has $flag ($opt) set to $value"
+          done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name')
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "set to true"
+        remediation: |
+          None required. The --make-iptables-util-chains argument is set to true by default.
+        scored: false
+
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        audit: |
+          echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override`
+          echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override`
+        tests:
+          test_items:
+            - flag: hostname-override
+              set: false
+        remediation: |
+          By default, --hostname-override argument is not set.
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+        audit: |
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
+          do
+            oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf;
+          done
+          oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
+          oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
+        type: "manual"
+        remediation: |
+          Follow the documentation to edit kubelet parameters
+          https://docs.openshift.com/container-platform/4.5/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters
+          KubeAPIQPS: <QPS>
+        scored: true
+
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable.
+        scored: true
+
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
+        audit: |
+          #Verify the rotateKubeletClientCertificate feature gate is not set to false
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate
+          done 2> /dev/null
+          # Verify the rotateCertificates argument is set to true
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf;
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: rotateCertificates
+              compare:
+                op: eq
+                value: true
+            - flag: rotateKubeletClientCertificates
+              compare:
+                op: noteq
+                value: false
+            - flag: rotateKubeletClientCertificates
+              set: false
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: |
+          #Verify the rotateKubeletServerCertificate feature gate is on
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
+          do
+            oc debug node/${node} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf;
+          done 2> /dev/null
+          # Verify the rotateCertificates argument is set to true
+          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
+          do
+            oc debug node/${node} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf;
+          done 2> /dev/null
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              compare:
+                op: eq
+                value: true
+            - flag: rotateCertificates
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, kubelet server certificate rotation is disabled.
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: |
+          # needs verification
+          # verify cipher suites
+          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+          oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
+          oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
+          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+          #check value for tlsSecurityProfile; null is returned if default is used
+          oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile
+        type: manual
+        remediation: |
+          Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
+          Configuring Ingress
+        scored: false
diff --git a/cfg/rh-1.0/policies.yaml b/cfg/rh-1.0/policies.yaml
new file mode 100644
index 0000000..2a629b4
--- /dev/null
+++ b/cfg/rh-1.0/policies.yaml
@@ -0,0 +1,283 @@
+---
+controls:
+version: rh-1.0
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role :
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Policies"
+    checks:
+      - id: 5.2.1
+        text: "Minimize the admission of privileged containers (Manual)"
+        audit: |
+          # needs verification
+          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
+          do
+            echo "$i"; oc describe scc $i | grep "Allow Privileged";
+          done
+        tests:
+          test_items:
+            - flag: "false"
+        remediation: |
+          Create a SCC as described in the OpenShift documentation, ensuring that the Allow
+          Privileged field is set to false.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
+        audit: |
+          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
+          do
+            echo "$i"; oc describe scc $i | grep "Allow Host PID";
+          done
+        tests:
+          test_items:
+            - flag: "false"
+        remediation: |
+          Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host
+          PID field is set to false.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
+        audit: |
+          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
+          do
+            echo "$i"; oc describe scc $i | grep "Allow Host IPC";
+          done
+        tests:
+          test_items:
+            - flag: "false"
+        remediation: |
+          Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host
+          IPC field is set to false.
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
+        audit: |
+          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
+          do
+            echo "$i"; oc describe scc $i | grep "Allow Host Network";
+          done
+        tests:
+          test_items:
+            - flag: "false"
+        remediation: |
+          Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host
+          Network field is omitted or set to false.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
+        audit: |
+          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
+          do
+            echo "$i"; oc describe scc $i | grep "Allow Privilege Escalation";
+          done
+        tests:
+          test_items:
+            - flag: "false"
+        remediation: |
+          Create a SCC as described in the OpenShift documentation, ensuring that the Allow
+          Privilege Escalation field is omitted or set to false.
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of root containers (Manual)"
+        audit: |
+          # needs verification
+          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
+          do
+            echo "$i";
+            oc describe scc $i | grep "Run As User Strategy";
+          done
+          #For SCCs with MustRunAs verify that the range of UIDs does not include 0
+          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
+          do
+            echo "$i";
+            oc describe scc $i | grep "\sUID";
+          done
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "MustRunAsNonRoot"
+            - flag: "MustRunAs"
+              compare:
+                op: nothave
+                value: 0
+        remediation: |
+          None required. By default, OpenShift includes the non-root SCC with the Run As User
+          Strategy set to MustRunAsNonRoot. If additional SCCs are appropriate, follow the
+          OpenShift documentation to create custom SCCs.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+        audit: |
+          # needs verification
+          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
+          do
+            echo "$i";
+            oc describe scc $i | grep "Required Drop Capabilities";
+          done
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "ALL"
+            - flag: "NET_RAW"
+        remediation: |
+          Create a SCC as described in the OpenShift documentation, ensuring that the Required
+          Drop Capabilities is set to include either NET_RAW or ALL.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with added capabilities (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster
+          except for the privileged SCC.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider
+          adding a SCC which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Manual)"
+        type: "manual"
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html)
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          To enable the default seccomp profile, use the reserved value /runtime/default that will
+          make sure that the pod uses the default policy available on the host.
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cmd/common.go b/cmd/common.go
index 60c7189..63d0c9d 100644
--- a/cmd/common.go
+++ b/cmd/common.go
@@ -313,12 +313,12 @@ func loadTargetMapping(v *viper.Viper) (map[string][]string, error) {
 	return benchmarkVersionToTargetsMap, nil
 }
 
-func getBenchmarkVersion(kubeVersion, benchmarkVersion string, v *viper.Viper) (bv string, err error) {
+func getBenchmarkVersion(kubeVersion, benchmarkVersion, platformName string, v *viper.Viper) (bv string, err error) {
 	if !isEmpty(kubeVersion) && !isEmpty(benchmarkVersion) {
 		return "", fmt.Errorf("It is an error to specify both --version and --benchmark flags")
 	}
-	if isEmpty(benchmarkVersion) && isEmpty(kubeVersion) {
-		benchmarkVersion = getPlatformBenchmarkVersion(getPlatformName())
+	if isEmpty(benchmarkVersion) && isEmpty(kubeVersion) && !isEmpty(platformName){
+		benchmarkVersion = getPlatformBenchmarkVersion(platformName)
 	}
 
 	if isEmpty(benchmarkVersion) {
diff --git a/cmd/common_test.go b/cmd/common_test.go
index eefe501..fce1ef1 100644
--- a/cmd/common_test.go
+++ b/cmd/common_test.go
@@ -322,11 +322,11 @@ func TestGetBenchmarkVersion(t *testing.T) {
 		t.Fatalf("Unable to load config file %v", err)
 	}
 
-	type getBenchmarkVersionFnToTest func(kubeVersion, benchmarkVersion string, v *viper.Viper) (string, error)
+	type getBenchmarkVersionFnToTest func(kubeVersion, benchmarkVersion, platformName string, v *viper.Viper) (string, error)
 
-	withFakeKubectl := func(kubeVersion, benchmarkVersion string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
+	withFakeKubectl := func(kubeVersion, benchmarkVersion, platformName string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
 		execCode := `#!/bin/sh
-		echo '{"serverVersion": {"major": "1", "minor": "15", "gitVersion": "v1.15.10"}}'
+		echo '{"serverVersion": {"major": "1", "minor": "18", "gitVersion": "v1.18.10"}}'
 		`
 		restore, err := fakeExecutableInPath("kubectl", execCode)
 		if err != nil {
@@ -334,39 +334,40 @@ func TestGetBenchmarkVersion(t *testing.T) {
 		}
 		defer restore()
 
-		return fn(kubeVersion, benchmarkVersion, v)
+		return fn(kubeVersion, benchmarkVersion, platformName, v)
 	}
 
-	withNoPath := func(kubeVersion, benchmarkVersion string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
+	withNoPath := func(kubeVersion, benchmarkVersion, platformName string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
 		restore, err := prunePath()
 		if err != nil {
 			t.Fatal("Failed when calling prunePath ", err)
 		}
 		defer restore()
 
-		return fn(kubeVersion, benchmarkVersion, v)
+		return fn(kubeVersion, benchmarkVersion, platformName, v)
 	}
 
-	type getBenchmarkVersionFn func(string, string, *viper.Viper, getBenchmarkVersionFnToTest) (string, error)
+	type getBenchmarkVersionFn func(string, string, string, *viper.Viper, getBenchmarkVersionFnToTest) (string, error)
 	cases := []struct {
 		n                string
 		kubeVersion      string
 		benchmarkVersion string
+		platformName     string
 		v                *viper.Viper
 		callFn           getBenchmarkVersionFn
 		exp              string
 		succeed          bool
 	}{
-		{n: "both versions", kubeVersion: "1.11", benchmarkVersion: "cis-1.3", exp: "cis-1.3", callFn: withNoPath, v: viper.New(), succeed: false},
-		{n: "no version-missing-kubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "cis-1.6", callFn: withNoPath, succeed: true},
-		{n: "no version-fakeKubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "cis-1.5", callFn: withFakeKubectl, succeed: true},
-		{n: "kubeVersion", kubeVersion: "1.15", benchmarkVersion: "", v: viperWithData, exp: "cis-1.5", callFn: withNoPath, succeed: true},
-		{n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
-		{n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
-		{n: "gke10", kubeVersion: "gke-1.0", benchmarkVersion: "", v: viperWithData, exp: "gke-1.0", callFn: withNoPath, succeed: true},
+		{n: "both versions", kubeVersion: "1.11", benchmarkVersion: "cis-1.3", platformName: "", exp: "cis-1.3", callFn: withNoPath, v: viper.New(), succeed: false},
+		{n: "no version-missing-kubectl", kubeVersion: "", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "cis-1.6", callFn: withNoPath, succeed: true},
+		{n: "no version-fakeKubectl", kubeVersion: "", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "cis-1.6", callFn: withFakeKubectl, succeed: true},
+		{n: "kubeVersion", kubeVersion: "1.15", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "cis-1.5", callFn: withNoPath, succeed: true},
+		{n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
+		{n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
+		{n: "gke10", kubeVersion: "gke-1.0", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "gke-1.0", callFn: withNoPath, succeed: true},
 	}
 	for _, c := range cases {
-		rv, err := c.callFn(c.kubeVersion, c.benchmarkVersion, c.v, getBenchmarkVersion)
+		rv, err := c.callFn(c.kubeVersion, c.benchmarkVersion, c.platformName, c.v, getBenchmarkVersion)
 		if c.succeed {
 			if err != nil {
 				t.Errorf("[%q]-Unexpected error: %v", c.n, err)
diff --git a/cmd/master.go b/cmd/master.go
index e8db8e3..3135398 100644
--- a/cmd/master.go
+++ b/cmd/master.go
@@ -28,7 +28,7 @@ var masterCmd = &cobra.Command{
 	Short: "Run Kubernetes benchmark checks from the master.yaml file.",
 	Long:  `Run Kubernetes benchmark checks from the master.yaml file in cfg/<version>.`,
 	Run: func(cmd *cobra.Command, args []string) {
-		bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+		bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformName(), viper.GetViper())
 		if err != nil {
 			exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err))
 		}
diff --git a/cmd/node.go b/cmd/node.go
index 4eb13db..2afba59 100644
--- a/cmd/node.go
+++ b/cmd/node.go
@@ -28,7 +28,7 @@ var nodeCmd = &cobra.Command{
 	Short: "Run Kubernetes benchmark checks from the node.yaml file.",
 	Long:  `Run Kubernetes benchmark checks from the node.yaml file in cfg/<version>.`,
 	Run: func(cmd *cobra.Command, args []string) {
-		bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+		bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformName(), viper.GetViper())
 		if err != nil {
 			exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err))
 		}
diff --git a/cmd/root.go b/cmd/root.go
index 42453d0..47907b5 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -68,7 +68,7 @@ var RootCmd = &cobra.Command{
 	Short: "Run CIS Benchmarks checks against a Kubernetes deployment",
 	Long:  `This tool runs the CIS Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`,
 	Run: func(cmd *cobra.Command, args []string) {
-		bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+		bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformName(), viper.GetViper())
 		if err != nil {
 			exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err))
 		}
diff --git a/cmd/run.go b/cmd/run.go
index 16b6641..2e4c59b 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -32,7 +32,7 @@ var runCmd = &cobra.Command{
 			exitWithError(fmt.Errorf("unable to get `targets` from command line :%v", err))
 		}
 
-		bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+		bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformName(), viper.GetViper())
 		if err != nil {
 			exitWithError(fmt.Errorf("unable to get benchmark version. error: %v", err))
 		}
diff --git a/cmd/util.go b/cmd/util.go
index 9f05699..494fa28 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -460,6 +460,8 @@ func getPlatformBenchmarkVersion(platform string) string {
 		return "gke-1.0"
 	case "ocp-3.10":
 		return "rh-0.7"
+	case "ocp-4.1":
+		return "rh-1.0"
 	}
 	return ""
 }
@@ -475,15 +477,26 @@ func getOpenShiftVersion() string{
 		if err == nil {
 			versionRe := regexp.MustCompile(`oc v(\d+\.\d+)`)
 			subs := versionRe.FindStringSubmatch(string(out))
+			if len(subs) < 1 {
+				versionRe = regexp.MustCompile(`Client Version:\s*(\d+\.\d+)`)
+				subs = versionRe.FindStringSubmatch(string(out))
+			}
 			if len(subs) > 1 {
 				glog.V(2).Infof("OCP output '%s' \nplatform is %s \nocp %v",string(out),getPlatformNameFromVersion(string(out)),subs[1])
 				ocpBenchmarkVersion, err := getOcpValidVersion(subs[1])
 				if err == nil{
 					return fmt.Sprintf("ocp-%s", ocpBenchmarkVersion)
+				} else {
+					glog.V(1).Infof("Can't get getOcpValidVersion: %v", err)
 				}
+			} else {
+				glog.V(1).Infof("Can't parse version output: %v", subs)
 			}
+		} else {
+			glog.V(1).Infof("Can't use oc command: %v", err)
 		}
-
+	} else {
+		glog.V(1).Infof("Can't find oc command: %v", err)
 	}
 	return ""
 }
@@ -493,7 +506,7 @@ func getOcpValidVersion(ocpVer string) (string, error) {
 
 	for (!isEmpty(ocpVer)) {
 		glog.V(3).Info(fmt.Sprintf("getOcpBenchmarkVersion check for ocp: %q \n", ocpVer))
-		if ocpVer == "3.10"{
+		if ocpVer == "3.10" || ocpVer == "4.1"{
 			glog.V(1).Info(fmt.Sprintf("getOcpBenchmarkVersion found valid version for ocp: %q \n", ocpVer))
 			return ocpVer, nil
 		}
diff --git a/cmd/util_test.go b/cmd/util_test.go
index 6636cc0..81dc687 100644
--- a/cmd/util_test.go
+++ b/cmd/util_test.go
@@ -594,12 +594,19 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
 			want: "",
 		},
 		{
-			name: "open shift",
+			name: "openshift3",
 			args: args{
 				platform: "ocp-3.10",
 			},
 			want: "rh-0.7",
 		},
+		{
+			name: "openshift4",
+			args: args{
+				platform: "ocp-4.1",
+			},
+			want: "rh-1.0",
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -620,18 +627,20 @@ func Test_getOcpValidVersion(t *testing.T) {
 		{openShiftVersion: "3.11", succeed: true, exp: "3.10"},
 		{openShiftVersion: "3.10", succeed: true, exp: "3.10"},
 		{openShiftVersion: "2.9", succeed: false, exp: ""},
-		{openShiftVersion: "4.1", succeed: false, exp: ""},
+		{openShiftVersion: "4.1", succeed: true, exp: "4.1"},
+		{openShiftVersion: "4.5", succeed: true, exp: "4.1"},
+		{openShiftVersion: "4.6", succeed: true, exp: "4.1"},
 		{openShiftVersion: "invalid", succeed: false, exp: ""},
 	}
 	for _, c := range cases {
 		ocpVer,_ := getOcpValidVersion(c.openShiftVersion)
 		if c.succeed {
 			if c.exp != ocpVer {
-				t.Fatalf("getOcpValidVersion(%q) - Got %q expected %s", c.openShiftVersion, ocpVer, c.exp)
+				t.Errorf("getOcpValidVersion(%q) - Got %q expected %s", c.openShiftVersion, ocpVer, c.exp)
 			}
 		} else {
 			if len(ocpVer) > 0 {
-				t.Fatalf("getOcpValidVersion(%q) - Expected empty string but Got %s", c.openShiftVersion, ocpVer)
+				t.Errorf("getOcpValidVersion(%q) - Expected empty string but Got %s", c.openShiftVersion, ocpVer)
 			}
 		}
 	}
-- 
GitLab