diff --git a/cfg/config.yaml b/cfg/config.yaml
index 0bc8abd2e542204a6e48cc169f45be1942ff86db..be0ea321b8a068e0691a108c6a752b4bc6053c49 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -246,6 +246,7 @@ version_mapping:
   "1.20": "cis-1.20"
   "eks-1.0.1": "eks-1.0.1"
   "gke-1.0": "gke-1.0"
+  "gke-1.2.0": "gke-1.2.0"
   "ocp-3.10": "rh-0.7"
   "ocp-3.11": "rh-0.7"
   "ocp-4.0": "rh-1.0"
@@ -278,6 +279,12 @@ target_mapping:
     - "etcd"
     - "policies"
     - "managedservices"
+  "gke-1.2.0":
+    - "master"
+    - "node"
+    - "controlplane"
+    - "policies"
+    - "managedservices"
   "eks-1.0.1":
     - "master"
     - "node"
diff --git a/cfg/gke-1.2.0/config.yaml b/cfg/gke-1.2.0/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b7839455a64a067a15886474164b6d4476c49133
--- /dev/null
+++ b/cfg/gke-1.2.0/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/gke-1.2.0/controlplane.yaml b/cfg/gke-1.2.0/controlplane.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..33d1ddfc87d5a7a5d7918db02c47769e5c54ee7d
--- /dev/null
+++ b/cfg/gke-1.2.0/controlplane.yaml
@@ -0,0 +1,35 @@
+---
+controls:
+version: "gke-1.2.0"
+id: 2
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 2.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 2.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+          implemented in place of client certificates.
+          You can remediate the availability of client certificates in your GKE cluster. See
+          Recommendation 6.8.2.
+        scored: false
+
+  - id: 2.2
+    text: "Logging"
+    type: "skip"
+    checks:
+      - id: 2.2.1
+        text: "Ensure that a minimal audit policy is created (Manual)"
+        type: "manual"
+        remediation: "This control cannot be modified in GKE."
+        scored: false
+
+      - id: 2.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        type: "manual"
+        remediation: "This control cannot be modified in GKE."
+        scored: false
diff --git a/cfg/gke-1.2.0/managedservices.yaml b/cfg/gke-1.2.0/managedservices.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..823a4d1fd952b383cbbb1853e5ea691c9b05a700
--- /dev/null
+++ b/cfg/gke-1.2.0/managedservices.yaml
@@ -0,0 +1,706 @@
+---
+controls:
+version: "gke-1.2.0"
+id: 5
+text: "Managed Services"
+type: "managedservices"
+groups:
+  - id: 5.1
+    text: "Image Registry and Image Scanning"
+    checks:
+      - id: 5.1.1
+        text: "Ensure Image Vulnerability Scanning using GCR Container Analysis
+        or a third-party provider (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+
+            gcloud services enable containerscanning.googleapis.com
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize user access to GCR (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To change roles at the GCR bucket level:
+            Firstly, run the following if read permissions are required:
+
+              gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer \
+                gs://artifacts.[PROJECT_ID].appspot.com
+
+            Then remove the excessively privileged role (Storage Admin / Storage Object Admin /
+            Storage Object Creator) using:
+
+              gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] \
+                gs://artifacts.[PROJECT_ID].appspot.com
+
+            where:
+              [TYPE] can be one of the following:
+                    o user, if the [EMAIL-ADDRESS] is a Google account
+                    o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account
+              [EMAIL-ADDRESS] can be one of the following:
+                    o a Google account (for example, someone@example.com)
+                    o a Cloud IAM service account
+
+            To modify roles defined at the project level and subsequently inherited within the GCR
+            bucket, or the Service Account User role, extract the IAM policy file, modify it accordingly
+            and apply it using:
+
+              gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE]
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize cluster access to read-only for GCR (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            For an account explicitly granted to the bucket, first add read access to the Kubernetes
+            Service Account:
+
+              gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer \
+                gs://artifacts.[PROJECT_ID].appspot.com
+
+              where:
+              [TYPE] can be one of the following:
+                      o user, if the [EMAIL-ADDRESS] is a Google account
+                      o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account
+              [EMAIL-ADDRESS] can be one of the following:
+                      o a Google account (for example, someone@example.com)
+                      o a Cloud IAM service account
+
+              Then remove the excessively privileged role (Storage Admin / Storage Object Admin /
+              Storage Object Creator) using:
+
+                gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] \
+                  gs://artifacts.[PROJECT_ID].appspot.com
+
+              For an account that inherits access to the GCR Bucket through Project level permissions,
+              modify the Projects IAM policy file accordingly, then upload it using:
+
+                gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE]
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize Container Registries to only those approved (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            First, update the cluster to enable Binary Authorization:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --enable-binauthz
+
+            Create a Binary Authorization Policy using the Binary Authorization Policy Reference
+            (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance.
+            Import the policy file into Binary Authorization:
+
+              gcloud container binauthz policy import [YAML_POLICY]
+        scored: false
+
+  - id: 5.2
+    text: "Identity and Access Management (IAM)"
+    checks:
+      - id: 5.2.1
+        text: "Ensure GKE clusters are not running using the Compute Engine
+        default service account (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Firstly, create a minimally privileged service account:
+
+              gcloud iam service-accounts create [SA_NAME] \
+                --display-name "GKE Node Service Account"
+              export NODE_SA_EMAIL=`gcloud iam service-accounts list \
+                --format='value(email)' \
+                --filter='displayName:GKE Node Service Account'`
+
+            Grant the following roles to the service account:
+
+              export PROJECT_ID=`gcloud config get-value project`
+              gcloud projects add-iam-policy-binding $PROJECT_ID \
+                --member serviceAccount:$NODE_SA_EMAIL \
+                --role roles/monitoring.metricWriter
+              gcloud projects add-iam-policy-binding $PROJECT_ID \
+                --member serviceAccount:$NODE_SA_EMAIL \
+                --role roles/monitoring.viewer
+              gcloud projects add-iam-policy-binding $PROJECT_ID \
+                --member serviceAccount:$NODE_SA_EMAIL \
+                --role roles/logging.logWriter
+
+            To create a new Node pool using the Service account, run the following command:
+
+              gcloud container node-pools create [NODE_POOL] \
+                --service-account=[SA_NAME]@[PROJECT_ID].iam.gserviceaccount.com \
+                --cluster=[CLUSTER_NAME] --zone [COMPUTE_ZONE]
+
+            You will need to migrate your workloads to the new Node pool, and delete Node pools that
+            use the default service account to complete the remediation.
+        scored: false
+
+      - id: 5.2.2
+        text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+
+              gcloud beta container clusters update [CLUSTER_NAME] --zone [CLUSTER_ZONE] \
+                --identity-namespace=[PROJECT_ID].svc.id.goog
+
+            Note that existing Node pools are unaffected. New Node pools default to
+            --workload-metadata-from-node=GKE_METADATA_SERVER.
+
+            Then, modify existing Node pools to enable GKE_METADATA_SERVER:
+
+              gcloud beta container node-pools update [NODEPOOL_NAME] \
+                --cluster=[CLUSTER_NAME] --zone [CLUSTER_ZONE] \
+                --workload-metadata-from-node=GKE_METADATA_SERVER
+
+            You may also need to modify workloads in order for them to use Workload Identity as
+            described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
+            Also consider the effects on the availability of your hosted workloads as Node pools are
+            updated; it may be more appropriate to create new Node pools.
+        scored: false
+
+  - id: 5.3
+    text: "Cloud Key Management Service (Cloud KMS)"
+    checks:
+      - id: 5.3.1
+        text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To create a key
+
+            Create a key ring:
+
+              gcloud kms keyrings create [RING_NAME] \
+                --location [LOCATION] \
+                --project [KEY_PROJECT_ID]
+
+            Create a key:
+
+              gcloud kms keys create [KEY_NAME] \
+                --location [LOCATION] \
+                --keyring [RING_NAME] \
+                --purpose encryption \
+                --project [KEY_PROJECT_ID]
+
+            Grant the Kubernetes Engine Service Agent service account the Cloud KMS CryptoKey
+            Encrypter/Decrypter role:
+
+              gcloud kms keys add-iam-policy-binding [KEY_NAME] \
+                --location [LOCATION] \
+                --keyring [RING_NAME] \
+                --member serviceAccount:[SERVICE_ACCOUNT_NAME] \
+                --role roles/cloudkms.cryptoKeyEncrypterDecrypter \
+                --project [KEY_PROJECT_ID]
+
+            To create a new cluster with Application-layer Secrets Encryption:
+
+              gcloud container clusters create [CLUSTER_NAME] \
+                --cluster-version=latest \
+                --zone [ZONE] \
+                --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] \
+                --project [CLUSTER_PROJECT_ID]
+
+            To enable on an existing cluster:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --zone [ZONE] \
+                --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] \
+                --project [CLUSTER_PROJECT_ID]
+        scored: false
+
+  - id: 5.4
+    text: "Node Metadata"
+    checks:
+      - id: 5.4.1
+        text: "Ensure legacy Compute Engine instance metadata APIs are Disabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To update an existing cluster, create a new Node pool with the legacy GCE metadata
+            endpoint disabled:
+
+              gcloud container node-pools create [POOL_NAME] \
+                --metadata disable-legacy-endpoints=true \
+                --cluster [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE]
+
+            You will need to migrate workloads from any existing non-conforming Node pools, to the
+            new Node pool, then delete non-conforming Node pools to complete the remediation.
+        scored: false
+
+      - id: 5.4.2
+        text: "Ensure the GKE Metadata Server is Enabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+              gcloud beta container clusters update [CLUSTER_NAME] \
+                --identity-namespace=[PROJECT_ID].svc.id.goog
+            Note that existing Node pools are unaffected. New Node pools default to
+            --workload-metadata-from-node=GKE_METADATA_SERVER.
+
+            To modify an existing Node pool to enable GKE Metadata Server:
+
+              gcloud beta container node-pools update [NODEPOOL_NAME] \
+                --cluster=[CLUSTER_NAME] \
+                --workload-metadata-from-node=GKE_METADATA_SERVER
+
+            You may also need to modify workloads in order for them to use Workload Identity as
+            described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
+        scored: false
+
+  - id: 5.5
+    text: "Node Configuration and Maintenance"
+    checks:
+      - id: 5.5.1
+        text: "Ensure Container-Optimized OS (COS) is used for GKE node images (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To set the node image to cos for an existing cluster's Node pool:
+
+              gcloud container clusters upgrade [CLUSTER_NAME] \
+                --image-type cos \
+                --zone [COMPUTE_ZONE] --node-pool [POOL_NAME]
+        scored: false
+
+      - id: 5.5.2
+        text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To enable node auto-repair for an existing cluster with Node pool, run the following
+            command:
+
+              gcloud container node-pools update [POOL_NAME] \
+                --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
+                --enable-autorepair
+        scored: false
+
+      - id: 5.5.3
+        text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To enable node auto-upgrade for an existing cluster's Node pool, run the following
+            command:
+
+              gcloud container node-pools update [NODE_POOL] \
+                --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
+                --enable-autoupgrade
+        scored: false
+
+      - id: 5.5.4
+        text: "Automate GKE version management using Release Channels (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Create a new cluster by running the following command:
+
+              gcloud beta container clusters create [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE] \
+                --release-channel [RELEASE_CHANNEL]
+
+            where [RELEASE_CHANNEL] is stable or regular according to your needs.
+        scored: false
+
+      - id: 5.5.5
+        text: "Ensure Shielded GKE Nodes are Enabled (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To create a Node pool within the cluster with Integrity Monitoring enabled, run the
+            following command:
+
+              gcloud beta container node-pools create [NODEPOOL_NAME] \
+                --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
+                --shielded-integrity-monitoring
+
+            You will also need to migrate workloads from existing non-conforming Node pools to the
+            newly created Node pool, then delete the non-conforming pools.
+        scored: false
+
+      - id: 5.5.6
+        text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To create a Node pool within the cluster with Integrity Monitoring enabled, run the
+            following command:
+
+              gcloud beta container node-pools create [NODEPOOL_NAME] \
+                --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
+                --shielded-integrity-monitoring
+
+            You will also need to migrate workloads from existing non-conforming Node pools to the
+            newly created Node pool, then delete the non-conforming pools.
+        scored: false
+
+      - id: 5.5.7
+        text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To create a Node pool within the cluster with Secure Boot enabled, run the following
+            command:
+
+              gcloud beta container node-pools create [NODEPOOL_NAME] \
+                --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
+                --shielded-secure-boot
+
+            You will also need to migrate workloads from existing non-conforming Node pools to the
+            newly created Node pool, then delete the non-conforming pools.
+        scored: false
+
+  - id: 5.6
+    text: "Cluster Networking"
+    checks:
+      - id: 5.6.1
+        text: "Enable VPC Flow Logs and Intranode Visibility (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To enable intranode visibility on an existing cluster, run the following command:
+
+              gcloud beta container clusters update [CLUSTER_NAME] \
+                --enable-intra-node-visibility
+        scored: false
+
+      - id: 5.6.2
+        text: "Ensure use of VPC-native clusters (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To enable Alias IP on a new cluster, run the following command:
+
+              gcloud container clusters create [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE] \
+                --enable-ip-alias
+        scored: false
+
+      - id: 5.6.3
+        text: "Ensure Master Authorized Networks is Enabled (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To check Master Authorized Networks status for an existing cluster, run the following
+            command:
+
+              gcloud container clusters describe [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE] \
+                --format json | jq '.masterAuthorizedNetworksConfig'
+
+            The output should return
+
+              {
+                "enabled": true
+              }
+
+            if Master Authorized Networks is enabled.
+
+            If Master Authorized Networks is disabled, the
+            above command will return null ( { } ).
+        scored: false
+
+      - id: 5.6.4
+        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Create a cluster with a Private Endpoint enabled and Public Access disabled by including
+            the --enable-private-endpoint flag within the cluster create command:
+
+              gcloud container clusters create [CLUSTER_NAME] \
+                --enable-private-endpoint
+
+            Setting this flag also requires the setting of --enable-private-nodes, --enable-ip-alias
+            and --master-ipv4-cidr=[MASTER_CIDR_RANGE].
+        scored: false
+
+      - id: 5.6.5
+        text: "Ensure clusters are created with Private Nodes (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To create a cluster with Private Nodes enabled, include the --enable-private-nodes flag
+            within the cluster create command:
+
+              gcloud container clusters create [CLUSTER_NAME] \
+                --enable-private-nodes
+
+            Setting this flag also requires the setting of --enable-ip-alias and
+            --master-ipv4-cidr=[MASTER_CIDR_RANGE].
+        scored: false
+
+      - id: 5.6.6
+        text: "Consider firewalling GKE worker nodes (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Use the following command to generate firewall rules, setting the variables as appropriate.
+            You may want to use the target [TAG] and [SERVICE_ACCOUNT] previously identified.
+
+              gcloud compute firewall-rules create FIREWALL_RULE_NAME \
+                --network [NETWORK] \
+                --priority [PRIORITY] \
+                --direction [DIRECTION] \
+                --action [ACTION] \
+                --target-tags [TAG] \
+                --target-service-accounts [SERVICE_ACCOUNT] \
+                --source-ranges [SOURCE_CIDR-RANGE] \
+                --source-tags [SOURCE_TAGS] \
+                --source-service-accounts=[SOURCE_SERVICE_ACCOUNT] \
+                --destination-ranges [DESTINATION_CIDR_RANGE] \
+                --rules [RULES]
+        scored: false
+
+      - id: 5.6.7
+        text: "Ensure Network Policy is Enabled and set as appropriate (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To enable Network Policy for an existing cluster, firstly enable the Network Policy add-on:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE] \
+                --update-addons NetworkPolicy=ENABLED
+
+            Then, enable Network Policy:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE] \
+                --enable-network-policy
+        scored: false
+
+      - id: 5.6.8
+        text: "Ensure use of Google-managed SSL Certificates (Manual)"
+        type: "manual"
+        remediation: |
+          If services of type:LoadBalancer are discovered, consider replacing the Service with an
+          Ingress.
+
+          To configure the Ingress and use Google-managed SSL certificates, follow the instructions
+          as listed at https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs.
+        scored: false
+
+  - id: 5.7
+    text: "Logging"
+    checks:
+      - id: 5.7.1
+        text: "Ensure Stackdriver Kubernetes Logging and Monitoring is Enabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+
+            STACKDRIVER KUBERNETES ENGINE MONITORING SUPPORT (PREFERRED):
+            To enable Stackdriver Kubernetes Engine Monitoring for an existing cluster, run the
+            following command:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE] \
+                --enable-stackdriver-kubernetes
+
+            LEGACY STACKDRIVER SUPPORT:
+            Both Logging and Monitoring support must be enabled.
+            To enable Legacy Stackdriver Logging for an existing cluster, run the following command:
+
+              gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
+                --logging-service logging.googleapis.com
+
+            To enable Legacy Stackdriver Monitoring for an existing cluster, run the following
+            command:
+
+              gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
+                --monitoring-service monitoring.googleapis.com
+        scored: false
+
+      - id: 5.7.2
+        text: "Enable Linux auditd logging (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Download the example manifests:
+
+              curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml \
+                > cos-auditd-logging.yaml
+
+            Edit the example manifests if needed. Then, deploy them:
+
+              kubectl apply -f cos-auditd-logging.yaml
+
+            Verify that the logging Pods have started. If you defined a different Namespace in your
+            manifests, replace cos-auditd with the name of the namespace you're using:
+
+              kubectl get pods --namespace=cos-auditd
+        scored: false
+
+  - id: 5.8
+    text: "Authentication and Authorization"
+    checks:
+      - id: 5.8.1
+        text: "Ensure Basic Authentication using static passwords is Disabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To update an existing cluster and disable Basic Authentication by removing the static
+            password:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --no-enable-basic-auth
+        scored: false
+
+      - id: 5.8.2
+        text: "Ensure authentication using Client Certificates is Disabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Create a new cluster without a Client Certificate:
+
+              gcloud container clusters create [CLUSTER_NAME] \
+                --no-issue-client-certificate
+        scored: false
+
+      - id: 5.8.3
+        text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Follow the G Suite Groups instructions at
+            https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke.
+
+            Then, create a cluster with
+
+              gcloud beta container clusters create my-cluster \
+                --security-group="gke-security-groups@[yourdomain.com]"
+
+            Finally create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that
+            reference your G Suite Groups.
+        scored: false
+
+      - id: 5.8.4
+        text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To disable Legacy Authorization for an existing cluster, run the following command:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE] \
+                --no-enable-legacy-authorization
+        scored: false
+
+  - id: 5.9
+    text: "Storage"
+    checks:
+      - id: 5.9.1
+        text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            FOR NODE BOOT DISKS:
+            Create a new node pool using customer-managed encryption keys for the node boot disk, where
+            [DISK_TYPE] is either pd-standard or pd-ssd:
+
+              gcloud beta container node-pools create [NODE_POOL_NAME] \
+                --cluster [CLUSTER_NAME] \
+                --disk-type [DISK_TYPE] \
+                --boot-disk-kms-key \
+                projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]
+
+            Create a cluster using customer-managed encryption keys for the node boot disk, where
+            [DISK_TYPE] is either pd-standard or pd-ssd:
+
+              gcloud beta container clusters create [CLUSTER_NAME] \
+                --disk-type [DISK_TYPE] \
+                --boot-disk-kms-key \
+                projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]
+
+            FOR ATTACHED DISKS:
+            Follow the instructions detailed at
+            https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek.
+        scored: false
+
+  - id: 5.10
+    text: "Other Cluster Configurations"
+    checks:
+      - id: 5.10.1
+        text: "Ensure Kubernetes Web UI is Disabled (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To disable the Kubernetes Dashboard on an existing cluster, run the following command:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --zone [ZONE] \
+                --update-addons=KubernetesDashboard=DISABLED
+        scored: false
+
+      - id: 5.10.2
+        text: "Ensure that Alpha clusters are not used for production workloads (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Upon creating a new cluster:
+
+              gcloud container clusters create [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE]
+
+            Do not use the --enable-kubernetes-alpha argument.
+        scored: false
+
+      - id: 5.10.3
+        text: "Ensure Pod Security Policy is Enabled and set as appropriate (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To enable Pod Security Policy for an existing cluster, run the following command:
+
+              gcloud beta container clusters update [CLUSTER_NAME] \
+                --zone [COMPUTE_ZONE] \
+                --enable-pod-security-policy
+        scored: false
+
+      - id: 5.10.4
+        text: "Consider GKE Sandbox for running untrusted workloads (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            To enable GKE Sandbox on an existing cluster, a new Node pool must be created.
+
+              gcloud container node-pools create [NODE_POOL_NAME] \
+                --zone=[COMPUTE-ZONE] \
+                --cluster=[CLUSTER_NAME] \
+                --image-type=cos_containerd \
+                --sandbox type=gvisor
+        scored: false
+
+      - id: 5.10.5
+        text: "Ensure use of Binary Authorization (Automated)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Firstly, update the cluster to enable Binary Authorization:
+
+              gcloud container clusters update [CLUSTER_NAME] \
+                --zone [COMPUTE-ZONE] \
+                --enable-binauthz
+
+            Create a Binary Authorization Policy using the Binary Authorization Policy Reference
+            (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for
+            guidance.
+
+            Import the policy file into Binary Authorization:
+
+              gcloud container binauthz policy import [YAML_POLICY]
+        scored: false
+
+      - id: 5.10.6
+        text: "Enable Cloud Security Command Center (Cloud SCC) (Manual)"
+        type: "manual"
+        remediation: |
+          Using Command Line:
+            Follow the instructions at
+            https://cloud.google.com/security-command-center/docs/quickstart-scc-setup.
+        scored: false
diff --git a/cfg/gke-1.2.0/master.yaml b/cfg/gke-1.2.0/master.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dddd5fc724520b556594e606e497a566b7baf5f5
--- /dev/null
+++ b/cfg/gke-1.2.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "gke-1.2.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cfg/gke-1.2.0/node.yaml b/cfg/gke-1.2.0/node.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4d811a8dbf374b697845c89a25a67827cacc7f74
--- /dev/null
+++ b/cfg/gke-1.2.0/node.yaml
@@ -0,0 +1,334 @@
+---
+controls:
+version: "gke-1.2.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $proxykubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 /var/lib/kubelet/config.yaml
+        scored: false
+
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root /etc/kubernetes/kubelet.conf
+        scored: false
+
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=<path/to/client-ca-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.4
+        text: "Ensure that the --read-only-port argument is set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set readOnlyPort to 0.
+          If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+          on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
+          If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+          on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.9
+        text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS to an appropriate level.
+          If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+          on each worker node and set the --event-qps parameter in the KUBELET_SYSTEM_PODS_ARGS variable to an appropriate level.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set tlsCertFile to the location
+          of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.12
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
diff --git a/cfg/gke-1.2.0/policies.yaml b/cfg/gke-1.2.0/policies.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e1261c0e390770d87fc7111636e4cd76aef28067
--- /dev/null
+++ b/cfg/gke-1.2.0/policies.yaml
@@ -0,0 +1,239 @@
+---
+controls:
+version: "gke-1.2.0"
+id: 4
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 4.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 4.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+        scored: false
+
+      - id: 4.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 4.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+  - id: 4.2
+    text: "Pod Security Policies"
+    checks:
+      - id: 4.2.1
+        text: "Minimize the admission of privileged containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that
+          the .spec.privileged field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.2
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostPID field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.3
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostIPC field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostNetwork field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.allowPrivilegeEscalation field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.6
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+          UIDs not including 0.
+        scored: false
+
+      - id: 4.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+        scored: false
+
+      - id: 4.2.8
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 4.2.9
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+  - id: 4.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 4.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Manual)"
+        type: "manual"
+        remediation: |
+          To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin
+          will be updated. See Recommendation 6.6.7.
+        scored: false
+
+      - id: 4.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 4.4
+    text: "Secrets Management"
+    checks:
+      - id: 4.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 4.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 4.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 4.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and setup image provenance.
+          See also Recommendation 6.10.5 for GKE specifically.
+        scored: false
+
+  - id: 4.6
+    text: "General Policies"
+    checks:
+      - id: 4.6.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 4.6.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Seccomp is currently an alpha feature. By default, all alpha features are disabled, so you
+          would need to enable alpha features in the apiserver by passing the
+          "--feature-gates=AllAlpha=true" argument.
+          Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+          parameter to "--feature-gates=AllAlpha=true":
+          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+          Based on your system, restart the kube-apiserver service. For example:
+          systemctl restart kube-apiserver.service
+          Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+          example is as below:
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            name: trustworthy-pod
+            annotations:
+              seccomp.security.alpha.kubernetes.io/pod: docker/default
+          spec:
+            containers:
+              - name: trustworthy-container
+                image: sotrustworthy:latest
+        scored: false
+
+      - id: 4.6.3
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 4.6.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cmd/common_test.go b/cmd/common_test.go
index 88b9dd676d9e14954135d04bc58aa1dc10672dc6..e4249880d175495eed50f87bfc69167dbbe70d07 100644
--- a/cmd/common_test.go
+++ b/cmd/common_test.go
@@ -240,7 +240,7 @@ func TestMapToCISVersion(t *testing.T) {
 		{kubeVersion: "1.19", succeed: true, exp: "cis-1.20"},
 		{kubeVersion: "1.20", succeed: true, exp: "cis-1.20"},
 		{kubeVersion: "1.21", succeed: true, exp: "cis-1.20"},
-		{kubeVersion: "gke-1.0", succeed: true, exp: "gke-1.0"},
+		{kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"},
 		{kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
 		{kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
 		{kubeVersion: "unknown", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: unknown"},
@@ -366,7 +366,7 @@ func TestGetBenchmarkVersion(t *testing.T) {
 		{n: "kubeVersion", kubeVersion: "1.15", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "cis-1.5", callFn: withNoPath, succeed: true},
 		{n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
 		{n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
-		{n: "gke10", kubeVersion: "gke-1.0", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "gke-1.0", callFn: withNoPath, succeed: true},
+		{n: "gke12", kubeVersion: "gke-1.2.0", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "gke-1.2.0", callFn: withNoPath, succeed: true},
 	}
 	for _, c := range cases {
 		rv, err := c.callFn(c.kubeVersion, c.benchmarkVersion, c.platformName, c.v, getBenchmarkVersion)
@@ -426,9 +426,9 @@ func TestValidTargets(t *testing.T) {
 			expected:  true,
 		},
 		{
-			name:      "gke-1.0 valid",
-			benchmark: "gke-1.0",
-			targets:   []string{"master", "node", "controlplane", "etcd", "policies", "managedservices"},
+			name:      "gke-1.2.0 valid",
+			benchmark: "gke-1.2.0",
+			targets:   []string{"master", "node", "controlplane", "policies", "managedservices"},
 			expected:  true,
 		},
 		{
diff --git a/cmd/util.go b/cmd/util.go
index 732d083e34cf6c3b8ecf9e605179363e87ef1748..20043714006b11e3f294b18ddfab19bc04241933 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -457,7 +457,8 @@ func getPlatformBenchmarkVersion(platform string) string {
 	case "eks":
 		return "eks-1.0.1"
 	case "gke":
-		return "gke-1.0"
+		// TODO: select the benchmark version based on the detected Kubernetes version
+		return "gke-1.2.0"
 	case "aliyun":
 		return "ack-1.0"
 	case "ocp-3.10":
diff --git a/cmd/util_test.go b/cmd/util_test.go
index f125eb5df283de5ce488e206c8353b0ce12cae14..45b4c24a377356618d01c13319f34c53a522cec9 100644
--- a/cmd/util_test.go
+++ b/cmd/util_test.go
@@ -585,7 +585,7 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
 			args: args{
 				platform: "gke",
 			},
-			want: "gke-1.0",
+			want: "gke-1.2.0",
 		},
 		{
 			name: "aliyun",
diff --git a/docs/architecture.md b/docs/architecture.md
index b74e00a22c21e7d829a946d324aa010e473f2b7c..b2a48f485f43fa0b85a4edb56a11b369d53720c9 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -19,6 +19,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
 | cis-1.6| master, controlplane, node, etcd, policies |
 |cis-1.20| master, controlplane, node, etcd, policies |
 | gke-1.0| master, controlplane, node, etcd, policies, managedservices |
+| gke-1.2.0| master, controlplane, node, policies, managedservices |
 | eks-1.0.1| controlplane, node, policies, managedservices |
 | ack-1.0| master, controlplane, node, etcd, policies, managedservices |
 | aks-1.0| controlplane, node, policies, managedservices |
diff --git a/docs/platforms.md b/docs/platforms.md
index ce6019b8ee6c738a221a160c715831eb09a27133..cf0477471773bbd2d80e8ea95701cd53e30af763 100644
--- a/docs/platforms.md
+++ b/docs/platforms.md
@@ -11,6 +11,7 @@ Some defined by other hardenening guides.
 | CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 |
 | CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.20 |
 | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE |
+| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE |
 | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS |
 | CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK |
 | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS |
diff --git a/docs/running.md b/docs/running.md
index 265209e9cfc3ef7b0cfa78db602a633118b1609e..0c7889f04c2bc3f1c175014a9b6f54ac7c8c61d4 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -120,8 +120,9 @@ kube-bench includes a set of test files for Red Hat's OpenShift hardening guide
 | CIS Benchmark | Targets |
 |---|---|
 | gke-1.0| master, controlplane, node, etcd, policies, managedservices |
+| gke-1.2.0| master, controlplane, node, policies, managedservices |
 
-kube-bench includes benchmarks for GKE. To run this you will need to specify `--benchmark gke-1.0` when you run the `kube-bench` command.
+kube-bench includes benchmarks for GKE. To run one of these, you will need to specify `--benchmark gke-1.0` or `--benchmark gke-1.2.0` when you run the `kube-bench` command.
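+
+For example, to run the GKE 1.2.0 checks directly, the invocation might look like the following (a minimal sketch; the target list mirrors the one used in `job-gke.yaml` and can be adjusted to any of the valid targets listed above):
+
+```
+kube-bench run --targets node,policies,managedservices --benchmark gke-1.2.0
+```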
 
 To run the benchmark as a job in your GKE cluster apply the included `job-gke.yaml`.
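+
+A minimal sketch of running it as a Job and reading the results (assuming `kubectl` is pointed at the GKE cluster and the Job defined in `job-gke.yaml` is named `kube-bench`):
+
+```
+kubectl apply -f job-gke.yaml
+kubectl logs job/kube-bench
+```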
 
diff --git a/job-gke.yaml b/job-gke.yaml
index 8a1b6f03db054fb63b77f48ad5f6fe0c2fd35827..2e6d1e956d69fc15589345934c83fe6876b01fe4 100644
--- a/job-gke.yaml
+++ b/job-gke.yaml
@@ -10,7 +10,7 @@ spec:
       containers:
         - name: kube-bench
           image: aquasec/kube-bench:latest
-          command: ["kube-bench", "run", "--targets", "node,policies,managedservices", "--benchmark", "gke-1.0"]
+          command: ["kube-bench", "run", "--targets", "node,policies,managedservices", "--benchmark", "gke-1.2.0"]
           volumeMounts:
             - name: var-lib-kubelet
               mountPath: /var/lib/kubelet