diff --git a/README.md b/README.md
index 4b67ad11ef0a5bf23dee3f0f9afe3acbc90987d1..eff1e4e9b885477287994fc08b77529f6b275b89 100644
--- a/README.md
+++ b/README.md
@@ -53,6 +53,7 @@ This stack is meant for cluster monitoring, so it is pre-configured to collect m
     - [Stripping container resource limits](#stripping-container-resource-limits)
     - [Customizing Prometheus alerting/recording rules and Grafana dashboards](#customizing-prometheus-alertingrecording-rules-and-grafana-dashboards)
     - [Exposing Prometheus/Alermanager/Grafana via Ingress](#exposing-prometheusalermanagergrafana-via-ingress)
+    - [Setting up a blackbox exporter](#setting-up-a-blackbox-exporter)
   - [Minikube Example](#minikube-example)
   - [Troubleshooting](#troubleshooting)
     - [Error retrieving kubelet metrics](#error-retrieving-kubelet-metrics)
@@ -223,6 +224,7 @@ local kp =
 // serviceMonitor is separated so that it can be created after the CRDs are ready
 { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
 { ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
 { ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
 { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
 { ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
@@ -729,6 +731,36 @@ See [developing Prometheus rules and Grafana dashboards](docs/developing-prometh
 
 See [exposing Prometheus/Alertmanager/Grafana](docs/exposing-prometheus-alertmanager-grafana-ingress.md) guide.
 
+### Setting up a blackbox exporter
+
+```jsonnet
+local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
+           // ... all necessary mixins ...
+  {
+    _config+:: {
+      // ... configuration for other features ...
+      blackboxExporter+:: {
+        modules+:: {
+          tls_connect: {
+            prober: 'tcp',
+            tcp: {
+              tls: true
+            }
+          }
+        }
+      }
+    }
+  };
+
+{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
+// ... other rendering blocks ...
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) }
+```
+
+Then describe the actual blackbox checks you want to run using `Probe` resources. Specify `blackbox-exporter.<namespace>.svc.cluster.local:9115` as the `spec.prober.url` field of the `Probe` resource.
+
+See the [blackbox exporter guide](docs/blackbox-exporter.md) for the list of configurable options and a complete example.
+
 ## Minikube Example
 
 To use an easy to reproduce example, see [minikube.jsonnet](examples/minikube.jsonnet), which uses the minikube setup as demonstrated in [Prerequisites](#prerequisites). Because we would like easy access to our Prometheus, Alertmanager and Grafana UIs, `minikube.jsonnet` exposes the services as NodePort type services.
diff --git a/docs/blackbox-exporter.md b/docs/blackbox-exporter.md
new file mode 100644
index 0000000000000000000000000000000000000000..9136944d17b33c0c58d8a16c12aa61cf81d583fc
--- /dev/null
+++ b/docs/blackbox-exporter.md
@@ -0,0 +1,82 @@
+# Setting up a blackbox exporter
+
+The `prometheus-operator` defines a `Probe` resource type that can be used to describe blackbox checks. To execute these, a separate component called [`blackbox_exporter`](https://github.com/prometheus/blackbox_exporter) has to be deployed, which can be scraped to retrieve the results of these checks. You can use `kube-prometheus` to set up such a blackbox exporter within your Kubernetes cluster.
+
+## Adding blackbox exporter manifests to an existing `kube-prometheus` configuration
+
+1. Override blackbox-related configuration parameters as needed.
+2. Add the following to the list of renderers to render the blackbox exporter manifests:
+```
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) }
+```
+
+## Configuration parameters influencing the blackbox exporter
+
+* `_config.namespace`: the namespace where the various generated resources (`ConfigMap`, `Deployment`, `Service`, `ServiceAccount` and `ServiceMonitor`) will reside. This does not affect where you can place `Probe` objects; that is determined by the configuration of the `Prometheus` resource. This option is shared with other `kube-prometheus` components; defaults to `default`.
+* `_config.imageRepos.blackboxExporter`: the name of the blackbox exporter image to deploy. Defaults to `quay.io/prometheus/blackbox-exporter`.
+* `_config.versions.blackboxExporter`: the tag of the blackbox exporter image to deploy. Defaults to the version `kube-prometheus` was tested with.
+* `_config.imageRepos.configmapReloader`: the name of the ConfigMap reloader image to deploy. Defaults to `jimmidyson/configmap-reload`.
+* `_config.versions.configmapReloader`: the tag of the ConfigMap reloader image to deploy. Defaults to the version `kube-prometheus` was tested with.
+* `_config.resources.blackbox-exporter.requests`: the requested resources; this is used for each container. Defaults to `10m` CPU and `20Mi` RAM. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for details.
+* `_config.resources.blackbox-exporter.limits`: the resource limits; this is used for each container. Defaults to `20m` CPU and `40Mi` RAM. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for details.
+* `_config.blackboxExporter.port`: the exposed HTTPS port of the exporter. This is where Prometheus should send the probe requests. Defaults to `9115`.
+* `_config.blackboxExporter.internalPort`: the internal plaintext port of the exporter. Not accessible from outside the pod. Defaults to `19115`.
+* `_config.blackboxExporter.replicas`: the number of exporter replicas to be deployed. Defaults to `1`.
+* `_config.blackboxExporter.matchLabels`: map of the labels to be used to select resources belonging to the instance deployed. Defaults to `{ 'app.kubernetes.io/name': 'blackbox-exporter' }`
+* `_config.blackboxExporter.assignLabels`: map of the labels applied to components of the instance deployed. Defaults to all the labels included in the `matchLabels` option, and additionally `app.kubernetes.io/version` is set to the version of the blackbox exporter.
+* `_config.blackboxExporter.modules`: the modules available in the blackbox exporter installation, i.e. the types of checks it can perform. The default value includes most of the modules defined in the default blackbox exporter configuration: `http_2xx`, `http_post_2xx`, `tcp_connect`, `pop3s_banner`, `ssh_banner`, and `irc_banner`. `icmp` is omitted so the exporter can be run with minimum privileges, but you can add it back if needed - see the example below. See https://github.com/prometheus/blackbox_exporter/blob/master/CONFIGURATION.md for the configuration format, except you have to use JSON instead of YAML here.
+* `_config.blackboxExporter.privileged`: whether the `blackbox-exporter` container should be running as non-root (`false`) or root with heavily-restricted capability set (`true`). Defaults to `true` if you have any ICMP modules defined (which need the extra permissions) and `false` otherwise.
+
+## Complete example
+
+```jsonnet
+local kp =
+  (import 'kube-prometheus/kube-prometheus.libsonnet') +
+  {
+    _config+:: {
+      namespace: 'monitoring',
+      blackboxExporter+:: {
+        modules+:: {
+          icmp: {
+            prober: 'icmp',
+          },
+        },
+      },
+    },
+  };
+
+{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
+{
+  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
+  for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
+} +
+// serviceMonitor is separated so that it can be created after the CRDs are ready
+{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
+{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
+{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
+{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
+{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
+{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
+```
+
+After installing the generated manifests, you can create `Probe` resources, for example:
+
+```yaml
+kind: Probe
+apiVersion: monitoring.coreos.com/v1
+metadata:
+  name: example-com-website
+  namespace: monitoring
+spec:
+  interval: 60s
+  module: http_2xx
+  prober:
+    url: blackbox-exporter.monitoring.svc.cluster.local:9115
+  targets:
+    staticConfig:
+      static:
+      - http://example.com
+      - https://example.com
+```
diff --git a/docs/developing-prometheus-rules-and-grafana-dashboards.md b/docs/developing-prometheus-rules-and-grafana-dashboards.md
index ee7be4e0dd2d641caa4f0a535d6c81c0bcc7f7b7..f9decdcd5563740b333b37f64bdce9d873496edc 100644
--- a/docs/developing-prometheus-rules-and-grafana-dashboards.md
+++ b/docs/developing-prometheus-rules-and-grafana-dashboards.md
@@ -34,6 +34,7 @@ local kp =
 // serviceMonitor is separated so that it can be created after the CRDs are ready
 { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
 { ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
 { ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
 { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
 { ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
diff --git a/example.jsonnet b/example.jsonnet
index 15a801f80100cc7eb77161059bbd390dbb66d758..a459460db2451ce5548f093b9b01e3fb92a1dbf3 100644
--- a/example.jsonnet
+++ b/example.jsonnet
@@ -22,6 +22,7 @@ local kp =
 // serviceMonitor is separated so that it can be created after the CRDs are ready
 { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
 { ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
 { ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
 { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
 { ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
diff --git a/examples/kustomize.jsonnet b/examples/kustomize.jsonnet
index 38dd6c89d66559beff0c949545c2f1e5b40cccbf..7b1cf6a2e74e1147cb9454bc4b6be1cbbdda8223 100644
--- a/examples/kustomize.jsonnet
+++ b/examples/kustomize.jsonnet
@@ -16,6 +16,7 @@ local manifests =
   // serviceMonitor is separated so that it can be created after the CRDs are ready
   { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
   { ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+  { ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
   { ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
   { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
   { ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
diff --git a/jsonnet/kube-prometheus/blackbox-exporter/blackbox-exporter.libsonnet b/jsonnet/kube-prometheus/blackbox-exporter/blackbox-exporter.libsonnet
new file mode 100644
index 0000000000000000000000000000000000000000..8bd08e12ee870c96a8bab8b28e37e7a34007888a
--- /dev/null
+++ b/jsonnet/kube-prometheus/blackbox-exporter/blackbox-exporter.libsonnet
@@ -0,0 +1,275 @@
+local kubeRbacProxyContainer = import '../kube-rbac-proxy/container.libsonnet';
+
+{
+  _config+:: {
+    namespace: 'default',
+
+    versions+:: {
+      blackboxExporter: 'v0.18.0',
+      configmapReloader: 'v0.4.0',
+    },
+
+    imageRepos+:: {
+      blackboxExporter: 'quay.io/prometheus/blackbox-exporter',
+      configmapReloader: 'jimmidyson/configmap-reload',
+    },
+
+    resources+:: {
+      'blackbox-exporter': {
+        requests: { cpu: '10m', memory: '20Mi' },
+        limits: { cpu: '20m', memory: '40Mi' },
+      },
+    },
+
+    blackboxExporter: {
+      port: 9115,
+      internalPort: 19115,
+      replicas: 1,
+      matchLabels: {
+        'app.kubernetes.io/name': 'blackbox-exporter',
+      },
+      assignLabels: self.matchLabels {
+        'app.kubernetes.io/version': $._config.versions.blackboxExporter,
+      },
+      modules: {
+        http_2xx: {
+          prober: 'http',
+        },
+        http_post_2xx: {
+          prober: 'http',
+          http: {
+            method: 'POST',
+          },
+        },
+        tcp_connect: {
+          prober: 'tcp',
+        },
+        pop3s_banner: {
+          prober: 'tcp',
+          tcp: {
+            query_response: [
+              { expect: '^+OK' },
+            ],
+            tls: true,
+            tls_config: {
+              insecure_skip_verify: false,
+            },
+          },
+        },
+        ssh_banner: {
+          prober: 'tcp',
+          tcp: {
+            query_response: [
+              { expect: '^SSH-2.0-' },
+            ],
+          },
+        },
+        irc_banner: {
+          prober: 'tcp',
+          tcp: {
+            query_response: [
+              { send: 'NICK prober' },
+              { send: 'USER prober prober prober :prober' },
+              { expect: 'PING :([^ ]+)', send: 'PONG ${1}' },
+              { expect: '^:[^ ]+ 001' },
+            ],
+          },
+        },
+      },
+      privileged:
+        local icmpModules = [self.modules[m] for m in std.objectFields(self.modules) if self.modules[m].prober == 'icmp'];
+        std.length(icmpModules) > 0,
+    },
+  },
+
+  blackboxExporter+::
+    local bb = $._config.blackboxExporter;
+    {
+      configuration: {
+        apiVersion: 'v1',
+        kind: 'ConfigMap',
+        metadata: {
+          name: 'blackbox-exporter-configuration',
+          namespace: $._config.namespace,
+        },
+        data: {
+          'config.yml': std.manifestYamlDoc({ modules: bb.modules }),
+        },
+      },
+
+      serviceAccount: {
+        apiVersion: 'v1',
+        kind: 'ServiceAccount',
+        metadata: {
+          name: 'blackbox-exporter',
+          namespace: $._config.namespace,
+        },
+      },
+
+      clusterRole: {
+        apiVersion: 'rbac.authorization.k8s.io/v1',
+        kind: 'ClusterRole',
+        metadata: {
+          name: 'blackbox-exporter',
+        },
+        rules: [
+          {
+            apiGroups: ['authentication.k8s.io'],
+            resources: ['tokenreviews'],
+            verbs: ['create'],
+          },
+          {
+            apiGroups: ['authorization.k8s.io'],
+            resources: ['subjectaccessreviews'],
+            verbs: ['create'],
+          },
+        ],
+      },
+
+      clusterRoleBinding: {
+        apiVersion: 'rbac.authorization.k8s.io/v1',
+        kind: 'ClusterRoleBinding',
+        metadata: {
+          name: 'blackbox-exporter',
+        },
+        roleRef: {
+          apiGroup: 'rbac.authorization.k8s.io',
+          kind: 'ClusterRole',
+          name: 'blackbox-exporter',
+        },
+        subjects: [{
+          kind: 'ServiceAccount',
+          name: 'blackbox-exporter',
+          namespace: $._config.namespace,
+        }],
+      },
+
+      deployment: {
+        apiVersion: 'apps/v1',
+        kind: 'Deployment',
+        metadata: {
+          name: 'blackbox-exporter',
+          namespace: $._config.namespace,
+          labels: bb.assignLabels,
+        },
+        spec: {
+          replicas: bb.replicas,
+          selector: { matchLabels: bb.matchLabels },
+          template: {
+            metadata: { labels: bb.assignLabels },
+            spec: {
+              containers: [
+                {
+                  name: 'blackbox-exporter',
+                  image: $._config.imageRepos.blackboxExporter + ':' + $._config.versions.blackboxExporter,
+                  args: [
+                    '--config.file=/etc/blackbox_exporter/config.yml',
+                    '--web.listen-address=:%d' % bb.internalPort,
+                  ],
+                  ports: [{
+                    name: 'http',
+                    containerPort: bb.internalPort,
+                  }],
+                  resources: {
+                    requests: $._config.resources['blackbox-exporter'].requests,
+                    limits: $._config.resources['blackbox-exporter'].limits,
+                  },
+                  securityContext: if bb.privileged then {
+                    runAsNonRoot: false,
+                    capabilities: { drop: ['ALL'], add: ['NET_RAW'] },
+                  } else {
+                    runAsNonRoot: true,
+                    runAsUser: 65534,
+                  },
+                  volumeMounts: [{
+                    mountPath: '/etc/blackbox_exporter/',
+                    name: 'config',
+                    readOnly: true,
+                  }],
+                },
+                {
+                  name: 'module-configmap-reloader',
+                  image: $._config.imageRepos.configmapReloader + ':' + $._config.versions.configmapReloader,
+                  args: [
+                    '--webhook-url=http://localhost:%d/-/reload' % bb.internalPort,
+                    '--volume-dir=/etc/blackbox_exporter/',
+                  ],
+                  resources: {
+                    requests: $._config.resources['blackbox-exporter'].requests,
+                    limits: $._config.resources['blackbox-exporter'].limits,
+                  },
+                  securityContext: { runAsNonRoot: true, runAsUser: 65534 },
+                  terminationMessagePath: '/dev/termination-log',
+                  terminationMessagePolicy: 'FallbackToLogsOnError',
+                  volumeMounts: [{
+                    mountPath: '/etc/blackbox_exporter/',
+                    name: 'config',
+                    readOnly: true,
+                  }],
+                },
+              ],
+              nodeSelector: { 'kubernetes.io/os': 'linux' },
+              serviceAccountName: 'blackbox-exporter',
+              volumes: [{
+                name: 'config',
+                configMap: { name: 'blackbox-exporter-configuration' },
+              }],
+            },
+          },
+        },
+      },
+
+      service: {
+        apiVersion: 'v1',
+        kind: 'Service',
+        metadata: {
+          name: 'blackbox-exporter',
+          namespace: $._config.namespace,
+          labels: bb.assignLabels,
+        },
+        spec: {
+          ports: [{ name: 'http', port: bb.port, targetPort: 'https' }],
+          selector: bb.matchLabels,
+        },
+      },
+
+      serviceMonitor:
+        {
+          apiVersion: 'monitoring.coreos.com/v1',
+          kind: 'ServiceMonitor',
+          metadata: {
+            name: 'blackbox-exporter',
+            namespace: $._config.namespace,
+            labels: bb.assignLabels,
+          },
+          spec: {
+            endpoints: [{
+              bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
+              interval: '30s',
+              path: '/metrics',
+              port: 'http',
+              scheme: 'https',
+              tlsConfig: {
+                insecureSkipVerify: true,
+              },
+            }],
+            selector: {
+              matchLabels: bb.matchLabels,
+            },
+          },
+        },
+    } +
+    (kubeRbacProxyContainer {
+       config+:: {
+         kubeRbacProxy: {
+           image: $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy,
+           name: 'kube-rbac-proxy',
+           securePortName: 'https',
+           securePort: bb.port,
+           secureListenAddress: ':%d' % self.securePort,
+           upstream: 'http://127.0.0.1:%d/' % bb.internalPort,
+           tlsCipherSuites: $._config.tlsCipherSuites,
+         },
+       },
+     }).deploymentMixin,
+}
diff --git a/jsonnet/kube-prometheus/kube-prometheus.libsonnet b/jsonnet/kube-prometheus/kube-prometheus.libsonnet
index 3bfd47688a29a2f1913c48218416e2452e476622..38bb061ecfdd61240cf660c9f67c0acf3ad559e6 100644
--- a/jsonnet/kube-prometheus/kube-prometheus.libsonnet
+++ b/jsonnet/kube-prometheus/kube-prometheus.libsonnet
@@ -5,6 +5,7 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
 (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics-mixin/mixin.libsonnet') +
 (import './node-exporter/node-exporter.libsonnet') +
 (import 'github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet') +
+(import './blackbox-exporter/blackbox-exporter.libsonnet') +
 (import './alertmanager/alertmanager.libsonnet') +
 (import 'github.com/prometheus/alertmanager/doc/alertmanager-mixin/mixin.libsonnet') +
 (import 'github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheus-operator.libsonnet') +
diff --git a/kustomization.yaml b/kustomization.yaml
index b067b22f5d0aada60a2eab82bf2baa581d408c6f..7066018a01560f7e1e5b940a699fe58b39b5fbc9 100644
--- a/kustomization.yaml
+++ b/kustomization.yaml
@@ -6,6 +6,13 @@ resources:
 - ./manifests/alertmanager-service.yaml
 - ./manifests/alertmanager-serviceAccount.yaml
 - ./manifests/alertmanager-serviceMonitor.yaml
+- ./manifests/blackbox-exporter-clusterRole.yaml
+- ./manifests/blackbox-exporter-clusterRoleBinding.yaml
+- ./manifests/blackbox-exporter-configuration.yaml
+- ./manifests/blackbox-exporter-deployment.yaml
+- ./manifests/blackbox-exporter-service.yaml
+- ./manifests/blackbox-exporter-serviceAccount.yaml
+- ./manifests/blackbox-exporter-serviceMonitor.yaml
 - ./manifests/grafana-dashboardDatasources.yaml
 - ./manifests/grafana-dashboardDefinitions.yaml
 - ./manifests/grafana-dashboardSources.yaml
diff --git a/manifests/blackbox-exporter-clusterRole.yaml b/manifests/blackbox-exporter-clusterRole.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c7824058e1b24902c6eaf01dd8b48e2bb213a523
--- /dev/null
+++ b/manifests/blackbox-exporter-clusterRole.yaml
@@ -0,0 +1,17 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: blackbox-exporter
+rules:
+- apiGroups:
+  - authentication.k8s.io
+  resources:
+  - tokenreviews
+  verbs:
+  - create
+- apiGroups:
+  - authorization.k8s.io
+  resources:
+  - subjectaccessreviews
+  verbs:
+  - create
diff --git a/manifests/blackbox-exporter-clusterRoleBinding.yaml b/manifests/blackbox-exporter-clusterRoleBinding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7b3ae320903f9916cd2ed4191139142db3eb1558
--- /dev/null
+++ b/manifests/blackbox-exporter-clusterRoleBinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: blackbox-exporter
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: blackbox-exporter
+subjects:
+- kind: ServiceAccount
+  name: blackbox-exporter
+  namespace: monitoring
diff --git a/manifests/blackbox-exporter-configuration.yaml b/manifests/blackbox-exporter-configuration.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..497945ec80f646bcec55e625eefa8e78e64f982c
--- /dev/null
+++ b/manifests/blackbox-exporter-configuration.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+data:
+  config.yml: |-
+    "modules":
+      "http_2xx":
+        "prober": "http"
+      "http_post_2xx":
+        "http":
+          "method": "POST"
+        "prober": "http"
+      "irc_banner":
+        "prober": "tcp"
+        "tcp":
+          "query_response":
+          - "send": "NICK prober"
+          - "send": "USER prober prober prober :prober"
+          - "expect": "PING :([^ ]+)"
+            "send": "PONG ${1}"
+          - "expect": "^:[^ ]+ 001"
+      "pop3s_banner":
+        "prober": "tcp"
+        "tcp":
+          "query_response":
+          - "expect": "^+OK"
+          "tls": true
+          "tls_config":
+            "insecure_skip_verify": false
+      "ssh_banner":
+        "prober": "tcp"
+        "tcp":
+          "query_response":
+          - "expect": "^SSH-2.0-"
+      "tcp_connect":
+        "prober": "tcp"
+kind: ConfigMap
+metadata:
+  name: blackbox-exporter-configuration
+  namespace: monitoring
diff --git a/manifests/blackbox-exporter-deployment.yaml b/manifests/blackbox-exporter-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ca71dafbbcef53efb0767837a1b3a7adf754d875
--- /dev/null
+++ b/manifests/blackbox-exporter-deployment.yaml
@@ -0,0 +1,84 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app.kubernetes.io/name: blackbox-exporter
+    app.kubernetes.io/version: v0.18.0
+  name: blackbox-exporter
+  namespace: monitoring
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: blackbox-exporter
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: blackbox-exporter
+        app.kubernetes.io/version: v0.18.0
+    spec:
+      containers:
+      - args:
+        - --config.file=/etc/blackbox_exporter/config.yml
+        - --web.listen-address=:19115
+        image: quay.io/prometheus/blackbox-exporter:v0.18.0
+        name: blackbox-exporter
+        ports:
+        - containerPort: 19115
+          name: http
+        resources:
+          limits:
+            cpu: 20m
+            memory: 40Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+        securityContext:
+          runAsNonRoot: true
+          runAsUser: 65534
+        volumeMounts:
+        - mountPath: /etc/blackbox_exporter/
+          name: config
+          readOnly: true
+      - args:
+        - --webhook-url=http://localhost:19115/-/reload
+        - --volume-dir=/etc/blackbox_exporter/
+        image: jimmidyson/configmap-reload:v0.4.0
+        name: module-configmap-reloader
+        resources:
+          limits:
+            cpu: 20m
+            memory: 40Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+        securityContext:
+          runAsNonRoot: true
+          runAsUser: 65534
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: FallbackToLogsOnError
+        volumeMounts:
+        - mountPath: /etc/blackbox_exporter/
+          name: config
+          readOnly: true
+      - args:
+        - --logtostderr
+        - --secure-listen-address=:9115
+        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+        - --upstream=http://127.0.0.1:19115/
+        image: quay.io/brancz/kube-rbac-proxy:v0.8.0
+        name: kube-rbac-proxy
+        ports:
+        - containerPort: 9115
+          name: https
+        securityContext:
+          runAsGroup: 65532
+          runAsNonRoot: true
+          runAsUser: 65532
+      nodeSelector:
+        kubernetes.io/os: linux
+      serviceAccountName: blackbox-exporter
+      volumes:
+      - configMap:
+          name: blackbox-exporter-configuration
+        name: config
diff --git a/manifests/blackbox-exporter-service.yaml b/manifests/blackbox-exporter-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..587fff2bd20d04dc6eafa5fa8fddcb50eadbc012
--- /dev/null
+++ b/manifests/blackbox-exporter-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app.kubernetes.io/name: blackbox-exporter
+    app.kubernetes.io/version: v0.18.0
+  name: blackbox-exporter
+  namespace: monitoring
+spec:
+  ports:
+  - name: http
+    port: 9115
+    targetPort: https
+  selector:
+    app.kubernetes.io/name: blackbox-exporter
diff --git a/manifests/blackbox-exporter-serviceAccount.yaml b/manifests/blackbox-exporter-serviceAccount.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ac2acefb2e3aa946f502a84628d47cb64217721a
--- /dev/null
+++ b/manifests/blackbox-exporter-serviceAccount.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: blackbox-exporter
+  namespace: monitoring
diff --git a/manifests/blackbox-exporter-serviceMonitor.yaml b/manifests/blackbox-exporter-serviceMonitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..81eec23d4175a2ca1a9e370f23fff5cee33fa547
--- /dev/null
+++ b/manifests/blackbox-exporter-serviceMonitor.yaml
@@ -0,0 +1,20 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app.kubernetes.io/name: blackbox-exporter
+    app.kubernetes.io/version: v0.18.0
+  name: blackbox-exporter
+  namespace: monitoring
+spec:
+  endpoints:
+  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    interval: 30s
+    path: /metrics
+    port: http
+    scheme: https
+    tlsConfig:
+      insecureSkipVerify: true
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: blackbox-exporter