diff --git a/README.md b/README.md
index eff1e4e9b885477287994fc08b77529f6b275b89..8e6d669404eb59a2a12d64aa70eb32b7c6429f2d 100644
--- a/README.md
+++ b/README.md
@@ -18,9 +18,14 @@ Components included in this package:
 
 This stack is meant for cluster monitoring, so it is pre-configured to collect metrics from all Kubernetes components. In addition to that it delivers a default set of dashboards and alerting rules. Many of the useful dashboards and alerts come from the [kubernetes-mixin project](https://github.com/kubernetes-monitoring/kubernetes-mixin), similar to this project it provides composable jsonnet as a library for users to customize to their needs.
 
+## Warning
+
+The `master` branch is undergoing heavy refactoring. Please use the `release-0.7` branch until the refactoring is complete and this notice is removed.
+
 ## Table of contents
 
 - [kube-prometheus](#kube-prometheus)
+  - [Warning](#warning)
   - [Table of contents](#table-of-contents)
   - [Prerequisites](#prerequisites)
     - [minikube](#minikube)
@@ -53,8 +58,9 @@ This stack is meant for cluster monitoring, so it is pre-configured to collect m
     - [Stripping container resource limits](#stripping-container-resource-limits)
     - [Customizing Prometheus alerting/recording rules and Grafana dashboards](#customizing-prometheus-alertingrecording-rules-and-grafana-dashboards)
     - [Exposing Prometheus/Alermanager/Grafana via Ingress](#exposing-prometheusalermanagergrafana-via-ingress)
-    - [Setting up a blackbox exporter](#setting-up-a-blackbox exporter)
+    - [Setting up a blackbox exporter](#setting-up-a-blackbox-exporter)
   - [Minikube Example](#minikube-example)
+  - [Continuous Delivery](#continuous-delivery)
   - [Troubleshooting](#troubleshooting)
     - [Error retrieving kubelet metrics](#error-retrieving-kubelet-metrics)
       - [Authentication problem](#authentication-problem)
diff --git a/jsonnet/kube-prometheus/blackbox-exporter/blackbox-exporter.libsonnet b/jsonnet/kube-prometheus/blackbox-exporter/blackbox-exporter.libsonnet
index 38ef07eaa0526dc70e6ee4d0f70c261e7da8cbd3..9c76ff349e3857bd7544746ad9af39ff7f501b6c 100644
--- a/jsonnet/kube-prometheus/blackbox-exporter/blackbox-exporter.libsonnet
+++ b/jsonnet/kube-prometheus/blackbox-exporter/blackbox-exporter.libsonnet
@@ -1,4 +1,4 @@
-local kubeRbacProxyContainer = import '../kube-rbac-proxy/container.libsonnet';
+local kubeRbacProxyContainer = import '../kube-rbac-proxy/containerMixin.libsonnet';
 
 {
   _config+:: {
diff --git a/jsonnet/kube-prometheus/kube-prometheus.libsonnet b/jsonnet/kube-prometheus/kube-prometheus.libsonnet
index 05c7326a4022ab01ea66dd2a4554c26e032f4136..daab5bcc5d62ef45549dd9c7924e1d66d8804d5d 100644
--- a/jsonnet/kube-prometheus/kube-prometheus.libsonnet
+++ b/jsonnet/kube-prometheus/kube-prometheus.libsonnet
@@ -1,9 +1,10 @@
-local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
+local kubeRbacProxyContainer = import './kube-rbac-proxy/containerMixin.libsonnet';
+
+local nodeExporter = import './node-exporter/node-exporter.libsonnet';
 
 (import 'github.com/brancz/kubernetes-grafana/grafana/grafana.libsonnet') +
 (import './kube-state-metrics/kube-state-metrics.libsonnet') +
 (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics-mixin/mixin.libsonnet') +
-(import './node-exporter/node-exporter.libsonnet') +
 (import 'github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet') +
 (import './blackbox-exporter/blackbox-exporter.libsonnet') +
 (import './alertmanager/alertmanager.libsonnet') +
@@ -17,6 +18,11 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
 (import './alerts/alerts.libsonnet') +
 (import './rules/rules.libsonnet') +
 {
+  nodeExporter: nodeExporter({
+    namespace: $._config.namespace,
+    version: '1.0.1',
+    image: 'quay.io/prometheus/node-exporter:v1.0.1',
+  }),
   kubePrometheus+:: {
     namespace: {
       apiVersion: 'v1',
@@ -84,7 +90,6 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
        },
      }).deploymentMixin,
 
-
   grafana+:: {
     local dashboardDefinitions = super.dashboardDefinitions,
 
@@ -197,10 +202,6 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
         requests: { cpu: '100m', memory: '150Mi' },
         limits: { cpu: '100m', memory: '150Mi' },
       },
-      'node-exporter': {
-        requests: { cpu: '102m', memory: '180Mi' },
-        limits: { cpu: '250m', memory: '180Mi' },
-      },
     },
     prometheus+:: { rules: $.prometheusRules + $.prometheusAlerts },
     grafana+:: {
diff --git a/jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet b/jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet
index 724087d6141577d7f0f90b2a934e4dab1fae1ceb..a142b4b62821cedfc3795f01fe574c49f8f77c1a 100644
--- a/jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet
+++ b/jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet
@@ -1,93 +1,64 @@
-{
-  local krp = self,
-  config+:: {
-    kubeRbacProxy: {
-      image: error 'must provide image',
-      name: error 'must provide name',
-      securePortName: error 'must provide securePortName',
-      securePort: error 'must provide securePort',
-      secureListenAddress: error 'must provide secureListenAddress',
-      upstream: error 'must provide upstream',
-      tlsCipherSuites: error 'must provide tlsCipherSuites',
-    },
+local defaults = {
+  local defaults = self,
+  namespace: error 'must provide namespace',
+  image: 'quay.io/brancz/kube-rbac-proxy:v0.8.0',
+  ports: error 'must provide ports',
+  secureListenAddress: error 'must provide secureListenAddress',
+  upstream: error 'must provide upstream',
+  resources: {
+    requests: { cpu: '10m', memory: '20Mi' },
+    limits: { cpu: '20m', memory: '40Mi' },
   },
+  tlsCipherSuites: [
+      'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256',  // required by h2: http://golang.org/cl/30721
+      'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256',  // required by h2: http://golang.org/cl/30721
 
-  specMixin:: {
-    local sm = self,
-    config+:: {
-      kubeRbacProxy: {
-        image: error 'must provide image',
-        name: error 'must provide name',
-        securePortName: error 'must provide securePortName',
-        securePort: error 'must provide securePort',
-        secureListenAddress: error 'must provide secureListenAddress',
-        upstream: error 'must provide upstream',
-        tlsCipherSuites: error 'must provide tlsCipherSuites',
-      },
-    },
-    spec+: {
-      template+: {
-        spec+: {
-          containers+: [{
-            name: krp.config.kubeRbacProxy.name,
-            image: krp.config.kubeRbacProxy.image,
-            args: [
-              '--logtostderr',
-              '--secure-listen-address=' + krp.config.kubeRbacProxy.secureListenAddress,
-              '--tls-cipher-suites=' + std.join(',', krp.config.kubeRbacProxy.tlsCipherSuites),
-              '--upstream=' + krp.config.kubeRbacProxy.upstream,
-            ],
-            ports: [
-              { name: krp.config.kubeRbacProxy.securePortName, containerPort: krp.config.kubeRbacProxy.securePort },
-            ],
-            securityContext: {
-              runAsUser: 65532,
-              runAsGroup: 65532,
-              runAsNonRoot: true,
-            },
-          }],
-        },
-      },
-    },
-  },
+      // 'TLS_RSA_WITH_RC4_128_SHA',                // insecure: https://access.redhat.com/security/cve/cve-2013-2566
+      // 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',           // insecure: https://access.redhat.com/articles/2548661
+      // 'TLS_RSA_WITH_AES_128_CBC_SHA',            // disabled by h2
+      // 'TLS_RSA_WITH_AES_256_CBC_SHA',            // disabled by h2
+      // 'TLS_RSA_WITH_AES_128_CBC_SHA256',         // insecure: https://access.redhat.com/security/cve/cve-2013-0169
+      // 'TLS_RSA_WITH_AES_128_GCM_SHA256',         // disabled by h2
+      // 'TLS_RSA_WITH_AES_256_GCM_SHA384',         // disabled by h2
+      // 'TLS_ECDHE_ECDSA_WITH_RC4_128_SHA',        // insecure: https://access.redhat.com/security/cve/cve-2013-2566
+      // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA',    // disabled by h2
+      // 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA',    // disabled by h2
+      // 'TLS_ECDHE_RSA_WITH_RC4_128_SHA',          // insecure: https://access.redhat.com/security/cve/cve-2013-2566
+      // 'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA',     // insecure: https://access.redhat.com/articles/2548661
+      // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA',      // disabled by h2
+      // 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA',      // disabled by h2
+      // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256', // insecure: https://access.redhat.com/security/cve/cve-2013-0169
+      // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256',   // insecure: https://access.redhat.com/security/cve/cve-2013-0169
 
-  deploymentMixin:: {
-    local dm = self,
-    config+:: {
-      kubeRbacProxy: {
-        image: error 'must provide image',
-        name: error 'must provide name',
-        securePortName: error 'must provide securePortName',
-        securePort: error 'must provide securePort',
-        secureListenAddress: error 'must provide secureListenAddress',
-        upstream: error 'must provide upstream',
-        tlsCipherSuites: error 'must provide tlsCipherSuites',
-      },
-    },
-    deployment+: krp.specMixin {
-      config+:: {
-        kubeRbacProxy+: dm.config.kubeRbacProxy,
-      },
-    },
-  },
+      // disabled by h2 means: https://github.com/golang/net/blob/e514e69ffb8bc3c76a71ae40de0118d794855992/http2/ciphers.go
 
-  statefulSetMixin:: {
-    local sm = self,
-    config+:: {
-      kubeRbacProxy: {
-        image: error 'must provide image',
-        name: error 'must provide name',
-        securePortName: error 'must provide securePortName',
-        securePort: error 'must provide securePort',
-        secureListenAddress: error 'must provide secureListenAddress',
-        upstream: error 'must provide upstream',
-        tlsCipherSuites: error 'must provide tlsCipherSuites',
-      },
-    },
-    statefulSet+: krp.specMixin {
-      config+:: {
-        kubeRbacProxy+: sm.config.kubeRbacProxy,
-      },
+      'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384',
+      'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384',
+      'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305',
+      'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305',
+    ],
+};
+
+
+function(params) {
+  local krp = self,
+  config:: defaults + params,
+  // Safety check
+  assert std.isObject(krp.config.resources),
+
+    name: krp.config.name,
+    image: krp.config.image,
+    args: [
+      '--logtostderr',
+      '--secure-listen-address=' + krp.config.secureListenAddress,
+      '--tls-cipher-suites=' + std.join(',', krp.config.tlsCipherSuites),
+      '--upstream=' + krp.config.upstream,
+    ],
+    resources: krp.config.resources,
+    ports: krp.config.ports,
+    securityContext: {
+      runAsUser: 65532,
+      runAsGroup: 65532,
+      runAsNonRoot: true,
     },
-  },
 }
diff --git a/jsonnet/kube-prometheus/kube-rbac-proxy/containerMixin.libsonnet b/jsonnet/kube-prometheus/kube-rbac-proxy/containerMixin.libsonnet
new file mode 100644
index 0000000000000000000000000000000000000000..795463a7192845ed0d490aadc0d4f5d75c4bd910
--- /dev/null
+++ b/jsonnet/kube-prometheus/kube-rbac-proxy/containerMixin.libsonnet
@@ -0,0 +1,96 @@
+// TODO(paulfantom): remove this file after all usages of kube-rbac-proxy/containerMixin.libsonnet
+// are converted to use kube-rbac-proxy/container.libsonnet
+
+{
+  local krp = self,
+  config+:: {
+    kubeRbacProxy: {
+      image: error 'must provide image',
+      name: error 'must provide name',
+      securePortName: error 'must provide securePortName',
+      securePort: error 'must provide securePort',
+      secureListenAddress: error 'must provide secureListenAddress',
+      upstream: error 'must provide upstream',
+      tlsCipherSuites: error 'must provide tlsCipherSuites',
+    },
+  },
+
+  specMixin:: {
+    local sm = self,
+    config+:: {
+      kubeRbacProxy: {
+        image: error 'must provide image',
+        name: error 'must provide name',
+        securePortName: error 'must provide securePortName',
+        securePort: error 'must provide securePort',
+        secureListenAddress: error 'must provide secureListenAddress',
+        upstream: error 'must provide upstream',
+        tlsCipherSuites: error 'must provide tlsCipherSuites',
+      },
+    },
+    spec+: {
+      template+: {
+        spec+: {
+          containers+: [{
+            name: krp.config.kubeRbacProxy.name,
+            image: krp.config.kubeRbacProxy.image,
+            args: [
+              '--logtostderr',
+              '--secure-listen-address=' + krp.config.kubeRbacProxy.secureListenAddress,
+              '--tls-cipher-suites=' + std.join(',', krp.config.kubeRbacProxy.tlsCipherSuites),
+              '--upstream=' + krp.config.kubeRbacProxy.upstream,
+            ],
+            ports: [
+              { name: krp.config.kubeRbacProxy.securePortName, containerPort: krp.config.kubeRbacProxy.securePort },
+            ],
+            securityContext: {
+              runAsUser: 65532,
+              runAsGroup: 65532,
+              runAsNonRoot: true,
+            },
+          }],
+        },
+      },
+    },
+  },
+
+  deploymentMixin:: {
+    local dm = self,
+    config+:: {
+      kubeRbacProxy: {
+        image: error 'must provide image',
+        name: error 'must provide name',
+        securePortName: error 'must provide securePortName',
+        securePort: error 'must provide securePort',
+        secureListenAddress: error 'must provide secureListenAddress',
+        upstream: error 'must provide upstream',
+        tlsCipherSuites: error 'must provide tlsCipherSuites',
+      },
+    },
+    deployment+: krp.specMixin {
+      config+:: {
+        kubeRbacProxy+: dm.config.kubeRbacProxy,
+      },
+    },
+  },
+
+  statefulSetMixin:: {
+    local sm = self,
+    config+:: {
+      kubeRbacProxy: {
+        image: error 'must provide image',
+        name: error 'must provide name',
+        securePortName: error 'must provide securePortName',
+        securePort: error 'must provide securePort',
+        secureListenAddress: error 'must provide secureListenAddress',
+        upstream: error 'must provide upstream',
+        tlsCipherSuites: error 'must provide tlsCipherSuites',
+      },
+    },
+    statefulSet+: krp.specMixin {
+      config+:: {
+        kubeRbacProxy+: sm.config.kubeRbacProxy,
+      },
+    },
+  },
+}
diff --git a/jsonnet/kube-prometheus/kube-state-metrics/kube-state-metrics.libsonnet b/jsonnet/kube-prometheus/kube-state-metrics/kube-state-metrics.libsonnet
index 4e1709dd57fef362627af17aca79b458f21c9933..62cad10bb3a45f7ae54debc794b1fb4a5b1b0ff4 100644
--- a/jsonnet/kube-prometheus/kube-state-metrics/kube-state-metrics.libsonnet
+++ b/jsonnet/kube-prometheus/kube-state-metrics/kube-state-metrics.libsonnet
@@ -1,4 +1,4 @@
-local kubeRbacProxyContainer = import '../kube-rbac-proxy/container.libsonnet';
+local kubeRbacProxyContainer = import '../kube-rbac-proxy/containerMixin.libsonnet';
 local ksm = import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics/kube-state-metrics.libsonnet';
 
 {
diff --git a/jsonnet/kube-prometheus/node-exporter/node-exporter.libsonnet b/jsonnet/kube-prometheus/node-exporter/node-exporter.libsonnet
index 8c5f7383dd941b6c0b75c5b9e0cebf875cacb378..63ec53b9dce8aa7ce71e12449cd6c1b4a7f06537 100644
--- a/jsonnet/kube-prometheus/node-exporter/node-exporter.libsonnet
+++ b/jsonnet/kube-prometheus/node-exporter/node-exporter.libsonnet
@@ -1,214 +1,212 @@
-{
-  _config+:: {
-    namespace: 'default',
-    versions+:: { nodeExporter: 'v1.0.1' },
-    imageRepos+:: { nodeExporter: 'quay.io/prometheus/node-exporter' },
+local krp = (import '../kube-rbac-proxy/container.libsonnet');
 
-    nodeExporter+:: {
-      listenAddress: '127.0.0.1',
-      port: 9100,
-      labels: {
-        'app.kubernetes.io/name': 'node-exporter',
-        'app.kubernetes.io/version': $._config.versions.nodeExporter,
-        'app.kubernetes.io/component': 'exporter',
-        'app.kubernetes.io/part-of': 'kube-prometheus',
-      },
-      selectorLabels: {
-        [labelName]: $._config.nodeExporter.labels[labelName]
-        for labelName in std.objectFields($._config.nodeExporter.labels)
-        if !std.setMember(labelName, ['app.kubernetes.io/version'])
-      },
+local defaults = {
+  local defaults = self,
+  namespace: error 'must provide namespace',
+  version: error 'must provide version',
+  image: error 'must provide image',
+  resources: {
+    requests: { cpu: '102m', memory: '180Mi' },
+    limits: { cpu: '250m', memory: '180Mi' },
+  },
+  listenAddress: '127.0.0.1',
+  port: 9100,
+  commonLabels:: {
+    'app.kubernetes.io/name': 'node-exporter',
+    'app.kubernetes.io/version': defaults.version,
+    'app.kubernetes.io/component': 'exporter',
+    'app.kubernetes.io/part-of': 'kube-prometheus',
+  },
+  selectorLabels:: {
+    [labelName]: defaults.commonLabels[labelName]
+    for labelName in std.objectFields(defaults.commonLabels)
+    if !std.setMember(labelName, ['app.kubernetes.io/version'])
+  },
+};
+
+
+function(params) {
+  local ne = self,
+  config:: defaults + params,
+  // Safety check
+  assert std.isObject(ne.config.resources),
+
+  clusterRoleBinding: {
+    apiVersion: 'rbac.authorization.k8s.io/v1',
+    kind: 'ClusterRoleBinding',
+    metadata: {
+      name: 'node-exporter',
+      labels: ne.config.commonLabels,
     },
+    roleRef: {
+      apiGroup: 'rbac.authorization.k8s.io',
+      kind: 'ClusterRole',
+      name: 'node-exporter',
+    },
+    subjects: [{
+      kind: 'ServiceAccount',
+      name: 'node-exporter',
+      namespace: ne.config.namespace,
+    }],
   },
 
-  nodeExporter+:: {
-    clusterRoleBinding: {
-      apiVersion: 'rbac.authorization.k8s.io/v1',
-      kind: 'ClusterRoleBinding',
-      metadata: {
-        name: 'node-exporter',
-        labels: $._config.nodeExporter.labels,
-      },
-      roleRef: {
-        apiGroup: 'rbac.authorization.k8s.io',
-        kind: 'ClusterRole',
-        name: 'node-exporter',
+  clusterRole: {
+    apiVersion: 'rbac.authorization.k8s.io/v1',
+    kind: 'ClusterRole',
+    metadata: {
+      name: 'node-exporter',
+      labels: ne.config.commonLabels,
+    },
+    rules: [
+      {
+        apiGroups: ['authentication.k8s.io'],
+        resources: ['tokenreviews'],
+        verbs: ['create'],
       },
-      subjects: [{
-        kind: 'ServiceAccount',
-        name: 'node-exporter',
-        namespace: $._config.namespace,
+      {
+        apiGroups: ['authorization.k8s.io'],
+        resources: ['subjectaccessreviews'],
+        verbs: ['create'],
       }],
     },
 
-    clusterRole: {
-      apiVersion: 'rbac.authorization.k8s.io/v1',
-      kind: 'ClusterRole',
-      metadata: {
-        name: 'node-exporter',
-        labels: $._config.nodeExporter.labels,
-      },
-      rules: [
-        {
-          apiGroups: ['authentication.k8s.io'],
-          resources: ['tokenreviews'],
-          verbs: ['create'],
-        },
-        {
-          apiGroups: ['authorization.k8s.io'],
-          resources: ['subjectaccessreviews'],
-          verbs: ['create'],
-        },
-      ],
+  serviceAccount: {
+    apiVersion: 'v1',
+    kind: 'ServiceAccount',
+    metadata: {
+      name: 'node-exporter',
+      namespace: ne.config.namespace,
+      labels: ne.config.commonLabels,
     },
+  },
 
-    daemonset:
-      local nodeExporter = {
-        name: 'node-exporter',
-        image: $._config.imageRepos.nodeExporter + ':' + $._config.versions.nodeExporter,
-        args: [
-          '--web.listen-address=' + std.join(':', [$._config.nodeExporter.listenAddress, std.toString($._config.nodeExporter.port)]),
-          '--path.sysfs=/host/sys',
-          '--path.rootfs=/host/root',
-          '--no-collector.wifi',
-          '--no-collector.hwmon',
-          '--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)',
-        ],
-        volumeMounts: [
-          { name: 'sys', mountPath: '/host/sys', mountPropagation: 'HostToContainer', readOnly: true },
-          { name: 'root', mountPath: '/host/root', mountPropagation: 'HostToContainer', readOnly: true },
-        ],
-        resources: $._config.resources['node-exporter'],
-      };
+  service: {
+    apiVersion: 'v1',
+    kind: 'Service',
+    metadata: {
+      name: 'node-exporter',
+      namespace: ne.config.namespace,
+      labels: ne.config.commonLabels,
+    },
+    spec: {
+      ports: [
+        { name: 'https', targetPort: 'https', port: ne.config.port },
+      ],
+      selector: ne.config.selectorLabels,
+      clusterIP: 'None',
+    },
+  },
 
-      local proxy = {
-        name: 'kube-rbac-proxy',
-        image: $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy,
-        args: [
-          '--logtostderr',
-          '--secure-listen-address=[$(IP)]:' + $._config.nodeExporter.port,
-          '--tls-cipher-suites=' + std.join(',', $._config.tlsCipherSuites),
-          '--upstream=http://127.0.0.1:' + $._config.nodeExporter.port + '/',
-        ],
-        env: [
-          { name: 'IP', valueFrom: { fieldRef: { fieldPath: 'status.podIP' } } },
-        ],
-        // Keep `hostPort` here, rather than in the node-exporter container
-        // because Kubernetes mandates that if you define a `hostPort` then
-        // `containerPort` must match. In our case, we are splitting the
-        // host port and container port between the two containers.
-        // We'll keep the port specification here so that the named port
-        // used by the service is tied to the proxy container. We *could*
-        // forgo declaring the host port, however it is important to declare
-        // it so that the scheduler can decide if the pod is schedulable.
-        ports: [
-          { name: 'https', containerPort: $._config.nodeExporter.port, hostPort: $._config.nodeExporter.port },
+  serviceMonitor: {
+    apiVersion: 'monitoring.coreos.com/v1',
+    kind: 'ServiceMonitor',
+    metadata: {
+      name: 'node-exporter',
+      namespace: ne.config.namespace,
+      labels: ne.config.commonLabels,
+    },
+    spec: {
+      jobLabel: 'app.kubernetes.io/name',
+      selector: {
+        matchLabels: ne.config.selectorLabels,
+      },
+      endpoints: [{
+        port: 'https',
+        scheme: 'https',
+        interval: '15s',
+        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
+        relabelings: [
+          {
+            action: 'replace',
+            regex: '(.*)',
+            replacement: '$1',
+            sourceLabels: ['__meta_kubernetes_pod_node_name'],
+            targetLabel: 'instance',
+          },
         ],
-        resources: $._config.resources['kube-rbac-proxy'],
-        securityContext: {
-          runAsUser: 65532,
-          runAsGroup: 65532,
-          runAsNonRoot: true,
+        tlsConfig: {
+          insecureSkipVerify: true,
         },
-      };
+      }],
+    },
+  },
 
-      {
-        apiVersion: 'apps/v1',
-        kind: 'DaemonSet',
-        metadata: {
-          name: 'node-exporter',
-          namespace: $._config.namespace,
-          labels: $._config.nodeExporter.labels,
-        },
-        spec: {
-          selector: { matchLabels: $._config.nodeExporter.selectorLabels },
-          updateStrategy: {
-            type: 'RollingUpdate',
-            rollingUpdate: { maxUnavailable: '10%' },
-          },
-          template: {
-            metadata: { labels: $._config.nodeExporter.labels },
-            spec: {
-              nodeSelector: { 'kubernetes.io/os': 'linux' },
-              tolerations: [{
-                operator: 'Exists',
-              }],
-              containers: [nodeExporter, proxy],
-              volumes: [
-                { name: 'sys', hostPath: { path: '/sys' } },
-                { name: 'root', hostPath: { path: '/' } },
-              ],
-              serviceAccountName: 'node-exporter',
-              securityContext: {
-                runAsUser: 65534,
-                runAsNonRoot: true,
-              },
-              hostPID: true,
-              hostNetwork: true,
-            },
-          },
-        },
-      },
+  daemonset:
+    local nodeExporter = {
+      name: 'node-exporter',
+      image: ne.config.image,
+      args: [
+        '--web.listen-address=' + std.join(':', [ne.config.listenAddress, std.toString(ne.config.port)]),
+        '--path.sysfs=/host/sys',
+        '--path.rootfs=/host/root',
+        '--no-collector.wifi',
+        '--no-collector.hwmon',
+        '--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)',
+      ],
+      volumeMounts: [
+        { name: 'sys', mountPath: '/host/sys', mountPropagation: 'HostToContainer', readOnly: true },
+        { name: 'root', mountPath: '/host/root', mountPropagation: 'HostToContainer', readOnly: true },
+      ],
+      resources: ne.config.resources,
+    };
 
-    serviceAccount: {
-      apiVersion: 'v1',
-      kind: 'ServiceAccount',
-      metadata: {
-        name: 'node-exporter',
-        namespace: $._config.namespace,
-        labels: $._config.nodeExporter.labels,
-      },
-    },
+    local kubeRbacProxy = krp({
+      name: 'kube-rbac-proxy',
+      //image: krpImage,
+      upstream: 'http://127.0.0.1:' + ne.config.port + '/',
+      secureListenAddress: '[$(IP)]:' + ne.config.port,
+      // Keep `hostPort` here, rather than in the node-exporter container
+      // because Kubernetes mandates that if you define a `hostPort` then
+      // `containerPort` must match. In our case, we are splitting the
+      // host port and container port between the two containers.
+      // We'll keep the port specification here so that the named port
+      // used by the service is tied to the proxy container. We *could*
+      // forgo declaring the host port, however it is important to declare
+      // it so that the scheduler can decide if the pod is schedulable.
+      ports: [
+        { name: 'https', containerPort: ne.config.port, hostPort: ne.config.port },
+      ],
+    }) + {
+      env: [
+        { name: 'IP', valueFrom: { fieldRef: { fieldPath: 'status.podIP' } } },
+      ]
+    };
 
-    serviceMonitor: {
-      apiVersion: 'monitoring.coreos.com/v1',
-      kind: 'ServiceMonitor',
+    {
+      apiVersion: 'apps/v1',
+      kind: 'DaemonSet',
       metadata: {
         name: 'node-exporter',
-        namespace: $._config.namespace,
-        labels: $._config.nodeExporter.labels,
+        namespace: ne.config.namespace,
+        labels: ne.config.commonLabels,
       },
       spec: {
-        jobLabel: 'app.kubernetes.io/name',
-        selector: {
-          matchLabels: $._config.nodeExporter.selectorLabels,
+        selector: { matchLabels: ne.config.selectorLabels },
+        updateStrategy: {
+          type: 'RollingUpdate',
+          rollingUpdate: { maxUnavailable: '10%' },
         },
-        endpoints: [{
-          port: 'https',
-          scheme: 'https',
-          interval: '15s',
-          bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
-          relabelings: [
-            {
-              action: 'replace',
-              regex: '(.*)',
-              replacement: '$1',
-              sourceLabels: ['__meta_kubernetes_pod_node_name'],
-              targetLabel: 'instance',
+        template: {
+          metadata: { labels: ne.config.commonLabels },
+          spec: {
+            nodeSelector: { 'kubernetes.io/os': 'linux' },
+            tolerations: [{
+              operator: 'Exists',
+            }],
+            containers: [nodeExporter, kubeRbacProxy],
+            volumes: [
+              { name: 'sys', hostPath: { path: '/sys' } },
+              { name: 'root', hostPath: { path: '/' } },
+            ],
+            serviceAccountName: 'node-exporter',
+            securityContext: {
+              runAsUser: 65534,
+              runAsNonRoot: true,
             },
-          ],
-          tlsConfig: {
-            insecureSkipVerify: true,
+            hostPID: true,
+            hostNetwork: true,
           },
-        }],
-      },
-    },
-
-    service: {
-      apiVersion: 'v1',
-      kind: 'Service',
-      metadata: {
-        name: 'node-exporter',
-        namespace: $._config.namespace,
-        labels: $._config.nodeExporter.labels,
-      },
-      spec: {
-        ports: [
-          { name: 'https', targetPort: 'https', port: $._config.nodeExporter.port },
-        ],
-        selector: $._config.nodeExporter.selectorLabels,
-        clusterIP: 'None',
+        },
       },
     },
-  },
 }
diff --git a/manifests/node-exporter-clusterRole.yaml b/manifests/node-exporter-clusterRole.yaml
index 7d5bf044bddd20f7148a63e69d2f9e9caf9885a4..45383007fd05f39cf7a45951430fda059093b106 100644
--- a/manifests/node-exporter-clusterRole.yaml
+++ b/manifests/node-exporter-clusterRole.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
 rules:
 - apiGroups:
diff --git a/manifests/node-exporter-clusterRoleBinding.yaml b/manifests/node-exporter-clusterRoleBinding.yaml
index 3c8d407223169ded23c45192a428828e70d0e20d..8887d631a3151208603d16163890a432764931cc 100644
--- a/manifests/node-exporter-clusterRoleBinding.yaml
+++ b/manifests/node-exporter-clusterRoleBinding.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
 roleRef:
   apiGroup: rbac.authorization.k8s.io
diff --git a/manifests/node-exporter-daemonset.yaml b/manifests/node-exporter-daemonset.yaml
index e8a21f32da0ae0929db0edabc758cb9901ab459e..d88d88463dd00adb11030a88503e2e718bbee156 100644
--- a/manifests/node-exporter-daemonset.yaml
+++ b/manifests/node-exporter-daemonset.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
   namespace: monitoring
 spec:
@@ -20,7 +20,7 @@ spec:
         app.kubernetes.io/component: exporter
         app.kubernetes.io/name: node-exporter
         app.kubernetes.io/part-of: kube-prometheus
-        app.kubernetes.io/version: v1.0.1
+        app.kubernetes.io/version: 1.0.1
     spec:
       containers:
       - args:
diff --git a/manifests/node-exporter-service.yaml b/manifests/node-exporter-service.yaml
index 4423c3e879e06cc2cab2180b7f8825c272727da6..e0cfb53a09f71fd54747d0c86a203b8576d29b3f 100644
--- a/manifests/node-exporter-service.yaml
+++ b/manifests/node-exporter-service.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
   namespace: monitoring
 spec:
diff --git a/manifests/node-exporter-serviceAccount.yaml b/manifests/node-exporter-serviceAccount.yaml
index daad7d2fad080e9f97a85b7a7eb2b9e4d82d42d0..1be5af3c83a1cafe5a1d060953bd373f4c8ca35a 100644
--- a/manifests/node-exporter-serviceAccount.yaml
+++ b/manifests/node-exporter-serviceAccount.yaml
@@ -5,6 +5,6 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
   namespace: monitoring
diff --git a/manifests/node-exporter-serviceMonitor.yaml b/manifests/node-exporter-serviceMonitor.yaml
index 65432fd66f2090069c815688df5391769dac176d..7b0835c8923bea13211579ea68661869c1b76dbe 100644
--- a/manifests/node-exporter-serviceMonitor.yaml
+++ b/manifests/node-exporter-serviceMonitor.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
   namespace: monitoring
 spec: