diff --git a/jsonnet/kube-prometheus/kube-prometheus.libsonnet b/jsonnet/kube-prometheus/kube-prometheus.libsonnet index 219c011d2f3fb62e89da15935fe91bf1ed1eff42..1def6a56d17e575a70dd2d4bad27bbcffd32d32a 100644 --- a/jsonnet/kube-prometheus/kube-prometheus.libsonnet +++ b/jsonnet/kube-prometheus/kube-prometheus.libsonnet @@ -1,5 +1,7 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/containerMixin.libsonnet'; +local nodeExporter = import './node-exporter/node-exporter.libsonnet'; + (import 'github.com/brancz/kubernetes-grafana/grafana/grafana.libsonnet') + (import './kube-state-metrics/kube-state-metrics.libsonnet') + (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics-mixin/mixin.libsonnet') + @@ -16,6 +18,11 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/containerMixin.libsonne (import './alerts/alerts.libsonnet') + (import './rules/rules.libsonnet') + { + nodeExporter: nodeExporter({ + namespace: $._config.namespace, + version: '1.0.1', + imageRepo: 'quay.io/prometheus/node-exporter', + }), kubePrometheus+:: { namespace: { apiVersion: 'v1', @@ -83,7 +90,6 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/containerMixin.libsonne }, }).deploymentMixin, - grafana+:: { local dashboardDefinitions = super.dashboardDefinitions, @@ -196,10 +202,6 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/containerMixin.libsonne requests: { cpu: '100m', memory: '150Mi' }, limits: { cpu: '100m', memory: '150Mi' }, }, - 'node-exporter': { - requests: { cpu: '102m', memory: '180Mi' }, - limits: { cpu: '250m', memory: '180Mi' }, - }, }, prometheus+:: { rules: $.prometheusRules + $.prometheusAlerts }, grafana+:: { diff --git a/jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet b/jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet index 724087d6141577d7f0f90b2a934e4dab1fae1ceb..a142b4b62821cedfc3795f01fe574c49f8f77c1a 100644 --- a/jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet +++ 
b/jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet @@ -1,93 +1,64 @@ -{ - local krp = self, - config+:: { - kubeRbacProxy: { - image: error 'must provide image', - name: error 'must provide name', - securePortName: error 'must provide securePortName', - securePort: error 'must provide securePort', - secureListenAddress: error 'must provide secureListenAddress', - upstream: error 'must provide upstream', - tlsCipherSuites: error 'must provide tlsCipherSuites', - }, +local defaults = { + local defaults = self, + namespace: error 'must provide namespace', + image: 'quay.io/brancz/kube-rbac-proxy:v0.8.0', + ports: error 'must provide ports', + secureListenAddress: error 'must provide secureListenAddress', + upstream: error 'must provide upstream', + resources: { + requests: { cpu: '10m', memory: '20Mi' }, + limits: { cpu: '20m', memory: '40Mi' }, }, + tlsCipherSuites: [ + 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', // required by h2: http://golang.org/cl/30721 + 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', // required by h2: http://golang.org/cl/30721 - specMixin:: { - local sm = self, - config+:: { - kubeRbacProxy: { - image: error 'must provide image', - name: error 'must provide name', - securePortName: error 'must provide securePortName', - securePort: error 'must provide securePort', - secureListenAddress: error 'must provide secureListenAddress', - upstream: error 'must provide upstream', - tlsCipherSuites: error 'must provide tlsCipherSuites', - }, - }, - spec+: { - template+: { - spec+: { - containers+: [{ - name: krp.config.kubeRbacProxy.name, - image: krp.config.kubeRbacProxy.image, - args: [ - '--logtostderr', - '--secure-listen-address=' + krp.config.kubeRbacProxy.secureListenAddress, - '--tls-cipher-suites=' + std.join(',', krp.config.kubeRbacProxy.tlsCipherSuites), - '--upstream=' + krp.config.kubeRbacProxy.upstream, - ], - ports: [ - { name: krp.config.kubeRbacProxy.securePortName, containerPort: krp.config.kubeRbacProxy.securePort }, - ], - 
securityContext: { - runAsUser: 65532, - runAsGroup: 65532, - runAsNonRoot: true, - }, - }], - }, - }, - }, - }, + // 'TLS_RSA_WITH_RC4_128_SHA', // insecure: https://access.redhat.com/security/cve/cve-2013-2566 + // 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', // insecure: https://access.redhat.com/articles/2548661 + // 'TLS_RSA_WITH_AES_128_CBC_SHA', // disabled by h2 + // 'TLS_RSA_WITH_AES_256_CBC_SHA', // disabled by h2 + // 'TLS_RSA_WITH_AES_128_CBC_SHA256', // insecure: https://access.redhat.com/security/cve/cve-2013-0169 + // 'TLS_RSA_WITH_AES_128_GCM_SHA256', // disabled by h2 + // 'TLS_RSA_WITH_AES_256_GCM_SHA384', // disabled by h2 + // 'TLS_ECDHE_ECDSA_WITH_RC4_128_SHA', // insecure: https://access.redhat.com/security/cve/cve-2013-2566 + // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA', // disabled by h2 + // 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA', // disabled by h2 + // 'TLS_ECDHE_RSA_WITH_RC4_128_SHA', // insecure: https://access.redhat.com/security/cve/cve-2013-2566 + // 'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA', // insecure: https://access.redhat.com/articles/2548661 + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', // disabled by h2 + // 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA', // disabled by h2 + // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256', // insecure: https://access.redhat.com/security/cve/cve-2013-0169 + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', // insecure: https://access.redhat.com/security/cve/cve-2013-0169 - deploymentMixin:: { - local dm = self, - config+:: { - kubeRbacProxy: { - image: error 'must provide image', - name: error 'must provide name', - securePortName: error 'must provide securePortName', - securePort: error 'must provide securePort', - secureListenAddress: error 'must provide secureListenAddress', - upstream: error 'must provide upstream', - tlsCipherSuites: error 'must provide tlsCipherSuites', - }, - }, - deployment+: krp.specMixin { - config+:: { - kubeRbacProxy+: dm.config.kubeRbacProxy, - }, - }, - }, + // disabled by h2 means: 
https://github.com/golang/net/blob/e514e69ffb8bc3c76a71ae40de0118d794855992/http2/ciphers.go - statefulSetMixin:: { - local sm = self, - config+:: { - kubeRbacProxy: { - image: error 'must provide image', - name: error 'must provide name', - securePortName: error 'must provide securePortName', - securePort: error 'must provide securePort', - secureListenAddress: error 'must provide secureListenAddress', - upstream: error 'must provide upstream', - tlsCipherSuites: error 'must provide tlsCipherSuites', - }, - }, - statefulSet+: krp.specMixin { - config+:: { - kubeRbacProxy+: sm.config.kubeRbacProxy, - }, + 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', + 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', + 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305', + 'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305', + ], +}; + + +function(params) { + local krp = self, + config:: defaults + params, + // Safety check + assert std.isObject(krp.config.resources), + + name: krp.config.name, + image: krp.config.image, + args: [ + '--logtostderr', + '--secure-listen-address=' + krp.config.secureListenAddress, + '--tls-cipher-suites=' + std.join(',', krp.config.tlsCipherSuites), + '--upstream=' + krp.config.upstream, + ], + resources: krp.config.resources, + ports: krp.config.ports, + securityContext: { + runAsUser: 65532, + runAsGroup: 65532, + runAsNonRoot: true, }, - }, } diff --git a/jsonnet/kube-prometheus/node-exporter/node-exporter.libsonnet b/jsonnet/kube-prometheus/node-exporter/node-exporter.libsonnet index 8c5f7383dd941b6c0b75c5b9e0cebf875cacb378..9100133c22b5520b758dd9a633b6faa64ddc6fbb 100644 --- a/jsonnet/kube-prometheus/node-exporter/node-exporter.libsonnet +++ b/jsonnet/kube-prometheus/node-exporter/node-exporter.libsonnet @@ -1,214 +1,213 @@ -{ - _config+:: { - namespace: 'default', - versions+:: { nodeExporter: 'v1.0.1' }, - imageRepos+:: { nodeExporter: 'quay.io/prometheus/node-exporter' }, +local krp = (import '../kube-rbac-proxy/container.libsonnet'); - nodeExporter+:: { - listenAddress: 
'127.0.0.1', - port: 9100, - labels: { - 'app.kubernetes.io/name': 'node-exporter', - 'app.kubernetes.io/version': $._config.versions.nodeExporter, - 'app.kubernetes.io/component': 'exporter', - 'app.kubernetes.io/part-of': 'kube-prometheus', - }, - selectorLabels: { - [labelName]: $._config.nodeExporter.labels[labelName] - for labelName in std.objectFields($._config.nodeExporter.labels) - if !std.setMember(labelName, ['app.kubernetes.io/version']) - }, +local defaults = { + local defaults = self, + namespace: error 'must provide namespace', + version: error 'must provide version', + imageRepo: error 'must provide imageRepo', + // image: "", + resources: { + requests: { cpu: '102m', memory: '180Mi' }, + limits: { cpu: '250m', memory: '180Mi' }, + }, + listenAddress: '127.0.0.1', + port: 9100, + commonLabels:: { + 'app.kubernetes.io/name': 'node-exporter', + 'app.kubernetes.io/version': defaults.version, + 'app.kubernetes.io/component': 'exporter', + 'app.kubernetes.io/part-of': 'kube-prometheus', + }, + selectorLabels:: { + [labelName]: defaults.commonLabels[labelName] + for labelName in std.objectFields(defaults.commonLabels) + if !std.setMember(labelName, ['app.kubernetes.io/version']) + }, +}; + + +function(params) { + local ne = self, + config:: defaults + params, + // Safety check + assert std.isObject(ne.config.resources), + + clusterRoleBinding: { + apiVersion: 'rbac.authorization.k8s.io/v1', + kind: 'ClusterRoleBinding', + metadata: { + name: 'node-exporter', + labels: ne.config.commonLabels, }, + roleRef: { + apiGroup: 'rbac.authorization.k8s.io', + kind: 'ClusterRole', + name: 'node-exporter', + }, + subjects: [{ + kind: 'ServiceAccount', + name: 'node-exporter', + namespace: ne.config.namespace, + }], }, - nodeExporter+:: { - clusterRoleBinding: { - apiVersion: 'rbac.authorization.k8s.io/v1', - kind: 'ClusterRoleBinding', - metadata: { - name: 'node-exporter', - labels: $._config.nodeExporter.labels, - }, - roleRef: { - apiGroup: 
'rbac.authorization.k8s.io', - kind: 'ClusterRole', - name: 'node-exporter', + clusterRole: { + apiVersion: 'rbac.authorization.k8s.io/v1', + kind: 'ClusterRole', + metadata: { + name: 'node-exporter', + labels: ne.config.commonLabels, + }, + rules: [ + { + apiGroups: ['authentication.k8s.io'], + resources: ['tokenreviews'], + verbs: ['create'], }, - subjects: [{ - kind: 'ServiceAccount', - name: 'node-exporter', - namespace: $._config.namespace, + { + apiGroups: ['authorization.k8s.io'], + resources: ['subjectaccessreviews'], + verbs: ['create'], }], }, - clusterRole: { - apiVersion: 'rbac.authorization.k8s.io/v1', - kind: 'ClusterRole', - metadata: { - name: 'node-exporter', - labels: $._config.nodeExporter.labels, - }, - rules: [ - { - apiGroups: ['authentication.k8s.io'], - resources: ['tokenreviews'], - verbs: ['create'], - }, - { - apiGroups: ['authorization.k8s.io'], - resources: ['subjectaccessreviews'], - verbs: ['create'], - }, - ], + serviceAccount: { + apiVersion: 'v1', + kind: 'ServiceAccount', + metadata: { + name: 'node-exporter', + namespace: ne.config.namespace, + labels: ne.config.commonLabels, }, + }, - daemonset: - local nodeExporter = { - name: 'node-exporter', - image: $._config.imageRepos.nodeExporter + ':' + $._config.versions.nodeExporter, - args: [ - '--web.listen-address=' + std.join(':', [$._config.nodeExporter.listenAddress, std.toString($._config.nodeExporter.port)]), - '--path.sysfs=/host/sys', - '--path.rootfs=/host/root', - '--no-collector.wifi', - '--no-collector.hwmon', - '--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)', - ], - volumeMounts: [ - { name: 'sys', mountPath: '/host/sys', mountPropagation: 'HostToContainer', readOnly: true }, - { name: 'root', mountPath: '/host/root', mountPropagation: 'HostToContainer', readOnly: true }, - ], - resources: $._config.resources['node-exporter'], - }; + service: { + apiVersion: 'v1', + kind: 'Service', + metadata: { + name: 
'node-exporter', + namespace: ne.config.namespace, + labels: ne.config.commonLabels, + }, + spec: { + ports: [ + { name: 'https', targetPort: 'https', port: ne.config.port }, + ], + selector: ne.config.selectorLabels, + clusterIP: 'None', + }, + }, - local proxy = { - name: 'kube-rbac-proxy', - image: $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy, - args: [ - '--logtostderr', - '--secure-listen-address=[$(IP)]:' + $._config.nodeExporter.port, - '--tls-cipher-suites=' + std.join(',', $._config.tlsCipherSuites), - '--upstream=http://127.0.0.1:' + $._config.nodeExporter.port + '/', - ], - env: [ - { name: 'IP', valueFrom: { fieldRef: { fieldPath: 'status.podIP' } } }, - ], - // Keep `hostPort` here, rather than in the node-exporter container - // because Kubernetes mandates that if you define a `hostPort` then - // `containerPort` must match. In our case, we are splitting the - // host port and container port between the two containers. - // We'll keep the port specification here so that the named port - // used by the service is tied to the proxy container. We *could* - // forgo declaring the host port, however it is important to declare - // it so that the scheduler can decide if the pod is schedulable. 
- ports: [ - { name: 'https', containerPort: $._config.nodeExporter.port, hostPort: $._config.nodeExporter.port }, + serviceMonitor: { + apiVersion: 'monitoring.coreos.com/v1', + kind: 'ServiceMonitor', + metadata: { + name: 'node-exporter', + namespace: ne.config.namespace, + labels: ne.config.commonLabels, + }, + spec: { + jobLabel: 'app.kubernetes.io/name', + selector: { + matchLabels: ne.config.selectorLabels, + }, + endpoints: [{ + port: 'https', + scheme: 'https', + interval: '15s', + bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token', + relabelings: [ + { + action: 'replace', + regex: '(.*)', + replacement: '$1', + sourceLabels: ['__meta_kubernetes_pod_node_name'], + targetLabel: 'instance', + }, ], - resources: $._config.resources['kube-rbac-proxy'], - securityContext: { - runAsUser: 65532, - runAsGroup: 65532, - runAsNonRoot: true, + tlsConfig: { + insecureSkipVerify: true, }, - }; + }], + }, + }, - { - apiVersion: 'apps/v1', - kind: 'DaemonSet', - metadata: { - name: 'node-exporter', - namespace: $._config.namespace, - labels: $._config.nodeExporter.labels, - }, - spec: { - selector: { matchLabels: $._config.nodeExporter.selectorLabels }, - updateStrategy: { - type: 'RollingUpdate', - rollingUpdate: { maxUnavailable: '10%' }, - }, - template: { - metadata: { labels: $._config.nodeExporter.labels }, - spec: { - nodeSelector: { 'kubernetes.io/os': 'linux' }, - tolerations: [{ - operator: 'Exists', - }], - containers: [nodeExporter, proxy], - volumes: [ - { name: 'sys', hostPath: { path: '/sys' } }, - { name: 'root', hostPath: { path: '/' } }, - ], - serviceAccountName: 'node-exporter', - securityContext: { - runAsUser: 65534, - runAsNonRoot: true, - }, - hostPID: true, - hostNetwork: true, - }, - }, - }, - }, + daemonset: + local nodeExporter = { + name: 'node-exporter', + image: ne.config.imageRepo + ':v' + ne.config.version, + args: [ + '--web.listen-address=' + std.join(':', [ne.config.listenAddress, std.toString(ne.config.port)]), + 
'--path.sysfs=/host/sys', + '--path.rootfs=/host/root', + '--no-collector.wifi', + '--no-collector.hwmon', + '--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)', + ], + volumeMounts: [ + { name: 'sys', mountPath: '/host/sys', mountPropagation: 'HostToContainer', readOnly: true }, + { name: 'root', mountPath: '/host/root', mountPropagation: 'HostToContainer', readOnly: true }, + ], + resources: ne.config.resources, + }; - serviceAccount: { - apiVersion: 'v1', - kind: 'ServiceAccount', - metadata: { - name: 'node-exporter', - namespace: $._config.namespace, - labels: $._config.nodeExporter.labels, - }, - }, + local kubeRbacProxy = krp({ + name: 'kube-rbac-proxy', + //image: krpImage, + upstream: 'http://127.0.0.1:' + ne.config.port + '/', + secureListenAddress: '[$(IP)]:' + ne.config.port, + // Keep `hostPort` here, rather than in the node-exporter container + // because Kubernetes mandates that if you define a `hostPort` then + // `containerPort` must match. In our case, we are splitting the + // host port and container port between the two containers. + // We'll keep the port specification here so that the named port + // used by the service is tied to the proxy container. We *could* + // forgo declaring the host port, however it is important to declare + // it so that the scheduler can decide if the pod is schedulable. 
+ ports: [ + { name: 'https', containerPort: ne.config.port, hostPort: ne.config.port }, + ], + }) + { + env: [ + { name: 'IP', valueFrom: { fieldRef: { fieldPath: 'status.podIP' } } }, + ] + }; - serviceMonitor: { - apiVersion: 'monitoring.coreos.com/v1', - kind: 'ServiceMonitor', + { + apiVersion: 'apps/v1', + kind: 'DaemonSet', metadata: { name: 'node-exporter', - namespace: $._config.namespace, - labels: $._config.nodeExporter.labels, + namespace: ne.config.namespace, + labels: ne.config.commonLabels, }, spec: { - jobLabel: 'app.kubernetes.io/name', - selector: { - matchLabels: $._config.nodeExporter.selectorLabels, + selector: { matchLabels: ne.config.selectorLabels }, + updateStrategy: { + type: 'RollingUpdate', + rollingUpdate: { maxUnavailable: '10%' }, }, - endpoints: [{ - port: 'https', - scheme: 'https', - interval: '15s', - bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token', - relabelings: [ - { - action: 'replace', - regex: '(.*)', - replacement: '$1', - sourceLabels: ['__meta_kubernetes_pod_node_name'], - targetLabel: 'instance', + template: { + metadata: { labels: ne.config.commonLabels }, + spec: { + nodeSelector: { 'kubernetes.io/os': 'linux' }, + tolerations: [{ + operator: 'Exists', + }], + containers: [nodeExporter, kubeRbacProxy], + volumes: [ + { name: 'sys', hostPath: { path: '/sys' } }, + { name: 'root', hostPath: { path: '/' } }, + ], + serviceAccountName: 'node-exporter', + securityContext: { + runAsUser: 65534, + runAsNonRoot: true, }, - ], - tlsConfig: { - insecureSkipVerify: true, + hostPID: true, + hostNetwork: true, }, - }], - }, - }, - - service: { - apiVersion: 'v1', - kind: 'Service', - metadata: { - name: 'node-exporter', - namespace: $._config.namespace, - labels: $._config.nodeExporter.labels, - }, - spec: { - ports: [ - { name: 'https', targetPort: 'https', port: $._config.nodeExporter.port }, - ], - selector: $._config.nodeExporter.selectorLabels, - clusterIP: 'None', + }, }, }, - }, }