diff --git a/README.md b/README.md
index 84aeb1d37cbc899f52a9a095d6303610d95334e1..f05b92a8b9695dba5bf341fb030990beadb969c2 100644
--- a/README.md
+++ b/README.md
@@ -22,16 +22,24 @@ This stack is meant for cluster monitoring, so it is pre-configured to collect m
 * [Prerequisites](#prerequisites)
   * [minikube](#minikube)
 * [Quickstart](#quickstart)
-* [Usage](#usage)
+* [Customizing Kube-Prometheus](#customizing-kube-prometheus)
+  * [Installing](#installing)
   * [Compiling](#compiling)
+  * [Containerized Installing and Compiling](#containerized-installing-and-compiling)
 * [Configuration](#configuration)
-* [Customization](#customization)
+* [Customization Examples](#customization-examples)
+  * [Cluster Creation Tools](#cluster-creation-tools)
+  * [NodePorts](#nodeports)
+  * [Prometheus Object Name](#prometheus-object-name)
+  * [node-exporter DaemonSet namespace](#node-exporter-daemonset-namespace)
   * [Alertmanager configuration](#alertmanager-configuration)
+  * [Static etcd configuration](#static-etcd-configuration)
   * [Customizing Prometheus alerting/recording rules and Grafana dashboards](#customizing-prometheus-alertingrecording-rules-and-grafana-dashboards)
   * [Exposing Prometheus/Alermanager/Grafana via Ingress](#exposing-prometheusalermanagergrafana-via-ingress)
     * [Minikube Example](#minikube-example)
 * [Troubleshooting](#troubleshooting)
   * [Error retrieving kubelet metrics](#error-retrieving-kubelet-metrics)
+  * [kube-state-metrics resource usage](#kube-state-metrics-resource-usage)
 * [Contributing](#contributing)
 
 ## Prerequisites
@@ -53,29 +61,49 @@ $ minikube delete && minikube start --kubernetes-version=v1.10.1 --memory=4096 -
 
 ## Quickstart
 
-Although this project is intended to be used as a library, a compiled version of the Kubernetes manifests generated with this library is checked into this repository in order to try the content out quickly.
-
-Simply create the stack:
+This project is intended to be used as a library (i.e. the intent is not for you to create your own modified copy of this repository).
+Though for a quickstart, a compiled version of the Kubernetes [manifests](manifests) generated with this library (specifically with `example.jsonnet`) is checked into this repository in order to try the content out quickly. To try out the stack un-customized, run:
+ * Simply create the stack:
+```
+$ kubectl create -f manifests/ || true
+$ kubectl create -f manifests/ 2>/dev/null || true # This command may sometimes need to be run twice
+```
+ * And to tear down the stack:
 ```
-$ kubectl create -f manifests/
+$ kubectl delete -f manifests/ || true
 ```
 
-## Usage
+## Customizing Kube-Prometheus
 
-The content of this project consists of a set of [jsonnet](http://jsonnet.org/) files making up a library to be consumed.
+This section:
+ * describes how to customize the kube-prometheus library by compiling the kube-prometheus manifests yourself (as an alternative to the [Quickstart section](#quickstart)).
+ * still doesn't require you to make a copy of this entire repository, but rather only a copy of a few select files.
+
+### Installing
 
-Install this library in your own project with [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler#install):
+The content of this project consists of a set of [jsonnet](http://jsonnet.org/) files making up a library to be consumed.
+Install this library in your own project with [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler#install) (the jsonnet package manager):
 ```
 $ mkdir my-kube-prometheus; cd my-kube-prometheus
-$ jb init
-$ jb install github.com/coreos/prometheus-operator/contrib/kube-prometheus/jsonnet/kube-prometheus
+$ jb init # Creates the initial/empty `jsonnetfile.json`
+# Install the kube-prometheus dependency
+$ jb install github.com/coreos/prometheus-operator/contrib/kube-prometheus/jsonnet/kube-prometheus # Creates `vendor/` & `jsonnetfile.lock.json`, and fills in `jsonnetfile.json`
 ```
 
 > `jb` can be installed with `go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb`
 
-You may wish to not use ksonnet and simply render the generated manifests to files on disk, this can be done with:
+> An example of how to install a given version of this library: `jb install github.com/coreos/prometheus-operator/contrib/kube-prometheus/jsonnet/kube-prometheus/@v0.22.0`
+
+In order to update the kube-prometheus dependency, simply use the jsonnet-bundler update functionality:
+`$ jb update`
+
+### Compiling
+
+For example, to compile the manifests: `./build.sh example.jsonnet`
+
+Here's [example.jsonnet](example.jsonnet):
 
 [embedmd]:# (example.jsonnet)
 ```jsonnet
@@ -94,15 +122,14 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
 { ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
 ```
 
-This renders all manifests in a json structure of `{filename: manifest-content}`.
-
-### Compiling
-
-To compile the above and get each manifest in a separate file on disk use the following script:
+And here's the [build.sh](build.sh) script (which uses `vendor/` to render all manifests in a json structure of `{filename: manifest-content}`):
 
 [embedmd]:# (build.sh)
 ```sh
 #!/usr/bin/env bash
+
+# This script uses arg $1 (name of *.jsonnet file to use) to generate the manifests/*.yaml files.
+
 set -e
 set -x
 # only exit with zero if all commands of the pipeline exit successfully
@@ -117,56 +144,110 @@ jsonnet -J vendor -m manifests "${1-example.jsonnet}" | xargs -I{} sh -c 'cat {}
 ```
 
-> Note you need `jsonnet` and `gojsonyaml` (`go get github.com/brancz/gojsontoyaml`) installed. If you just want json output, not yaml, then you can skip the pipe and everything afterwards.
+> Note you need `jsonnet` (`go get github.com/google/go-jsonnet/jsonnet`) and `gojsontoyaml` (`go get github.com/brancz/gojsontoyaml`) installed to run `build.sh`. If you just want json output, not yaml, then you can skip the pipe and everything afterwards.
+
+This script runs the jsonnet code, then reads each key of the generated json, uses that key as the file name, writes the value of that key to that file, and converts each json manifest to yaml.
 
-This script reads each key of the generated json and uses that as the file name, and writes the value of that key to that file.
+### Containerized Installing and Compiling
+
+If you would rather not install `jb`, `jsonnet`, or `gojsontoyaml` locally, you can instead build the `po-jsonnet` Docker image (you will need a copy of this repository for this). Do the following from this `kube-prometheus` directory:
+```
+$ make ../../hack/jsonnet-docker-image
+```
 
-> You can also run this script executing the command `make generate-raw` from kube-prometheus base directory of this repository but the above option it is recommended so that you run it in your own infrastructure repository.
+Then you can do commands such as the following:
+```
+docker run \
+    --rm \
+    -v `pwd`:`pwd` \
+    --workdir `pwd` \
+    po-jsonnet jb init
+
+docker run \
+    --rm \
+    -v `pwd`:`pwd` \
+    --workdir `pwd` \
+    po-jsonnet jb install github.com/coreos/prometheus-operator/contrib/kube-prometheus/jsonnet/kube-prometheus
+
+docker run \
+    --rm \
+    -v `pwd`:`pwd` \
+    --workdir `pwd` \
+    po-jsonnet ./build.sh example.jsonnet
+```
 
 ## Configuration
 
-A hidden `_config` field is located at the top level of the object this library provides. These are the available fields with their respective default values:
+Jsonnet has the concept of hidden fields. These are fields that are not going to be rendered in the result. This is used to configure the kube-prometheus components in jsonnet. In the example jsonnet code of the above [Compiling section](#compiling), you can see an example of this, where the `namespace` is configured to be `monitoring`. In order to not override the whole object, use jsonnet's `+::` construct to merge objects; this way you can override individual settings while retaining all other settings and defaults.
+These are the available fields with their respective default values:
 ```
 {
   _config+:: {
-    namespace: "default",
-
-    versions+:: {
-      alertmanager: "v0.14.0",
-      nodeExporter: "v0.15.2",
-      kubeStateMetrics: "v1.3.0",
-      kubeRbacProxy: "v0.3.0",
-      addonResizer: "1.0",
-      prometheusOperator: "v0.18.1",
-      prometheus: "v2.2.1",
-    },
+    namespace: "default",
+
+    versions+:: {
+      alertmanager: "v0.15.0",
+      nodeExporter: "v0.15.2",
+      kubeStateMetrics: "v1.3.1",
+      kubeRbacProxy: "v0.3.1",
+      addonResizer: "1.0",
+      prometheusOperator: "v0.18.1",
+      prometheus: "v2.2.1",
+    },
 
-    imageRepos+:: {
-      prometheus: "quay.io/prometheus/prometheus",
-      alertmanager: "quay.io/prometheus/alertmanager",
-      kubeStateMetrics: "quay.io/coreos/kube-state-metrics",
-      kubeRbacProxy: "quay.io/coreos/kube-rbac-proxy",
-      addonResizer: "quay.io/coreos/addon-resizer",
-      nodeExporter: "quay.io/prometheus/node-exporter",
-      prometheusOperator: "quay.io/coreos/prometheus-operator",
-    },
+    imageRepos+:: {
+      prometheus: "quay.io/prometheus/prometheus",
+      alertmanager: "quay.io/prometheus/alertmanager",
+      kubeStateMetrics: "quay.io/coreos/kube-state-metrics",
+      kubeRbacProxy: "quay.io/coreos/kube-rbac-proxy",
+      addonResizer: "quay.io/coreos/addon-resizer",
+      nodeExporter: "quay.io/prometheus/node-exporter",
+      prometheusOperator: "quay.io/coreos/prometheus-operator",
+    },
 
-    prometheus+:: {
-      replicas: 2,
-      rules: {},
-    },
+    prometheus+:: {
+      names: 'k8s',
+      replicas: 2,
+      rules: {},
+    },
 
-    alertmanager+:: {
-      config: alertmanagerConfig,
-      replicas: 3,
-    },
+    alertmanager+:: {
+      name: 'main',
+      config: |||
+        global:
+          resolve_timeout: 5m
+        route:
+          group_by: ['job']
+          group_wait: 30s
+          group_interval: 5m
+          repeat_interval: 12h
+          receiver: 'null'
+          routes:
+          - match:
+              alertname: DeadMansSwitch
+            receiver: 'null'
+        receivers:
+        - name: 'null'
+      |||,
+      replicas: 3,
+    },
+
+    kubeStateMetrics+:: {
+      collectors: '',  // empty string gets a default set
+      scrapeInterval: '30s',
+      scrapeTimeout: '30s',
+
+      baseCPU: '100m',
+      baseMemory: '150Mi',
+      cpuPerNode: '2m',
+      memoryPerNode: '30Mi',
+    },
   },
 }
 ```
 
-The grafana definition is located in a different project (https://github.com/brancz/kubernetes-grafana), but needed configuration can be customized from the same file. F.e.
-to allow anonymous access to grafana, add the `_config` section:
-
+The grafana definition is located in a different project (https://github.com/brancz/kubernetes-grafana), but the needed configuration can be customized from the same top-level `_config` field. For example, to allow anonymous access to grafana, add the following `_config` section:
 ```
 grafana+:: {
   config: {
@@ -177,12 +258,13 @@ The grafana definition is located in a different project (https://github.com/bra
   },
 },
 ```
 
-
-## Customization
+## Customization Examples
 
 Jsonnet is a turing complete language, any logic can be reflected in it. It also has powerful merge functionalities, allowing sophisticated customizations of any kind simply by merging it into the object the library provides.
 
-A common example is that not all Kubernetes clusters are created exactly the same way, meaning the configuration to monitor them may be slightly different. For [kubeadm]() and [bootkube]() clusters there are mixins available to easily configure these:
+### Cluster Creation Tools
+
+A common example is that not all Kubernetes clusters are created exactly the same way, meaning the configuration to monitor them may be slightly different. For [kubeadm](examples/jsonnet-snippets/kubeadm.jsonnet), [bootkube](examples/jsonnet-snippets/bootkube.jsonnet) and [kops](examples/jsonnet-snippets/kops.jsonnet) clusters there are mixins available to easily configure these:
 
 kubeadm:
@@ -208,6 +290,8 @@ kops:
 (import 'kube-prometheus/kube-prometheus-kops.libsonnet')
 ```
 
+### NodePorts
+
 Another mixin that may be useful for exploring the stack is to expose the UIs of Prometheus, Alertmanager and Grafana on NodePorts:
 
 [embedmd]:# (examples/jsonnet-snippets/node-ports.jsonnet)
@@ -216,7 +300,9 @@ Another mixin that may be useful for exploring the stack is to expose the UIs of
 (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet')
 ```
 
-For example the name of the `Prometheus` object provided by this library can be overridden:
+### Prometheus Object Name
+
+To give another customization example, the name of the `Prometheus` object provided by this library can be overridden:
 
 [embedmd]:# (examples/prometheus-name-override.jsonnet)
 ```jsonnet
@@ -231,6 +317,8 @@ For example the name of the `Prometheus` object provided by this library can be
 }).prometheus.prometheus
 ```
 
+### node-exporter DaemonSet namespace
+
 Standard Kubernetes manifests are all written using [ksonnet-lib](https://github.com/ksonnet/ksonnet-lib/), so they can be modified with the mixins supplied by ksonnet-lib. For example to override the namespace of the node-exporter DaemonSet:
 
 [embedmd]:# (examples/ksonnet-example.jsonnet)
@@ -316,35 +404,9 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
 
 ### Static etcd configuration
 
-In order to configure a static etcd cluster to scrape there is a simple mixin prepared, so only the IPs and certificate information need to be configured. Simply append the `kube-prometheus/kube-prometheus-static-etcd.libsonnet` mixin to the rest of the configuration, and configure the `ips` to be the IPs to scrape, and the `clientCA`, `clientKey` and `clientCert` to values that are valid to scrape etcd metrics with.
+In order to configure a static etcd cluster to scrape, there is a simple [kube-prometheus-static-etcd.libsonnet](jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet) mixin prepared; see [etcd.jsonnet](examples/etcd.jsonnet) for an example of how to use that mixin, and [Monitoring external etcd](docs/monitoring-external-etcd.md) for more information.
 
-Most likely these certificates are generated somewhere in an infrastructure repository, so using the jsonnet `importstr` function can be useful here. All the sensitive information on the certificates will end up in a Kubernetes Secret.
-
-[embedmd]:# (examples/etcd.jsonnet)
-```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
-  (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
-
-    etcd+:: {
-      ips: ['127.0.0.1'],
-      clientCA: importstr 'etcd-client-ca.crt',
-      clientKey: importstr 'etcd-client.key',
-      clientCert: importstr 'etcd-client.crt',
-      serverName: 'etcd.my-cluster.local',
-    },
-  },
-};
-
-{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
-{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
-{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
-{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
-{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
-{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
-{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
-```
+> Note that monitoring etcd in minikube is currently not possible because of how etcd is set up. (minikube's etcd binds to 127.0.0.1:2379 only, within the host networking namespace.)
 
 ### Customizing Prometheus alerting/recording rules and Grafana dashboards
 
@@ -356,30 +418,7 @@ See [exposing Prometheus/Alertmanager/Grafana](docs/exposing-prometheus-alertman
 
 ## Minikube Example
 
-To use an easy to reproduce example, let's take the minikube setup as demonstrated in [prerequisites](#Prerequisites). It is a kubeadm cluster (as we use the kubeadm bootstrapper) and because we would like easy access to our Prometheus, Alertmanager and Grafana UI we want the services to be exposed as NodePort type services:
-
-> Note that NodePort type services is likely not a good idea for your production use case, it is only used for demonstration purposes here.
-
-[embedmd]:# (examples/minikube.jsonnet)
-```jsonnet
-local kp =
-  (import 'kube-prometheus/kube-prometheus.libsonnet') +
-  (import 'kube-prometheus/kube-prometheus-kubeadm.libsonnet') +
-  (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
-  {
-    _config+:: {
-      namespace: 'monitoring',
-    },
-  };
-
-{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
-{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
-{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
-{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
-{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
-{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
-{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
-```
+To use an easy to reproduce example, see [minikube.jsonnet](examples/minikube.jsonnet), which uses the minikube setup as demonstrated in [Prerequisites](#prerequisites). Because we would like easy access to our Prometheus, Alertmanager and Grafana UIs, `minikube.jsonnet` exposes the services as NodePort type services.
 
 ## Troubleshooting
 
@@ -387,7 +426,7 @@ local kp =
 ### Error retrieving kubelet metrics
 
 Should the Prometheus `/targets` page show kubelet targets, but not able to successfully scrape the metrics, then most likely it is a problem with the authentication and authorization setup of the kubelets.
 
-As described in the [prerequisites](#prerequisites) section, in order to retrieve metrics from the kubelet token authentication and authorization must be enabled. Some Kubernetes setup tools do not enable this by default.
+As described in the [Prerequisites](#prerequisites) section, in order to retrieve metrics from the kubelet token authentication and authorization must be enabled. Some Kubernetes setup tools do not enable this by default.
 
 If you are using Google's GKE product, see [docs/GKE-cadvisor-support.md].
diff --git a/build.sh b/build.sh
index 4eaf5ac569b085073f5969c6683b22e84084171a..f68cd4475df21548ce6f7b26eff574929f3043e7 100755
--- a/build.sh
+++ b/build.sh
@@ -1,4 +1,7 @@
 #!/usr/bin/env bash
+
+# This script uses arg $1 (name of *.jsonnet file to use) to generate the manifests/*.yaml files.
+
 set -e
 set -x
 # only exit with zero if all commands of the pipeline exit successfully
diff --git a/docs/monitoring-external-etcd.md b/docs/monitoring-external-etcd.md
index bfdc58a3cd8b2eac8b2f5ef13faaab7c606d27fb..1e26af0fdc2347a711311b6d18529a6571223074 100644
--- a/docs/monitoring-external-etcd.md
+++ b/docs/monitoring-external-etcd.md
@@ -2,119 +2,11 @@ This guide will help you monitor an external etcd cluster. When the etcd cluster
 is not hosted inside Kubernetes. This is often the case with Kubernetes setups. This approach has been tested with kube-aws but the same principals apply to other tools.
 
-# Step 1 - Make the etcd certificates available to Prometheus pod
-Prometheus Operator (and Prometheus) allow us to specify a tlsConfig. This is required as most likely your etcd metrics end points is secure.
+Note that [etcd.jsonnet](../examples/etcd.jsonnet) & [kube-prometheus-static-etcd.libsonnet](../jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet) (which are described by a section of the [Readme](../README.md#static-etcd-configuration)) do the following:
+ * Put the three etcd TLS client files (CA & cert & key) into a secret in the namespace, and have Prometheus Operator load the secret.
+ * Create the following (to expose etcd metrics - port 2379): a Service, Endpoints, & ServiceMonitor.
 
-## a - Create the secrets in the namespace
-Prometheus Operator allows us to mount secrets in the pod. By loading the secrets as files, they can be made available inside the Prometheus pod.
-
-`kubectl -n monitoring create secret generic etcd-certs --from-file=CREDENTIAL_PATH/etcd-client.pem --from-file=CREDENTIAL_PATH/etcd-client-key.pem --from-file=CREDENTIAL_PATH/ca.pem`
-
-where CREDENTIAL_PATH is the path to your etcd client credentials on your work machine.
-(Kube-aws stores them inside the credential folder).
-
-## b - Get Prometheus Operator to load the secret
-In the previous step we have named the secret 'etcd-certs'.
-
-Edit prometheus-operator/contrib/kube-prometheus/manifests/prometheus/prometheus-k8s.yaml and add the secret under the spec of the Prometheus object manifest:
-
-```
- secrets:
- - etcd-certs
-```
-
-The manifest will look like that:
-```
-apiVersion: monitoring.coreos.com/v1
-kind: Prometheus
-metadata:
-  name: k8s
-  labels:
-    prometheus: k8s
-spec:
-  replicas: 2
-  secrets:
-  - etcd-certs
-  version: v1.7.1
-```
-
-If your Prometheus Operator is already in place, update it:
-
-`kubectl -n monitoring replace -f contrib/kube-prometheus/manifests/prometheus/prometheus-k8s.yaml
-
-# Step 2 - Create the Service, endpoints and ServiceMonitor
-
-The below manifest creates a Service to expose etcd metrics (port 2379)
-
-* Replace `IP_OF_YOUR_ETCD_NODE_[0/1/2]` with the IP addresses of your etcd nodes. If you have more than one node, add them to the same list.
-* Use `#insecureSkipVerify: true` or replace `ETCD_DNS_OR_ALTERNAME_NAME` with a valid name for the certificate.
-
-In case you have generated the etcd certificated with kube-aws, you will need to use insecureSkipVerify as the valid certificate domain will be different for each etcd node (etcd0, etcd1, etcd2). If you only have one etcd node, you can use the value from `etcd.internalDomainName` speficied in your kube-aws `cluster.yaml`
-
-In this example we use insecureSkipVerify: true as kube-aws default certificates are not valid against the IP. They were created for the DNS. Depending on your use case, you might want to remove this flag or set it to false.
-(true required for kube-aws if using default certificate generators method)
-
-```
-apiVersion: v1
-kind: Service
-metadata:
-  name: etcd-k8s
-  labels:
-    k8s-app: etcd
-spec:
-  type: ClusterIP
-  clusterIP: None
-  ports:
-  - name: api
-    port: 2379
-    protocol: TCP
----
-apiVersion: v1
-kind: Endpoints
-metadata:
-  name: etcd-k8s
-  labels:
-    k8s-app: etcd
-subsets:
-- addresses:
-  - ip: IP_OF_YOUR_ETCD_NODE_0
-    nodeName: etcd0
-  - ip: IP_OF_YOUR_ETCD_NODE_1
-    nodeName: etcd1
-  - ip: IP_OF_YOUR_ETCD_NODE_2
-    nodeName: etcd2
-  ports:
-  - name: api
-    port: 2379
-    protocol: TCP
----
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  name: etcd-k8s
-  labels:
-    k8s-app: etcd-k8s
-spec:
-  jobLabel: k8s-app
-  endpoints:
-  - port: api
-    interval: 30s
-    scheme: https
-    tlsConfig:
-      caFile: /etc/prometheus/secrets/etcd-certs/ca.pem
-      certFile: /etc/prometheus/secrets/etcd-certs/etcd-client.pem
-      keyFile: /etc/prometheus/secrets/etcd-certs/etcd-client-key.pem
-      #use insecureSkipVerify only if you cannot use a Subject Alternative Name
-      #insecureSkipVerify: true
-      serverName: ETCD_DNS_OR_ALTERNAME_NAME
-  selector:
-    matchLabels:
-      k8s-app: etcd
-  namespaceSelector:
-    matchNames:
-    - monitoring
-```
-
-# Step 3: Open the port
+# Step 1: Open the port
 
 You now need to allow the nodes Prometheus are running on to talk to the etcd on the port 2379 (if 2379 is the port used by etcd to expose the metrics)
 
@@ -128,11 +20,11 @@ With kube-aws, each etcd node has two IP addresses:
 
 For some reason, some etcd node answer to :2379/metrics on the intance IP (eth0), some others on the EIP|ENI address (eth1). See issue https://github.com/kubernetes-incubator/kube-aws/issues/923
 It would be of course much better if we could hit the EPI/ENI all the time as they don't change even if the underlying EC2 intance goes down.
-If specifying the Instance IP (eth0) in the Prometheus Operator ServiceMonitor, and the EC2 intance goes down, one would have to update the ServiceMonitor.
+If specifying the Instance IP (eth0) in the Prometheus Operator ServiceMonitor, and the EC2 instance goes down, one would have to update the ServiceMonitor.
 
 Another idea woud be to use the DNS entries of etcd, but those are not currently supported for EndPoints objects in Kubernetes.
 
-# Step 4: verify
+# Step 2: verify
 
 Go to the Prometheus UI on :9090/config and check that you have an etcd job entry:
 ```
@@ -142,9 +34,11 @@ Go to the Prometheus UI on :9090/config and check that you have an etcd job entr
 ...
 ```
 
-On the :9090/targets page, you should see "etcd" with the UP state. If not, check the Error column for more information.
+On the :9090/targets page:
+ * You should see "etcd" with the UP state. If not, check the Error column for more information.
+ * If no "etcd" targets are even shown on this page, Prometheus isn't attempting to scrape it.
 
-# Step 5: Grafana dashboard
+# Step 3: Grafana dashboard
 
 ## Find a dashboard you like
 
diff --git a/examples/etcd.jsonnet b/examples/etcd.jsonnet
index c521d1cdc76220fb69c1827ee7681bdf3e9258d6..e26c957142198c5cf3b4672fd871c6d217af44d0 100644
--- a/examples/etcd.jsonnet
+++ b/examples/etcd.jsonnet
@@ -3,12 +3,43 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
   _config+:: {
     namespace: 'monitoring',
 
+    // Reference info: https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/README.md#static-etcd-configuration
     etcd+:: {
+      // Configure this to be the IP(s) to scrape - i.e. your etcd node(s) (use commas to separate multiple values).
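+      // For example (hypothetical addresses), a three-node cluster might use: ips: ['10.0.0.10', '10.0.0.11', '10.0.0.12'],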
       ips: ['127.0.0.1'],
+
+      // Reference info:
+      // * https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitorspec (has endpoints)
+      // * https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint (has tlsConfig)
+      // * https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig (has: caFile, certFile, keyFile, serverName, & insecureSkipVerify)
+
+      // Set these three variables to the paths (on your work machine) of the certificate files that are valid to scrape etcd metrics with (check the apiserver container).
+      // Most likely these certificates are generated somewhere in an infrastructure repository, so using the jsonnet `importstr` function can
+      // be useful here. (Kube-aws stores these three files inside the credential folder.)
+      // All the sensitive information on the certificates will end up in a Kubernetes Secret.
       clientCA: importstr 'etcd-client-ca.crt',
       clientKey: importstr 'etcd-client.key',
       clientCert: importstr 'etcd-client.crt',
-      serverName: 'etcd.my-cluster.local',
+
+      // Note that you should specify a value EITHER for 'serverName' OR for 'insecureSkipVerify'. (Don't specify values for both of them, and don't leave both of them unset.)
+      // * Specifying serverName: Ideally you should provide a valid value for serverName (and then insecureSkipVerify should be left as false, so that serverName gets used).
+      // * Specifying insecureSkipVerify: insecureSkipVerify is only to be used (i.e. set to true) if you cannot (based on how your etcd certificates were created) use a Subject Alternative Name.
+      // * If you specify a value:
+      // ** for both of these variables: when 'insecureSkipVerify: true' is specified, then also specifying a value for serverName won't hurt anything but it will be ignored.
+      // ** for neither of these variables: you'll get authentication errors on the prom '/targets' page with your etcd targets.
+
+      // A valid name (DNS or Subject Alternative Name) that the client (i.e. Prometheus) will use to verify the etcd TLS certificate.
+      // * Note that doing `nslookup etcd.kube-system.svc.cluster.local` (on a pod in a K8s cluster where kube-prometheus has been installed) shows that kube-prometheus sets up this hostname.
+      // * `openssl x509 -noout -text -in etcd-client.pem` will print the Subject Alternative Names.
+      serverName: 'etcd.kube-system.svc.cluster.local',
+
+      // When insecureSkipVerify isn't specified, the default value is "false".
+      //insecureSkipVerify: true,
+
+      // In case you have generated the etcd certificate with kube-aws:
+      // * If you only have one etcd node, you can use the value from 'etcd.internalDomainName' (specified in your kube-aws cluster.yaml) as the value for 'serverName'.
+      // * But if you have multiple etcd nodes, you will need to use 'insecureSkipVerify: true' (if using the default certificate generators method), as the valid certificate domain
+      //   will be different for each etcd node. (kube-aws default certificates are not valid against the IP - they were created for the DNS.)
     },
   },
 };
diff --git a/examples/minikube.jsonnet b/examples/minikube.jsonnet
index ed1a05c44d5683a348e4dfd464070f15feac25c2..e14407983ba5723c921171195bd48ecd6e5dfb5a 100644
--- a/examples/minikube.jsonnet
+++ b/examples/minikube.jsonnet
@@ -1,10 +1,52 @@
 local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
   (import 'kube-prometheus/kube-prometheus-kubeadm.libsonnet') +
+  // Note that NodePort type services is likely not a good idea for your production use case, it is only used for demonstration purposes here.
   (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
   {
     _config+:: {
       namespace: 'monitoring',
+      alertmanager+:: {
+        config: importstr 'alertmanager-config.yaml',
+      },
+      grafana+:: {
+        config: {
+          sections: {
+            // Do not require grafana users to log in/authenticate
+            "auth.anonymous": {enabled: true},
+          },
+        },
+      },
+    },
+
+    // For simplicity, each of the following values for 'externalUrl':
+    // * assume that `minikube ip` prints "192.168.99.100"
+    // * hard-code the NodePort for each app
+    prometheus+:: {
+      prometheus+: {
+        // Reference info: https://coreos.com/operators/prometheus/docs/latest/api.html#prometheusspec
+        spec+: {
+          // For example, this is used so that the "Source" links on http://<alert-manager>/#/alerts are valid.
+          externalUrl: "http://192.168.99.100:30900",
+
+          // Reference info: "external_labels" on https://prometheus.io/docs/prometheus/latest/configuration/configuration/
+          externalLabels: {
+            // This 'cluster' label will be included on every firing Prometheus alert. (This is more useful
+            // when running multiple clusters in a shared environment (e.g. AWS) with other users.)
+            cluster: "minikube-<INSERT YOUR USERNAME HERE>",
+          },
+        },
+      },
+    },
+    alertmanager+:: {
+      alertmanager+: {
+        // Reference info: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
+        spec+: {
+          externalUrl: "http://192.168.99.100:30903",
+
+          logLevel: "debug", // So firing alerts show up in the log
+        },
+      },
     },
 };
diff --git a/jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet b/jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet
index d63b8680d0a9cc5aba2c626cf8e62982f28a5541..573e809c7b3eedc5b65737f5f0f629c8564f82f7 100644
--- a/jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet
+++ b/jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet
@@ -62,6 +62,7 @@ local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
           port: 'metrics',
           interval: '30s',
           scheme: 'https',
+          // Prometheus Operator (and Prometheus) allow us to specify a tlsConfig. This is required, as most likely your etcd metrics endpoint is secured.
           tlsConfig: {
             caFile: '/etc/prometheus/secrets/kube-etcd-client-certs/etcd-client-ca.crt',
             keyFile: '/etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.key',
@@ -79,8 +80,8 @@ local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
       },
     },
     secretEtcdCerts:
+      // Prometheus Operator allows us to mount secrets in the pod. By loading the secrets as files, they can be made available inside the Prometheus pod.
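+      // (The secret created below is mounted into the Prometheus pods at /etc/prometheus/secrets/kube-etcd-client-certs/, which is the directory the tlsConfig file paths above point at.)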
       local secret = k.core.v1.secret;
-
       secret.new('kube-etcd-client-certs', {
         'etcd-client-ca.crt': std.base64($._config.etcd.clientCA),
         'etcd-client.key': std.base64($._config.etcd.clientKey),
@@ -89,6 +90,7 @@ local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
       secret.mixin.metadata.withNamespace($._config.namespace),
 
     prometheus+: {
+      // Reference info: https://coreos.com/operators/prometheus/docs/latest/api.html#prometheusspec
       spec+: {
         secrets+: [$.prometheus.secretEtcdCerts.metadata.name],
       },