diff --git a/examples/minikube.jsonnet b/examples/minikube.jsonnet
index 850514fdc5d0415044a071f146d967b60bba441b..3073612a4849c849d0e366c366795edbed243c7b 100644
--- a/examples/minikube.jsonnet
+++ b/examples/minikube.jsonnet
@@ -10,10 +10,10 @@ local kp =
         config: importstr 'alertmanager-config.yaml',
       },
       grafana+:: {
-        config: { // http://docs.grafana.org/installation/configuration/
+        config: {  // http://docs.grafana.org/installation/configuration/
           sections: {
             // Do not require grafana users to login/authenticate
-            "auth.anonymous": {enabled: true},
+            'auth.anonymous': { enabled: true },
           },
         },
       },
@@ -27,13 +27,13 @@ local kp =
         // Reference info: https://coreos.com/operators/prometheus/docs/latest/api.html#prometheusspec
         spec+: {
           // An e.g. of the purpose of this is so the "Source" links on http://<alert-manager>/#/alerts are valid.
-          externalUrl: "http://192.168.99.100:30900",
+          externalUrl: 'http://192.168.99.100:30900',
 
           // Reference info: "external_labels" on https://prometheus.io/docs/prometheus/latest/configuration/configuration/
           externalLabels: {
             // This 'cluster' label will be included on every firing prometheus alert. (This is more useful
             // when running multiple clusters in a shared environment (e.g. AWS) with other users.)
-            cluster: "minikube-<INSERT YOUR USERNAME HERE>",
+            cluster: 'minikube-<INSERT YOUR USERNAME HERE>',
           },
         },
       },
@@ -42,9 +42,9 @@ local kp =
     alertmanager+: {
       // Reference info: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
       spec+: {
-        externalUrl: "http://192.168.99.100:30903",
+        externalUrl: 'http://192.168.99.100:30903',
 
-        logLevel: "debug", // So firing alerts show up in log
+        logLevel: 'debug',  // So firing alerts show up in log
       },
     },
   },
diff --git a/examples/prometheus-pvc.jsonnet b/examples/prometheus-pvc.jsonnet
index 75b250fee92332bc383ec11eb334516d29095a8f..82716e0f514ec3ff2b76abc2ce45f8646bbe19c1 100644
--- a/examples/prometheus-pvc.jsonnet
+++ b/examples/prometheus-pvc.jsonnet
@@ -1,10 +1,9 @@
-
 // Reference info: documentation for https://github.com/ksonnet/ksonnet-lib can be found at http://g.bryan.dev.hepti.center
 //
-local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet'; // https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k.libsonnet - imports k8s.libsonnet
+local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';  // https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k.libsonnet - imports k8s.libsonnet
 // * https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k8s.libsonnet defines things such as "persistentVolumeClaim:: {"
 //
-local pvc = k.core.v1.persistentVolumeClaim; // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#persistentvolumeclaim-v1-core (defines variable named 'spec' of type 'PersistentVolumeClaimSpec')
+local pvc = k.core.v1.persistentVolumeClaim;  // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#persistentvolumeclaim-v1-core (defines variable named 'spec' of type 'PersistentVolumeClaimSpec')
 
 local kp =
   (import 'kube-prometheus/kube-prometheus.libsonnet') +
@@ -16,20 +15,20 @@ local kp =
 
     prometheus+:: {
       prometheus+: {
-        spec+: { // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+        spec+: {  // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
 
           // If a value isn't specified for 'retention', then by default the '--storage.tsdb.retention=24h' arg will be passed to prometheus by prometheus-operator.
           // The possible values for a prometheus <duration> are:
           // * https://github.com/prometheus/common/blob/c7de230/model/time.go#L178 specifies "^([0-9]+)(y|w|d|h|m|s|ms)$" (years weeks days hours minutes seconds milliseconds)
-          retention: "30d",
+          retention: '30d',
 
           // Reference info: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md
           // By default (if the following 'storage.volumeClaimTemplate' isn't created), prometheus will be created with an EmptyDir for the 'prometheus-k8s-db' volume (for the prom tsdb).
           // This 'storage.volumeClaimTemplate' causes the following to be automatically created (via dynamic provisioning) for each prometheus pod:
           // * PersistentVolumeClaim (and a corresponding PersistentVolume)
           // * the actual volume (per the StorageClassName specified below)
-          storage: { // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#storagespec
-            volumeClaimTemplate: // (same link as above where the 'pvc' variable is defined)
-              pvc.new() + // http://g.bryan.dev.hepti.center/core/v1/persistentVolumeClaim/#core.v1.persistentVolumeClaim.new
+          storage: {  // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#storagespec
+            volumeClaimTemplate:  // (same link as above where the 'pvc' variable is defined)
+              pvc.new() +  // http://g.bryan.dev.hepti.center/core/v1/persistentVolumeClaim/#core.v1.persistentVolumeClaim.new
 
               pvc.mixin.spec.withAccessModes('ReadWriteOnce') +
@@ -40,14 +39,14 @@ local kp =
 
               // A StorageClass of the following name (which can be seen via `kubectl get storageclass` from a node in the given K8s cluster) must exist prior to kube-prometheus being deployed.
               pvc.mixin.spec.withStorageClassName('ssd'),
 
-              // The following 'selector' is only needed if you're using manual storage provisioning (https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md#manual-storage-provisioning).
-              // And note that this is not supported/allowed by AWS - uncommenting the following 'selector' line (when deploying kube-prometheus to a K8s cluster in AWS) will cause the pvc to be stuck in the Pending status and have the following error:
-              // * 'Failed to provision volume with StorageClass "ssd": claim.Spec.Selector is not supported for dynamic provisioning on AWS'
-              //pvc.mixin.spec.selector.withMatchLabels({}),
-          }, // storage
-        }, // spec
-      }, // prometheus
-    }, // prometheus
+            // The following 'selector' is only needed if you're using manual storage provisioning (https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md#manual-storage-provisioning).
+            // And note that this is not supported/allowed by AWS - uncommenting the following 'selector' line (when deploying kube-prometheus to a K8s cluster in AWS) will cause the pvc to be stuck in the Pending status and have the following error:
+            // * 'Failed to provision volume with StorageClass "ssd": claim.Spec.Selector is not supported for dynamic provisioning on AWS'
+            //pvc.mixin.spec.selector.withMatchLabels({}),
+          },  // storage
+        },  // spec
+      },  // prometheus
+    },  // prometheus
   };
 
diff --git a/jsonnetfile.lock.json b/jsonnetfile.lock.json
index 7473392929a569753b3fc31953e66b26d1d2b3e9..195e58e036139aa8b4207b9c60e3f8e7bf21daee 100644
--- a/jsonnetfile.lock.json
+++ b/jsonnetfile.lock.json
@@ -8,7 +8,7 @@
                     "subdir": "contrib/kube-prometheus/jsonnet/kube-prometheus"
                 }
             },
-            "version": "6e412f0b4727f3a7a6f097530294409baa6b520a"
+            "version": "fa0a0ae33a16a23845da8ab9973dd4eed50a20df"
         },
         {
             "name": "ksonnet",
diff --git a/manifests/prometheus-prometheus.yaml b/manifests/prometheus-prometheus.yaml
index ae18cd67553bfc8a5191b943f400471c6257faa8..94fd64dc250a0cb3bca49ebf760b30d44314590c 100644
--- a/manifests/prometheus-prometheus.yaml
+++ b/manifests/prometheus-prometheus.yaml
@@ -25,4 +25,4 @@ spec:
   serviceAccountName: prometheus-k8s
   serviceMonitorNamespaceSelector: {}
   serviceMonitorSelector: {}
-  version: v2.4.3
+  version: v2.5.0
diff --git a/sync-to-internal-registry.jsonnet b/sync-to-internal-registry.jsonnet
index f0cf35ae3f2a4e7ac7d38ae927324fb3fa926771..b7c85571f5617cf5320ba6218689281df30a7143 100644
--- a/sync-to-internal-registry.jsonnet
+++ b/sync-to-internal-registry.jsonnet
@@ -3,20 +3,20 @@ local l = import 'kube-prometheus/lib/lib.libsonnet';
 local config = kp._config;
 
 local makeImages(config) = [
-    {
-        name: config.imageRepos[image],
-        tag: config.versions[image],
-    }
-    for image in std.objectFields(config.imageRepos)
+  {
+    name: config.imageRepos[image],
+    tag: config.versions[image],
+  }
+  for image in std.objectFields(config.imageRepos)
 ];
 
 local upstreamImage(image) = '%s:%s' % [image.name, image.tag];
 local downstreamImage(registry, image) = '%s/%s:%s' % [registry, l.imageName(image.name), image.tag];
 
 local pullPush(image, newRegistry) = [
-    'docker pull %s' % upstreamImage(image),
-    'docker tag %s %s' % [upstreamImage(image), downstreamImage(newRegistry, image)],
-    'docker push %s' % downstreamImage(newRegistry, image),
+  'docker pull %s' % upstreamImage(image),
+  'docker tag %s %s' % [upstreamImage(image), downstreamImage(newRegistry, image)],
+  'docker push %s' % downstreamImage(newRegistry, image),
 ];
 
 local images = makeImages(config);
@@ -26,5 +26,5 @@ local output(repository) = std.flattenArrays([
   for image in images
 ]);
 
-function(repository="my-registry.com/repository")
-  std.join('\n', output(repository))
+function(repository='my-registry.com/repository')
+  std.join('\n', output(repository))