diff --git a/examples/prometheus-pvc.jsonnet b/examples/prometheus-pvc.jsonnet
new file mode 100644
index 0000000000000000000000000000000000000000..75b250fee92332bc383ec11eb334516d29095a8f
--- /dev/null
+++ b/examples/prometheus-pvc.jsonnet
@@ -0,0 +1,59 @@
+
+// Reference info: documentation for https://github.com/ksonnet/ksonnet-lib can be found at http://g.bryan.dev.hepti.center
+//
+local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet'; // https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k.libsonnet - imports k8s.libsonnet
+// * https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k8s.libsonnet defines things such as "persistentVolumeClaim:: {"
+//
+local pvc = k.core.v1.persistentVolumeClaim; // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#persistentvolumeclaim-v1-core (defines variable named 'spec' of type 'PersistentVolumeClaimSpec')
+
+local kp =
+  (import 'kube-prometheus/kube-prometheus.libsonnet') +
+  (import 'kube-prometheus/kube-prometheus-bootkube.libsonnet') +
+  {
+    _config+:: {
+      namespace: 'monitoring',
+    },
+
+    prometheus+:: {
+      prometheus+: {
+        spec+: { // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+          // If a value isn't specified for 'retention', then by default the '--storage.tsdb.retention=24h' arg will be passed to prometheus by prometheus-operator.
+          // The possible values for a prometheus <duration> are:
+          // * https://github.com/prometheus/common/blob/c7de230/model/time.go#L178 specifies "^([0-9]+)(y|w|d|h|m|s|ms)$" (years weeks days hours minutes seconds milliseconds)
+          retention: "30d",
+
+          // Reference info: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md
+          // By default (if the following 'storage.volumeClaimTemplate' isn't created), prometheus will be created with an EmptyDir for the 'prometheus-k8s-db' volume (for the prom tsdb).
+          // This 'storage.volumeClaimTemplate' causes the following to be automatically created (via dynamic provisioning) for each prometheus pod:
+          // * PersistentVolumeClaim (and a corresponding PersistentVolume)
+          // * the actual volume (per the StorageClassName specified below)
+          storage: { // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#storagespec
+            volumeClaimTemplate: // (same link as above where the 'pvc' variable is defined)
+              pvc.new() + // http://g.bryan.dev.hepti.center/core/v1/persistentVolumeClaim/#core.v1.persistentVolumeClaim.new
+
+              pvc.mixin.spec.withAccessModes('ReadWriteOnce') +
+
+              // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#resourcerequirements-v1-core (defines 'requests'),
+              // and https://kubernetes.io/docs/concepts/policy/resource-quotas/#storage-resource-quota (defines 'requests.storage')
+              pvc.mixin.spec.resources.withRequests({ storage: '100Gi' }) +
+
+              // A StorageClass of the following name (which can be seen via `kubectl get storageclass` from a node in the given K8s cluster) must exist prior to kube-prometheus being deployed.
+              pvc.mixin.spec.withStorageClassName('ssd'),
+
+              // The following 'selector' is only needed if you're using manual storage provisioning (https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md#manual-storage-provisioning).
+              // And note that this is not supported/allowed by AWS - uncommenting the following 'selector' line (when deploying kube-prometheus to a K8s cluster in AWS) will cause the pvc to be stuck in the Pending status and have the following error:
+              // * 'Failed to provision volume with StorageClass "ssd": claim.Spec.Selector is not supported for dynamic provisioning on AWS'
+              //pvc.mixin.spec.selector.withMatchLabels({}),
+          }, // storage
+        }, // spec
+      }, // prometheus
+    }, // prometheus
+
+  };
+
+{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
+{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
+{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
+{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
+{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
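For readers unfamiliar with ksonnet-lib, the 'pvc.new() + ...' mixin chain in this example should evaluate to roughly the following plain Jsonnet object (a sketch for illustration only, not output generated from this diff; it assumes ksonnet.beta.3 semantics, where new() sets apiVersion/kind and withAccessModes() wraps a single string into a list; 'ssd' and '100Gi' are the same example values used above):

    // Rough plain-object equivalent of the volumeClaimTemplate built above.
    {
      apiVersion: 'v1',
      kind: 'PersistentVolumeClaim',
      spec: {
        accessModes: ['ReadWriteOnce'],
        resources: { requests: { storage: '100Gi' } },
        storageClassName: 'ssd',
      },
    }

If that sketch is accurate for your ksonnet-lib version, either form can be assigned to 'storage.volumeClaimTemplate'; the mixin style is used here to stay consistent with the rest of the kube-prometheus examples.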