Commit dda5b0cc authored by Fabian Reinartz, committed by GitHub

Merge pull request #16 from brancz/default-resources

Update manifests to latest version of Prometheus Operator
parents 9f7b2173 cbf86730
Showing 93 additions and 28 deletions
@@ -43,10 +43,6 @@ To tear it all down again, run:
hack/cluster-monitoring/teardown
```
-> All services in the manifest still contain the `prometheus.io/scrape = true`
-> annotations. It is not used by the Prometheus Operator. They remain for
-> pre Prometheus v1.3.0 deployments as in [this example configuration](https://github.com/prometheus/prometheus/blob/6703404cb431f57ca4c5097bc2762438d3c1968e/documentation/examples/prometheus-kubernetes.yml).
## Monitoring custom services
The example manifests in [/manifests/examples/example-app](/manifests/examples/example-app)
......
@@ -65,4 +65,4 @@ scrape_configs:
regex: "kube-(.*)-prometheus-discovery"
- action: keep
source_labels: [__meta_kubernetes_endpoint_port_name]
regex: "prometheus"
regex: "prometheus.*"
apiVersion: v1
kind: ConfigMap
metadata:
name: alertmanager-main
data:
alertmanager.yaml: |-
global:
resolve_timeout: 5m
route:
group_by: ['job']
group_wait: 30s
group_interval: 5m
repeat_interval: 12h
receiver: 'webhook'
receivers:
- name: 'webhook'
webhook_configs:
- url: 'http://alertmanagerwh:30500/'
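
The route above sends every notification to the single `webhook` receiver. Alertmanager's routing tree also supports nested routes for splitting traffic; a minimal sketch, assuming a hypothetical `severity` label and `pager` receiver that are not part of this commit:

```yaml
route:
  group_by: ['job']
  receiver: 'webhook'
  routes:
  # Hypothetical sub-route: alerts labeled severity=critical go to a
  # dedicated receiver; everything else falls through to 'webhook'.
  - match:
      severity: critical
    receiver: 'pager'
receivers:
- name: 'webhook'
  webhook_configs:
  - url: 'http://alertmanagerwh:30500/'
- name: 'pager'
  webhook_configs:
  - url: 'http://pager.example:30501/'  # hypothetical endpoint
```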
apiVersion: v1
kind: Service
metadata:
name: alertmanager-main
spec:
type: NodePort
ports:
- name: web
nodePort: 30903
port: 9093
protocol: TCP
targetPort: web
selector:
alertmanager: alertmanager-main
apiVersion: "monitoring.coreos.com/v1alpha1"
kind: "Alertmanager"
metadata:
name: "alertmanager-main"
labels:
alertmanager: "main"
spec:
replicas: 3
version: v0.5.1
@@ -5,8 +5,6 @@ metadata:
labels:
app: etcd
etcd: k8s
-  annotations:
-    prometheus.io/scrape: 'true'
spec:
type: ClusterIP
clusterIP: None
......
@@ -5,8 +5,6 @@ metadata:
labels:
app: etcd
etcd: k8s
-  annotations:
-    prometheus.io/scrape: 'true'
spec:
type: ClusterIP
clusterIP: None
......
@@ -4,8 +4,6 @@ metadata:
name: example-app
labels:
tier: frontend
-  annotations:
-    prometheus.io/scrape: 'true'
spec:
selector:
app: example-app
......
@@ -6,8 +6,20 @@ metadata:
labels:
prometheus: frontend
spec:
-  version: v1.3.0
+  version: v1.4.1
serviceMonitors:
- selector:
matchLabels:
tier: frontend
+  resources:
+    requests:
+      # 2Gi is default, but won't schedule if you don't have a node with >2Gi
+      # memory. Modify based on your target and time-series count for
+      # production use. This value is mainly meant for demonstration/testing
+      # purposes.
+      memory: 400Mi
+  alerting:
+    alertmanagers:
+    - namespace: monitoring
+      name: alertmanager-main
+      port: web
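
The `serviceMonitors` selector above picks up ServiceMonitor objects labeled `tier: frontend`. For illustration, a minimal matching ServiceMonitor might look like this; the `app: example-app` selector mirrors the example-app Service earlier in this diff, while the `web` port name is an assumption:

```yaml
apiVersion: monitoring.coreos.com/v1alpha1
kind: ServiceMonitor
metadata:
  name: frontend
  labels:
    tier: frontend      # matched by the Prometheus selector above
spec:
  selector:
    matchLabels:
      app: example-app  # the example-app Service's label
  endpoints:
  - port: web           # assumed port name on that Service
```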
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/scrape: 'true'
labels:
app: kube-state-metrics
name: kube-state-metrics
......
@@ -12,7 +12,7 @@ spec:
hostNetwork: true
hostPID: true
containers:
-      - image: quay.io/prometheus/node-exporter:0.12.0
+      - image: quay.io/prometheus/node-exporter:0.13.0
args:
- "-collector.procfs=/host/proc"
- "-collector.sysfs=/host/sys"
......
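
The `-collector.procfs=/host/proc` and `-collector.sysfs=/host/sys` flags assume the host's `/proc` and `/sys` are mounted into the container. Those mounts sit outside this hunk; a sketch of the wiring they imply (volume names assumed):

```yaml
        volumeMounts:
        - name: proc
          mountPath: /host/proc
          readOnly: true
        - name: sys
          mountPath: /host/sys
          readOnly: true
      volumes:
      - name: proc
        hostPath:
          path: /proc
      - name: sys
        hostPath:
          path: /sys
```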
@@ -3,8 +3,6 @@ kind: Service
metadata:
labels:
app: node-exporter
-  annotations:
-    prometheus.io/scrape: 'true'
name: node-exporter
spec:
type: ClusterIP
......
@@ -4,8 +4,6 @@ metadata:
name: grafana
labels:
app: grafana
-  annotations:
-    prometheus.io/scrape: 'true'
spec:
type: NodePort
ports:
......
@@ -4,8 +4,6 @@ metadata:
name: kube-controller-manager-prometheus-discovery
labels:
k8s-app: kube-controller-manager
-  annotations:
-    prometheus.io/scrape: 'true'
spec:
selector:
k8s-app: kube-controller-manager
......
apiVersion: v1
kind: Service
metadata:
name: kube-dns-prometheus-discovery
labels:
k8s-app: kube-dns
spec:
selector:
k8s-app: kube-dns
type: ClusterIP
clusterIP: None
ports:
- name: prometheus-skydns
port: 10055
targetPort: 10055
protocol: TCP
- name: prometheus-dnsmasq
port: 10054
targetPort: 10054
protocol: TCP
@@ -4,8 +4,6 @@ metadata:
name: kube-scheduler-prometheus-discovery
labels:
k8s-app: kube-scheduler
-  annotations:
-    prometheus.io/scrape: 'true'
spec:
selector:
k8s-app: kube-scheduler
......
@@ -68,7 +68,7 @@ data:
regex: "kube-(.*)-prometheus-discovery"
- action: keep
source_labels: [__meta_kubernetes_endpoint_port_name]
regex: "prometheus"
regex: "prometheus.*"
kind: ConfigMap
metadata:
creationTimestamp: null
......
@@ -5,4 +5,16 @@ metadata:
labels:
prometheus: k8s
spec:
-  version: v1.3.0
+  version: v1.4.1
+  resources:
+    requests:
+      # 2Gi is default, but won't schedule if you don't have a node with >2Gi
+      # memory. Modify based on your target and time-series count for
+      # production use. This value is mainly meant for demonstration/testing
+      # purposes.
+      memory: 400Mi
+  alerting:
+    alertmanagers:
+    - namespace: monitoring
+      name: alertmanager-main
+      port: web
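
As the inline comment notes, the 400Mi request is a demonstration value. For production, size the request to your target and time-series count, and optionally cap it with a limit; hypothetical numbers:

```yaml
resources:
  requests:
    memory: 2Gi   # hypothetical: size to your series count and churn
  limits:
    memory: 4Gi   # optional cap, also hypothetical
```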