diff --git a/jsonnet/kube-prometheus/versions.json b/jsonnet/kube-prometheus/versions.json
index ab5e12285acb9ab0f5583f8f18019933d62f0082..0b02880ea813619feef4a84ccc564a76b454007b 100644
--- a/jsonnet/kube-prometheus/versions.json
+++ b/jsonnet/kube-prometheus/versions.json
@@ -1,7 +1,7 @@
 {
   "alertmanager": "0.23.0",
   "blackboxExporter": "0.19.0",
-  "grafana": "8.3.4",
+  "grafana": "8.3.6",
   "kubeStateMetrics": "2.3.0",
   "nodeExporter": "1.3.1",
   "prometheus": "2.33.1",
diff --git a/jsonnetfile.lock.json b/jsonnetfile.lock.json
index 453e5d65d5a815e9fb2b174df67219bc5bbf88f5..86074b85a3666d359ec7b199ea1e68381a026e30 100644
--- a/jsonnetfile.lock.json
+++ b/jsonnetfile.lock.json
@@ -8,8 +8,8 @@
           "subdir": "grafana"
         }
       },
-      "version": "e7aeb2be37ea3afcda0dd380553440b394567cd5",
-      "sum": "IujrtNI+ckmR8isarpBU6yBb2jRtVlytG/qlEOH6BF8="
+      "version": "8746a15fdf825da92bbb4d13948adaa1d0e17d5f",
+      "sum": "OumaoU7uTb3NVvHM5Bvua/NwVgywFBv2yWghh551350="
     },
     {
       "source": {
@@ -18,7 +18,7 @@
           "subdir": "contrib/mixin"
         }
       },
-      "version": "986a2b51f4e87fe72e3fa2e85394dd659268dfcb",
+      "version": "20c89df5e5e2d738efb9c276d954d754eb86918b",
       "sum": "wIsqEIGSqnWwJApdQ7k8x2kd/AsffJhYcqUebDiS01w="
     },
     {
@@ -28,7 +28,7 @@
           "subdir": "grafana-mixin"
         }
       },
-      "version": "3a2e3267ba5be3f9693e865479c7ab7e5858d405",
+      "version": "9fafbfc87e4d79243265c727eb5dbe737a45c6d6",
       "sum": "MkjR7zCgq6MUZgjDzop574tFKoTX2OBr7DTwm1K+Ofs="
     },
     {
@@ -48,7 +48,7 @@
           "subdir": "grafana-builder"
         }
       },
-      "version": "627790649fd44e549b2fb84bfb487dc5e53b5477",
+      "version": "0bd3d19d09d3f83b10860ad93397224d327e2f67",
       "sum": "0KkygBQd/AFzUvVzezE4qF/uDYgrwUXVpZfINBti0oc="
     },
     {
@@ -58,8 +58,8 @@
           "subdir": ""
         }
       },
-      "version": "481475d6dd5f297669e518365c3a1f1b040a1bc8",
-      "sum": "2mKK1IYF5RsJW22fOQ7QpX5CRFeI0zwdnAl4Q8zQlhw="
+      "version": "5f43b3783b3a7f505e3e4e484af0b22653802342",
+      "sum": "pTtVZP1dZZH8XAQpjk1VS9gcPY/+JVzgwq76a1ClxmQ="
     },
     {
       "source": {
@@ -68,7 +68,7 @@
           "subdir": "lib/promgrafonnet"
         }
       },
-      "version": "481475d6dd5f297669e518365c3a1f1b040a1bc8",
+      "version": "5f43b3783b3a7f505e3e4e484af0b22653802342",
       "sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
     },
     {
@@ -78,7 +78,7 @@
           "subdir": "jsonnet/kube-state-metrics"
         }
       },
-      "version": "34a1398f1c341697f894d07aed18ddcdd708d507",
+      "version": "60906c6a2501a1784aeaf0dc9c5ace9bad4b22ec",
       "sum": "P0dCnbzyPScQGNXwXRcwiPkMLeTq0IPNbSTysDbySnM="
     },
     {
@@ -88,7 +88,7 @@
           "subdir": "jsonnet/kube-state-metrics-mixin"
         }
      },
-      "version": "34a1398f1c341697f894d07aed18ddcdd708d507",
+      "version": "60906c6a2501a1784aeaf0dc9c5ace9bad4b22ec",
       "sum": "u8gaydJoxEjzizQ8jY8xSjYgWooPmxw+wIWdDxifMAk="
     },
     {
@@ -98,7 +98,7 @@
           "subdir": "jsonnet/mixin"
         }
       },
-      "version": "023feeca9d01be2fb114e0a6b7ffa67a5928de08",
+      "version": "2809ca11242cb6fdc107257a8e0a77ea37055810",
       "sum": "qZ4WgiweaE6eeKtFK60QUjLO8sf2L9Q8fgafWvDcyfY=",
       "name": "prometheus-operator-mixin"
     },
@@ -109,8 +109,8 @@
           "subdir": "jsonnet/prometheus-operator"
         }
       },
-      "version": "023feeca9d01be2fb114e0a6b7ffa67a5928de08",
-      "sum": "d/Plm5fiKe4cg4noh0USHvZAuaxxjiCtWjFKq32t1IQ="
+      "version": "2809ca11242cb6fdc107257a8e0a77ea37055810",
+      "sum": "Ug6EsSTZhLtGuW4BwLHSGlkBpUgUprRr6m6M+UjVDqM="
     },
     {
       "source": {
@@ -140,7 +140,7 @@
           "subdir": "documentation/prometheus-mixin"
         }
       },
-      "version": "277bf93952b56227cb750a8129197efa489eddde",
+      "version": "579331446a04503b2b1515767e7ceaceab2c6342",
       "sum": "ZjQoYhvgKwJNkg+h+m9lW3SYjnjv5Yx5btEipLhru88=",
       "name": "prometheus"
     },
@@ -151,7 +151,7 @@
           "subdir": "mixin"
         }
       },
-      "version": "6e2c00af766d8d76e22f8465e4b789205d30b086",
+      "version": "b2c1ff02b3920f183ef8ef1626f1e8f17e10f229",
       "sum": "mVcs4XmsyVdy1Z96vIP+SsSQYK/Yi1yEEsDb6K1z44Q=",
       "name": "thanos-mixin"
     },
diff --git a/manifests/grafana-config.yaml b/manifests/grafana-config.yaml
index 60f45e614e1450e546424dde7a5066b1ac7263e5..b8e00eb653d4a7e9ba54cd87e5fc85e82756900c 100644
--- a/manifests/grafana-config.yaml
+++ b/manifests/grafana-config.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: grafana
     app.kubernetes.io/name: grafana
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: 8.3.4
+    app.kubernetes.io/version: 8.3.6
   name: grafana-config
   namespace: monitoring
 stringData:
diff --git a/manifests/grafana-dashboardDatasources.yaml b/manifests/grafana-dashboardDatasources.yaml
index 1b42d5b5d20ccde9382f87fcb2e8d60213925497..641551afdc6f9361e14bbb311fd3a84768d52fde 100644
--- a/manifests/grafana-dashboardDatasources.yaml
+++ b/manifests/grafana-dashboardDatasources.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: grafana
     app.kubernetes.io/name: grafana
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: 8.3.4
+    app.kubernetes.io/version: 8.3.6
   name: grafana-datasources
   namespace: monitoring
 stringData:
diff --git a/manifests/grafana-dashboardDefinitions.yaml b/manifests/grafana-dashboardDefinitions.yaml
index 362a43ed767d9de8d415891c588c7046ffacc788..ad567aced69bb6e705371f99fa682c6d80db9137 100644
--- a/manifests/grafana-dashboardDefinitions.yaml
+++ b/manifests/grafana-dashboardDefinitions.yaml
@@ -596,7 +596,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-alertmanager-overview
     namespace: monitoring
 - apiVersion: v1
@@ -2357,7 +2357,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-apiserver
     namespace: monitoring
 - apiVersion: v1
@@ -4228,7 +4228,7 @@ items:
      app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-cluster-total
     namespace: monitoring
 - apiVersion: v1
@@ -5407,7 +5407,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-controller-manager
     namespace: monitoring
 - apiVersion: v1
@@ -6032,7 +6032,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-grafana-overview
     namespace: monitoring
 - apiVersion: v1
@@ -9109,7 +9109,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-k8s-resources-cluster
     namespace: monitoring
 - apiVersion: v1
@@ -11895,7 +11895,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-k8s-resources-namespace
     namespace: monitoring
 - apiVersion: v1
@@ -12910,7 +12910,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-k8s-resources-node
     namespace: monitoring
 - apiVersion: v1
@@ -15368,7 +15368,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-k8s-resources-pod
     namespace: monitoring
 - apiVersion: v1
@@ -17381,7 +17381,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-k8s-resources-workload
     namespace: monitoring
 - apiVersion: v1
@@ -19559,7 +19559,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-k8s-resources-workloads-namespace
     namespace: monitoring
 - apiVersion: v1
@@ -21802,7 +21802,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-kubelet
     namespace: monitoring
 - apiVersion: v1
@@ -23255,7 +23255,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-namespace-by-pod
     namespace: monitoring
 - apiVersion: v1
@@ -24980,7 +24980,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-namespace-by-workload
     namespace: monitoring
 - apiVersion: v1
@@ -26032,7 +26032,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-node-cluster-rsrc-use
     namespace: monitoring
 - apiVersion: v1
@@ -27110,7 +27110,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-node-rsrc-use
     namespace: monitoring
 - apiVersion: v1
@@ -28090,7 +28090,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-nodes
     namespace: monitoring
 - apiVersion: v1
@@ -28666,7 +28666,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-persistentvolumesusage
     namespace: monitoring
 - apiVersion: v1
@@ -29883,7 +29883,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-pod-total
     namespace: monitoring
 - apiVersion: v1
@@ -31542,7 +31542,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-prometheus-remote-write
     namespace: monitoring
 - apiVersion: v1
@@ -32766,7 +32766,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-prometheus
     namespace: monitoring
 - apiVersion: v1
@@ -34026,7 +34026,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-proxy
     namespace: monitoring
 - apiVersion: v1
@@ -35127,7 +35127,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-scheduler
     namespace: monitoring
 - apiVersion: v1
@@ -36554,7 +36554,7 @@ items:
       app.kubernetes.io/component: grafana
       app.kubernetes.io/name: grafana
       app.kubernetes.io/part-of: kube-prometheus
-      app.kubernetes.io/version: 8.3.4
+      app.kubernetes.io/version: 8.3.6
     name: grafana-dashboard-workload-total
     namespace: monitoring
 kind: ConfigMapList
diff --git a/manifests/grafana-dashboardSources.yaml b/manifests/grafana-dashboardSources.yaml
index a37c549bb98ae1f57969c88d765da4eb1fb3378c..3644a10ecba7a528ae60a7e2d61bdae606aecfa2 100644
--- a/manifests/grafana-dashboardSources.yaml
+++ b/manifests/grafana-dashboardSources.yaml
@@ -22,6 +22,6 @@ metadata:
     app.kubernetes.io/component: grafana
     app.kubernetes.io/name: grafana
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: 8.3.4
+    app.kubernetes.io/version: 8.3.6
   name: grafana-dashboards
   namespace: monitoring
diff --git a/manifests/grafana-deployment.yaml b/manifests/grafana-deployment.yaml
index 69eb37a6d7bd3d210075fc4d613dd793e73911dc..0a1bbbd20713ce41122a180c17767d77033e1c6f 100644
--- a/manifests/grafana-deployment.yaml
+++ b/manifests/grafana-deployment.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: grafana
     app.kubernetes.io/name: grafana
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: 8.3.4
+    app.kubernetes.io/version: 8.3.6
   name: grafana
   namespace: monitoring
 spec:
@@ -18,19 +18,19 @@ spec:
   template:
     metadata:
       annotations:
-        checksum/grafana-config: da579b2ee81dc98ffbcc068a3422822a
-        checksum/grafana-dashboardproviders: bc79f12017c019002ed650d44571a465
-        checksum/grafana-datasources: c921684c6242ca2a8564803a9ae21504
+        checksum/grafana-config: 10f45bde8a653540083f689579259f03
+        checksum/grafana-dashboardproviders: c7291d0bfef540b2eb119cd1d0f87807
+        checksum/grafana-datasources: 7a6fc0f594f9b6a37712c5dc803adc2b
       labels:
         app.kubernetes.io/component: grafana
         app.kubernetes.io/name: grafana
         app.kubernetes.io/part-of: kube-prometheus
-        app.kubernetes.io/version: 8.3.4
+        app.kubernetes.io/version: 8.3.6
     spec:
       automountServiceAccountToken: false
       containers:
       - env: []
-        image: grafana/grafana:8.3.4
+        image: grafana/grafana:8.3.6
         name: grafana
         ports:
         - containerPort: 3000
diff --git a/manifests/grafana-prometheusRule.yaml b/manifests/grafana-prometheusRule.yaml
index 5945a5330134c5cfd45b84060ebde34726cbebf1..4622565d860c12d69b0ae920fddfe4303cf4fddc 100644
--- a/manifests/grafana-prometheusRule.yaml
+++ b/manifests/grafana-prometheusRule.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: grafana
     app.kubernetes.io/name: grafana
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: 8.3.4
+    app.kubernetes.io/version: 8.3.6
     prometheus: k8s
     role: alert-rules
   name: grafana-rules
diff --git a/manifests/grafana-service.yaml b/manifests/grafana-service.yaml
index daaebb070e1204222c7637d36fcd41e06038a882..e3fe778972d293ed0b6bdbe364425da187b5bc16 100644
--- a/manifests/grafana-service.yaml
+++ b/manifests/grafana-service.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: grafana
     app.kubernetes.io/name: grafana
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: 8.3.4
+    app.kubernetes.io/version: 8.3.6
   name: grafana
   namespace: monitoring
 spec:
diff --git a/manifests/grafana-serviceAccount.yaml b/manifests/grafana-serviceAccount.yaml
index 2972098a1e04d30da5e752b1d595c69a449c15c2..8653ac63ab6f8cb488bc4a4b61e24462b4422f39 100644
--- a/manifests/grafana-serviceAccount.yaml
+++ b/manifests/grafana-serviceAccount.yaml
@@ -6,6 +6,6 @@ metadata:
     app.kubernetes.io/component: grafana
     app.kubernetes.io/name: grafana
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: 8.3.4
+    app.kubernetes.io/version: 8.3.6
   name: grafana
   namespace: monitoring
diff --git a/manifests/grafana-serviceMonitor.yaml b/manifests/grafana-serviceMonitor.yaml
index 369aec6b50432abc7c68703d6a3292f328036e6e..8ef1f28fc412e63e2285598e328c3da0b10a1617 100644
--- a/manifests/grafana-serviceMonitor.yaml
+++ b/manifests/grafana-serviceMonitor.yaml
@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: grafana
     app.kubernetes.io/name: grafana
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: 8.3.4
+    app.kubernetes.io/version: 8.3.6
   name: grafana
   namespace: monitoring
 spec:
diff --git a/manifests/kubernetesControlPlane-prometheusRule.yaml b/manifests/kubernetesControlPlane-prometheusRule.yaml
index 214e78ae4fd8cbb9d426af254d5ea443da456cff..42977a8540113734e904b3468355c094f1c383e3 100644
--- a/manifests/kubernetesControlPlane-prometheusRule.yaml
+++ b/manifests/kubernetesControlPlane-prometheusRule.yaml
@@ -758,18 +758,18 @@ spec:
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1d]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1d]))
             -
             (
               (
-                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[1d]))
+                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[1d]))
                 or
                 vector(0)
               )
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[1d]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[1d]))
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="30"}[1d]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[1d]))
             )
           )
           +
@@ -785,18 +785,18 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1h]))
             -
             (
               (
-                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[1h]))
+                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[1h]))
                 or
                 vector(0)
               )
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[1h]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[1h]))
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="30"}[1h]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[1h]))
             )
           )
           +
@@ -812,18 +812,18 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[2h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[2h]))
             -
             (
               (
-                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[2h]))
+                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[2h]))
                 or
                 vector(0)
               )
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[2h]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[2h]))
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="30"}[2h]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[2h]))
             )
           )
           +
@@ -839,18 +839,18 @@
         (
          (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30m]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[30m]))
             -
             (
               (
-                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[30m]))
+                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[30m]))
                 or
                 vector(0)
               )
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[30m]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[30m]))
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="30"}[30m]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[30m]))
             )
           )
           +
@@ -866,18 +866,18 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[3d]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[3d]))
             -
             (
               (
-                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[3d]))
+                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[3d]))
                 or
                 vector(0)
               )
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[3d]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[3d]))
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="30"}[3d]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[3d]))
             )
           )
           +
@@ -893,18 +893,18 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[5m]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))
             -
             (
               (
-                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[5m]))
+                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[5m]))
                 or
                 vector(0)
               )
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[5m]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[5m]))
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="30"}[5m]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[5m]))
             )
           )
           +
@@ -920,18 +920,18 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[6h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[6h]))
             -
             (
               (
-                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[6h]))
+                sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[6h]))
                 or
                 vector(0)
               )
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[6h]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[6h]))
               +
-              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="30"}[6h]))
+              sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[6h]))
             )
           )
           +
@@ -947,9 +947,9 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1d]))
             -
-            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1d]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[1d]))
           )
           +
           sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d]))
@@ -963,9 +963,9 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1h]))
             -
-            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[1h]))
           )
           +
           sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h]))
@@ -979,9 +979,9 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[2h]))
             -
-            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[2h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[2h]))
           )
           +
           sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h]))
@@ -995,9 +995,9 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[30m]))
             -
-            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[30m]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[30m]))
           )
           +
           sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m]))
@@ -1011,9 +1011,9 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[3d]))
             -
-            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[3d]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[3d]))
           )
           +
           sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d]))
@@ -1027,9 +1027,9 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))
             -
-            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[5m]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[5m]))
           )
           +
           sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m]))
@@ -1043,9 +1043,9 @@
         (
           (
             # too slow
-            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[6h]))
             -
-            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[6h]))
+            sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[6h]))
           )
           +
           sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h]))
@@ -1058,32 +1058,17 @@
   - name: kube-apiserver-histogram.rules
     rules:
     - expr: |
-        histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET"}[5m]))) > 0
+        histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0
       labels:
         quantile: "0.99"
         verb: read
       record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
     - expr: |
-        histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))) > 0
+        histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0
       labels:
         quantile: "0.99"
         verb: write
       record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
-    - expr: |
-        histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod))
-      labels:
-        quantile: "0.99"
-      record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
-    - expr: |
-        histogram_quantile(0.9, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod))
-      labels:
-        quantile: "0.9"
-      record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
-    - expr: |
-        histogram_quantile(0.5, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod))
-      labels:
-        quantile: "0.5"
-      record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile
   - interval: 3m
     name: kube-apiserver-availability.rules
     rules:
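Reviewer note (not part of the patch): the rule changes above add a subresource!~"proxy|attach|log|exec|portforward" matcher so that long-running streaming requests no longer count against the apiserver latency SLO. A minimal illustrative PromQL query, assuming a standard kube-prometheus install with the "apiserver" job, to see how much traffic that matcher excludes:

# Illustrative only: read-request rate, per subresource, that the new matcher
# drops from the burn-rate and histogram recording rules.
sum by (subresource) (
  rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource=~"proxy|attach|log|exec|portforward"}[5m])
)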