diff --git a/cluster-autoscaler/apis/go.mod b/cluster-autoscaler/apis/go.mod
index 296387f9fa7ada2c7719b352159153ea56253853..14b2b4565d529959eaa6dedda61312d1b0cfad29 100644
--- a/cluster-autoscaler/apis/go.mod
+++ b/cluster-autoscaler/apis/go.mod
@@ -5,9 +5,9 @@ go 1.22.0
 require (
 	github.com/onsi/ginkgo/v2 v2.16.0
 	github.com/onsi/gomega v1.31.1
-	k8s.io/apimachinery v0.30.5
-	k8s.io/client-go v0.30.5
-	k8s.io/code-generator v0.30.5
+	k8s.io/apimachinery v0.30.11
+	k8s.io/client-go v0.30.11
+	k8s.io/code-generator v0.30.11
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1
 )
 
@@ -48,7 +48,7 @@ require (
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.30.5 // indirect
+	k8s.io/api v0.30.11 // indirect
 	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
 	k8s.io/klog/v2 v2.120.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
diff --git a/cluster-autoscaler/apis/go.sum b/cluster-autoscaler/apis/go.sum
index a7c0f94c8f4e3c77a1618d42673c9fe231b1bb10..63da96097abce782443365032f80fd5dc5190b69 100644
--- a/cluster-autoscaler/apis/go.sum
+++ b/cluster-autoscaler/apis/go.sum
@@ -144,14 +144,14 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.30.5 h1:Coz05sfEVywzGcA96AJPUfs2B8LBMnh+IIsM+HCfaz8=
-k8s.io/api v0.30.5/go.mod h1:HfNBGFvq9iNK8dmTKjYIdAtMxu8BXTb9c1SJyO6QjKs=
-k8s.io/apimachinery v0.30.5 h1:CQZO19GFgw4zcOjY2H+mJ3k1u1o7zFACTNCB7nu4O18=
-k8s.io/apimachinery v0.30.5/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/client-go v0.30.5 h1:vEDSzfTz0F8TXcWVdXl+aqV7NAV8M3UvC2qnGTTCoKw=
-k8s.io/client-go v0.30.5/go.mod h1:/q5fHHBmhAUesOOFJACpD7VJ4e57rVtTPDOsvXrPpMk=
-k8s.io/code-generator v0.30.5 h1:vxarF9JPSQIYAzNAqRELLCgLlJcLRiHLRuAuareRCmA=
-k8s.io/code-generator v0.30.5/go.mod h1:UM9d4mXhX3SSURiNfVI4ib5s2SZyIgF1+x2MOgzPNog=
+k8s.io/api v0.30.11 h1:TpkiTTxQ6GSwHnqKOPeQRRFcBknTjOBwFYjWmn25Z1U=
+k8s.io/api v0.30.11/go.mod h1:DZzjCDcat14fMx/4Fm3h5lsbVStfHmgNzNDMy7JQMqU=
+k8s.io/apimachinery v0.30.11 h1:+qV/yXI2R7BxX1zeyELDFb0PopX22znfq5w+icav49k=
+k8s.io/apimachinery v0.30.11/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/client-go v0.30.11 h1:yamC5zf/g5ztZO3SELklaOSZKTOAL3Q0v0i6GBvq+Mg=
+k8s.io/client-go v0.30.11/go.mod h1:umPRna4oj2zLU03T1m7Cla+yMzRFyhuR+jAbDZNDqlM=
+k8s.io/code-generator v0.30.11 h1:+GZ9pVaVmGq3E6AwoThcQtEEkoP3YmVS1FZsQL8chag=
+k8s.io/code-generator v0.30.11/go.mod h1:lGTwJxvisDkV6a6F9zOa83BWhj7DtFoURd4TedQOo8s=
 k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
 k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
 k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod
index 1c5d8a48e60af5c5108061000607a8b6175a39ef..a6a5ba5e38ef73d48415bf1f4940d3c4a849d464 100644
--- a/cluster-autoscaler/go.mod
+++ b/cluster-autoscaler/go.mod
@@ -40,18 +40,18 @@ require (
 	google.golang.org/protobuf v1.33.0
 	gopkg.in/gcfg.v1 v1.2.3
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.30.5
-	k8s.io/apimachinery v0.30.5
-	k8s.io/apiserver v0.30.5
+	k8s.io/api v0.30.11
+	k8s.io/apimachinery v0.30.11
+	k8s.io/apiserver v0.30.11
 	k8s.io/autoscaler/cluster-autoscaler/apis v0.0.0-00010101000000-000000000000
-	k8s.io/client-go v0.30.5
-	k8s.io/cloud-provider v0.30.5
+	k8s.io/client-go v0.30.11
+	k8s.io/cloud-provider v0.30.11
 	k8s.io/cloud-provider-aws v1.27.0
-	k8s.io/component-base v0.30.5
-	k8s.io/component-helpers v0.30.5
+	k8s.io/component-base v0.30.11
+	k8s.io/component-helpers v0.30.11
 	k8s.io/klog/v2 v2.120.1
-	k8s.io/kubelet v0.30.5
-	k8s.io/kubernetes v1.30.5
+	k8s.io/kubelet v0.30.11
+	k8s.io/kubernetes v1.30.11
 	k8s.io/legacy-cloud-providers v0.0.0
 	k8s.io/utils v0.0.0-20231127182322-b307cd553661
 	sigs.k8s.io/cloud-provider-azure v1.29.4
@@ -121,7 +121,7 @@ require (
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/cadvisor v0.49.0 // indirect
+	github.com/google/cadvisor v0.49.2 // indirect
 	github.com/google/cel-go v0.17.8 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
@@ -202,13 +202,13 @@ require (
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiextensions-apiserver v0.0.0 // indirect
-	k8s.io/code-generator v0.30.5 // indirect
-	k8s.io/controller-manager v0.30.5 // indirect
-	k8s.io/cri-api v0.30.5 // indirect
+	k8s.io/code-generator v0.30.11 // indirect
+	k8s.io/controller-manager v0.30.11 // indirect
+	k8s.io/cri-api v0.30.11 // indirect
 	k8s.io/csi-translation-lib v0.27.0 // indirect
 	k8s.io/dynamic-resource-allocation v0.0.0 // indirect
 	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
-	k8s.io/kms v0.30.5 // indirect
+	k8s.io/kms v0.30.11 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	k8s.io/kube-scheduler v0.0.0 // indirect
 	k8s.io/kubectl v0.28.0 // indirect
@@ -225,64 +225,64 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0
 
 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0
 
-replace k8s.io/api => k8s.io/api v0.30.5
+replace k8s.io/api => k8s.io/api v0.30.11
 
-replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.30.5
+replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.30.11
 
-replace k8s.io/apimachinery => k8s.io/apimachinery v0.30.5
+replace k8s.io/apimachinery => k8s.io/apimachinery v0.30.11
 
-replace k8s.io/apiserver => k8s.io/apiserver v0.30.5
+replace k8s.io/apiserver => k8s.io/apiserver v0.30.11
 
-replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.30.5
+replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.30.11
 
-replace k8s.io/client-go => k8s.io/client-go v0.30.5
+replace k8s.io/client-go => k8s.io/client-go v0.30.11
 
-replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.30.5
+replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.30.11
 
-replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.30.5
+replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.30.11
 
-replace k8s.io/code-generator => k8s.io/code-generator v0.30.5
+replace k8s.io/code-generator => k8s.io/code-generator v0.30.11
 
-replace k8s.io/component-base => k8s.io/component-base v0.30.5
+replace k8s.io/component-base => k8s.io/component-base v0.30.11
 
-replace k8s.io/component-helpers => k8s.io/component-helpers v0.30.5
+replace k8s.io/component-helpers => k8s.io/component-helpers v0.30.11
 
-replace k8s.io/controller-manager => k8s.io/controller-manager v0.30.5
+replace k8s.io/controller-manager => k8s.io/controller-manager v0.30.11
 
-replace k8s.io/cri-api => k8s.io/cri-api v0.30.5
+replace k8s.io/cri-api => k8s.io/cri-api v0.30.11
 
-replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.30.5
+replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.30.11
 
-replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.30.5
+replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.30.11
 
-replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.30.5
+replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.30.11
 
-replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.30.5
+replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.30.11
 
-replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.30.5
+replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.30.11
 
-replace k8s.io/kubectl => k8s.io/kubectl v0.30.5
+replace k8s.io/kubectl => k8s.io/kubectl v0.30.11
 
-replace k8s.io/kubelet => k8s.io/kubelet v0.30.5
+replace k8s.io/kubelet => k8s.io/kubelet v0.30.11
 
-replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.5
+replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.11
 
-replace k8s.io/metrics => k8s.io/metrics v0.30.5
+replace k8s.io/metrics => k8s.io/metrics v0.30.11
 
-replace k8s.io/mount-utils => k8s.io/mount-utils v0.30.5
+replace k8s.io/mount-utils => k8s.io/mount-utils v0.30.11
 
-replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.30.5
+replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.30.11
 
-replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.30.5
+replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.30.11
 
-replace k8s.io/sample-controller => k8s.io/sample-controller v0.30.5
+replace k8s.io/sample-controller => k8s.io/sample-controller v0.30.11
 
-replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.30.5
+replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.30.11
 
-replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.30.5
+replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.30.11
 
-replace k8s.io/kms => k8s.io/kms v0.30.5
+replace k8s.io/kms => k8s.io/kms v0.30.11
 
-replace k8s.io/endpointslice => k8s.io/endpointslice v0.30.5
+replace k8s.io/endpointslice => k8s.io/endpointslice v0.30.11
 
 replace k8s.io/autoscaler/cluster-autoscaler/apis => ./apis
diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum
index 7bae8855c0fffe5d90c341ad0f9f6c52eecdc72b..5545701676c799b096b1a207018afada7159f416 100644
--- a/cluster-autoscaler/go.sum
+++ b/cluster-autoscaler/go.sum
@@ -339,8 +339,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cadvisor v0.49.0 h1:1PYeiORXmcFYi609M4Qvq5IzcvcVaWgYxDt78uH8jYA=
-github.com/google/cadvisor v0.49.0/go.mod h1:s6Fqwb2KiWG6leCegVhw4KW40tf9f7m+SF1aXiE8Wsk=
+github.com/google/cadvisor v0.49.2 h1:6RGbLCA9sBq9EMqce+M1gefBibz4pQpwX/kODwC+FqM=
+github.com/google/cadvisor v0.49.2/go.mod h1:s6Fqwb2KiWG6leCegVhw4KW40tf9f7m+SF1aXiE8Wsk=
 github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
 github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -1130,55 +1130,55 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.30.5 h1:Coz05sfEVywzGcA96AJPUfs2B8LBMnh+IIsM+HCfaz8=
-k8s.io/api v0.30.5/go.mod h1:HfNBGFvq9iNK8dmTKjYIdAtMxu8BXTb9c1SJyO6QjKs=
-k8s.io/apiextensions-apiserver v0.30.5 h1:JfXTIyzXf5+ryncbp7T/uaVjLdvkwtqoNG2vo7S2a6M=
-k8s.io/apiextensions-apiserver v0.30.5/go.mod h1:uVLEME2UPA6UN22i+jTu66B9/0CnsjlHkId+Awo0lvs=
-k8s.io/apimachinery v0.30.5 h1:CQZO19GFgw4zcOjY2H+mJ3k1u1o7zFACTNCB7nu4O18=
-k8s.io/apimachinery v0.30.5/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/apiserver v0.30.5 h1:roo3cfvUS7zvI6u+bY35Xv3rSDXbY9dwl1gN+rxx0S4=
-k8s.io/apiserver v0.30.5/go.mod h1:p5UqIn1WPdOFo7uO/ZUdX464hHZy1DP384znr7FOIXA=
-k8s.io/client-go v0.30.5 h1:vEDSzfTz0F8TXcWVdXl+aqV7NAV8M3UvC2qnGTTCoKw=
-k8s.io/client-go v0.30.5/go.mod h1:/q5fHHBmhAUesOOFJACpD7VJ4e57rVtTPDOsvXrPpMk=
-k8s.io/cloud-provider v0.30.5 h1:rkJvDNMZUNoRoRQNQ9iaW7AqayXQT2Fa65JRBjfQBf8=
-k8s.io/cloud-provider v0.30.5/go.mod h1:bKNgnDb9iSw2ymV/wFAgTG9jgpvRiF/2LM/KqmmWycQ=
+k8s.io/api v0.30.11 h1:TpkiTTxQ6GSwHnqKOPeQRRFcBknTjOBwFYjWmn25Z1U=
+k8s.io/api v0.30.11/go.mod h1:DZzjCDcat14fMx/4Fm3h5lsbVStfHmgNzNDMy7JQMqU=
+k8s.io/apiextensions-apiserver v0.30.11 h1:r0boYooz99DF6wn+myaK92I6oe80DmWSv8gueP+mcWc=
+k8s.io/apiextensions-apiserver v0.30.11/go.mod h1:d8Fje2TL9Oc6F48Y84ZlSCP0KQOOM5P6AYoK38WLeCE=
+k8s.io/apimachinery v0.30.11 h1:+qV/yXI2R7BxX1zeyELDFb0PopX22znfq5w+icav49k=
+k8s.io/apimachinery v0.30.11/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/apiserver v0.30.11 h1:FpNjcxDPIWKL1T6hSG+FBn43GvwnjD10IGATTickA4w=
+k8s.io/apiserver v0.30.11/go.mod h1:gYexbWfBUQvIc0V5ieOGaUcJa2euK4J8jpvamwgadlo=
+k8s.io/client-go v0.30.11 h1:yamC5zf/g5ztZO3SELklaOSZKTOAL3Q0v0i6GBvq+Mg=
+k8s.io/client-go v0.30.11/go.mod h1:umPRna4oj2zLU03T1m7Cla+yMzRFyhuR+jAbDZNDqlM=
+k8s.io/cloud-provider v0.30.11 h1:8oilVoHcTNQK8CF90MPNlr80DOyv7Y3P1P2BzGyu5D4=
+k8s.io/cloud-provider v0.30.11/go.mod h1:/fWjPF6tFm2nvIjVaUM3P36OGfRnnrNi2bujFjoqugk=
 k8s.io/cloud-provider-aws v1.27.0 h1:PF8YrH8QcN6JoXB3Xxlaz84SBDYMPunJuCc0cPuCWXA=
 k8s.io/cloud-provider-aws v1.27.0/go.mod h1:9vUb5mnVnReSRDBWcBxB1b0HOeEc472iOPmrnwpN9SA=
-k8s.io/code-generator v0.30.5 h1:vxarF9JPSQIYAzNAqRELLCgLlJcLRiHLRuAuareRCmA=
-k8s.io/code-generator v0.30.5/go.mod h1:UM9d4mXhX3SSURiNfVI4ib5s2SZyIgF1+x2MOgzPNog=
-k8s.io/component-base v0.30.5 h1:O6W8GfdBuyctVy7lu7I0yo8kB6bYgzGzjCyaagb2BR0=
-k8s.io/component-base v0.30.5/go.mod h1:eliJtfE7RG18UHMWrqPQWodf1GnQVFGA6McNOHYi11g=
-k8s.io/component-helpers v0.30.5 h1:jXrCpym8Ed+FyhqO/djlC0YHmiiV+ZlLcQhl1GXprD0=
-k8s.io/component-helpers v0.30.5/go.mod h1:zNV3WR4ZUfFFDRpv5Gs2a+Ey3LtZ8ilFiKqxF90dLG4=
-k8s.io/controller-manager v0.30.5 h1:aBgRd2hDJ25x6W86FFj/DI27uQQD757OZUgdFjrzzqM=
-k8s.io/controller-manager v0.30.5/go.mod h1:CoCm8RhJrZCue2pYHhc/eZ72ogPPZEhDHJV/FWxOaU4=
-k8s.io/cri-api v0.30.5 h1:FRwv3mhbSy3t/kF1FAvDTK/fm8cRTxdnkFLIOx6rx/s=
-k8s.io/cri-api v0.30.5/go.mod h1://4/umPJSW1ISNSNng4OwjpkvswJOQwU8rnkvO8P+xg=
-k8s.io/csi-translation-lib v0.30.5 h1:QKtVWzqGBLfJhjHUCb4lnUiuYSjYFPfP+VtjUQ+wF0I=
-k8s.io/csi-translation-lib v0.30.5/go.mod h1:/P9f7OKEvHTnk0Fs8kAotEWPXkZA9skLFda8oeCU3u8=
-k8s.io/dynamic-resource-allocation v0.30.5 h1:Y5sDEG0aUwFng3Et8HRSxNsridE6asiFrLtCnho7IBk=
-k8s.io/dynamic-resource-allocation v0.30.5/go.mod h1:7ajxDRt/4bYxQ/H6hfy9uBRyl6YEyLLbL3Au1P3WHXU=
+k8s.io/code-generator v0.30.11 h1:+GZ9pVaVmGq3E6AwoThcQtEEkoP3YmVS1FZsQL8chag=
+k8s.io/code-generator v0.30.11/go.mod h1:lGTwJxvisDkV6a6F9zOa83BWhj7DtFoURd4TedQOo8s=
+k8s.io/component-base v0.30.11 h1:y0GDnKad+VFWKlfWqnu3oHKL5eRQ3AC8xBNwLGnMdyc=
+k8s.io/component-base v0.30.11/go.mod h1:JSQ3NPwvwGijXtApFqBYtsBitdbfXnwwLJkWuttRMV0=
+k8s.io/component-helpers v0.30.11 h1:HE1e73oHh1Qxw6rhHPWkhvMMWMvUOr3K6/BYyxUMGv0=
+k8s.io/component-helpers v0.30.11/go.mod h1:aSDI6Pd8Q442AwJHAOlrbeE55U8t5RNBILdAok4L/uY=
+k8s.io/controller-manager v0.30.11 h1:zABf3o41nd8L5Lxp1YjhQBhXucKgpFYJ7CBPoE5vKks=
+k8s.io/controller-manager v0.30.11/go.mod h1:PJ7eibyG6pE5xzlPMtITuUEdclHIMIG02KUFIB0vJ+8=
+k8s.io/cri-api v0.30.11 h1:zv8FysvmBJpWqnN0WWd50GIWvBEoETCERfev65/pWg0=
+k8s.io/cri-api v0.30.11/go.mod h1://4/umPJSW1ISNSNng4OwjpkvswJOQwU8rnkvO8P+xg=
+k8s.io/csi-translation-lib v0.30.11 h1:BkXKrXthnV19PzbWCI+li3aMsZ8xCxVZD/r8P+1ZPUg=
+k8s.io/csi-translation-lib v0.30.11/go.mod h1:lzHf1yf50o9clbqEtUJdgUMRj2DQxAoNVPjORwRl6xk=
+k8s.io/dynamic-resource-allocation v0.30.11 h1:HGY/QBWTf4tIxRVh5hJbJQm4ONkq9NIYnwFsvx/R70w=
+k8s.io/dynamic-resource-allocation v0.30.11/go.mod h1:JjUQ6SVHGIt/wwWqOAgiy0ku+uFrLBIhaNn8QBl0nLw=
 k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
 k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
 k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.30.5 h1:hv7tnGDrxbgpedBhBfGhSu9+v5aW5uhG6w36fBIK1K8=
-k8s.io/kms v0.30.5/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
+k8s.io/kms v0.30.11 h1:/aEco3RpkQVq5qvBM1HmxoOreE4C65COMRqwnaNG8ak=
+k8s.io/kms v0.30.11/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/kube-scheduler v0.30.5 h1:25D73QoAjUlz5lEvOktXSXpI7h2IUxp8GcTBeHUXiTo=
-k8s.io/kube-scheduler v0.30.5/go.mod h1:nh2XRIthymH2kULWYnbaWrUjiePmMjJboVjI19xYpy4=
-k8s.io/kubectl v0.30.5 h1:UHIdbLWHjL/+CaFc9ZESwpwdvpp9EDPLdgGolhMv8tQ=
-k8s.io/kubectl v0.30.5/go.mod h1:Pr+NYcX5Lx8eG04B/0t1rzjWcXsvvbda8swiZV9plQw=
-k8s.io/kubelet v0.30.5 h1:7jsJo1MkmGC3VEE0cf1tHiFKzi8DMBVNih3/OttSBaw=
-k8s.io/kubelet v0.30.5/go.mod h1:vzllyQKrbFpTq0WOHV6yX0gUs6KqTwCBAO13cAearMc=
-k8s.io/kubernetes v1.30.5 h1:uay5j9vFQnjuXRfooZ+wPzbDHPHLk4nRGKYr6tq+QvQ=
-k8s.io/kubernetes v1.30.5/go.mod h1:eWEwBuUIgunE32nhS0xM5hiN4BIp3ISrAIlGTyNN2JY=
-k8s.io/legacy-cloud-providers v0.30.5 h1:OK9wLKHccWnAn4oCtdfd7ZX06CgsDjnPaxM8qIOS6wM=
-k8s.io/legacy-cloud-providers v0.30.5/go.mod h1:AufXrjxt5om+ZgeJ3ZW+Pei0Js4SiKZfwZrPnpGY5F8=
-k8s.io/mount-utils v0.30.5 h1:kUdMKbpUCVRN4wdgusilsmhSIGQ8NHN4df7zPnGkDQU=
-k8s.io/mount-utils v0.30.5/go.mod h1:9sCVmwGLcV1MPvbZ+rToMDnl1QcGozy+jBPd0MsQLIo=
+k8s.io/kube-scheduler v0.30.11 h1:S6bfaYc9+lRYpbYoSxPA0pP76FMZyqW4t8B58001RY0=
+k8s.io/kube-scheduler v0.30.11/go.mod h1:Y0tIIzBX5uGm36GcFBLgqxjbsP86BsOTgbNkmpY6EkY=
+k8s.io/kubectl v0.30.11 h1:pBk1AzDpndHl9oBfqJS9J2CGYNyik+x/QanXSERM7gE=
+k8s.io/kubectl v0.30.11/go.mod h1:a8AoybYsyppPEctupfJk4uaSy9uUWdvNfqRmSzbPPCQ=
+k8s.io/kubelet v0.30.11 h1:YIyQ1y+GN2XLbSKWPwnBELRbHKQBhu39gF/dDVeMDnw=
+k8s.io/kubelet v0.30.11/go.mod h1:tmoSrMaJHuRHFB/IyQtIAOpWefz1W68CiP6ao54QzfM=
+k8s.io/kubernetes v1.30.11 h1:CWMxX1vZT3GcyA2UfsKPTTuOrGrt9MQHqINJfNM1/uc=
+k8s.io/kubernetes v1.30.11/go.mod h1:DGWYRXHx5NhImLiR9FvIVBsOKxwKZOX6bPF/YP7TqHY=
+k8s.io/legacy-cloud-providers v0.30.11 h1:cplI9FxmA0lHYpgvJPAUsQ7BdQ2yQopvGzGXErPIz/g=
+k8s.io/legacy-cloud-providers v0.30.11/go.mod h1:BaStZ1/ZtkZkLj7J17L2ampYyHbNi52wmFV9uDGwFIk=
+k8s.io/mount-utils v0.30.11 h1:Pro6d0wgwgTIlkXx+WGL5YPhVY6SHzIabs7zjzS+Prc=
+k8s.io/mount-utils v0.30.11/go.mod h1:9sCVmwGLcV1MPvbZ+rToMDnl1QcGozy+jBPd0MsQLIo=
 k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
 k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/factory.go
index e9c24f0a72a9493b50a722eed5bd78e7b4fb1666..e16b68a2a0a622a824e1f07227fecf6a2310b007 100644
--- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/factory.go
+++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/factory.go
@@ -32,9 +32,6 @@ import (
 // The namespace under which crio aliases are unique.
 const CrioNamespace = "crio"
 
-// The namespace suffix under which crio aliases are unique.
-const CrioNamespaceSuffix = ".scope"
-
 // The namespace systemd runs components under.
 const SystemdNamespace = "system-systemd"
 
@@ -117,21 +114,16 @@ func (f *crioFactory) CanHandleAndAccept(name string) (bool, bool, error) {
 		// TODO(runcom): should we include crio-conmon cgroups?
 		return false, false, nil
 	}
-	if strings.HasPrefix(path.Base(name), SystemdNamespace) {
-		return true, false, nil
-	}
 	if !strings.HasPrefix(path.Base(name), CrioNamespace) {
 		return false, false, nil
 	}
+	if strings.HasPrefix(path.Base(name), SystemdNamespace) {
+		return true, false, nil
+	}
 	// if the container is not associated with CRI-O, we can't handle it or accept it.
 	if !isContainerName(name) {
 		return false, false, nil
 	}
-
-	if !strings.HasSuffix(path.Base(name), CrioNamespaceSuffix) {
-		// this mean it's a sandbox container
-		return true, false, nil
-	}
 	return true, true, nil
 }
 
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 0000000000000000000000000000000000000000..acc9a670b220955a378aa5c8a6980a13cddd1e5e
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,264 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ metric.MeterProvider                  = MeterProvider{}
+	_ metric.Meter                          = Meter{}
+	_ metric.Observer                       = Observer{}
+	_ metric.Registration                   = Registration{}
+	_ metric.Int64Counter                   = Int64Counter{}
+	_ metric.Float64Counter                 = Float64Counter{}
+	_ metric.Int64UpDownCounter             = Int64UpDownCounter{}
+	_ metric.Float64UpDownCounter           = Float64UpDownCounter{}
+	_ metric.Int64Histogram                 = Int64Histogram{}
+	_ metric.Float64Histogram               = Float64Histogram{}
+	_ metric.Int64ObservableCounter         = Int64ObservableCounter{}
+	_ metric.Float64ObservableCounter       = Float64ObservableCounter{}
+	_ metric.Int64ObservableGauge           = Int64ObservableGauge{}
+	_ metric.Float64ObservableGauge         = Float64ObservableGauge{}
+	_ metric.Int64ObservableUpDownCounter   = Int64ObservableUpDownCounter{}
+	_ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+	_ metric.Int64Observer                  = Int64Observer{}
+	_ metric.Float64Observer                = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+	return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+	return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+	return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	return Int64Histogram{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	return Float64Histogram{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+	return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback; it performs no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including holding any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+	metric.Int64Observable
+	embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+	metric.Float64Observable
+	embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+	metric.Int64Observable
+	embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+	metric.Float64Observable
+	embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+	metric.Int64Observable
+	embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+	metric.Float64Observable
+	embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go
index 81a7d6ddb3a0fc9cd343ab82b378d7254ca8bda0..cb7b4bd1cf96137cfdf0a235f79d85893f0cd2d5 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go
@@ -1155,7 +1155,7 @@ func AuthorizeClientBearerToken(loopback *restclient.Config, authn *Authenticati
 	tokens[privilegedLoopbackToken] = &user.DefaultInfo{
 		Name:   user.APIServerUser,
 		UID:    uid,
-		Groups: []string{user.SystemPrivilegedGroup},
+		Groups: []string{user.AllAuthenticated, user.SystemPrivilegedGroup},
 	}
 
 	tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens, authn.APIAudiences)
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go
index 9effcb768f296fbd1416813b4c40d8d0481abfb7..8672c09420d391b0d8b55947e614272393c000aa 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go
@@ -50,7 +50,7 @@ var (
 func handleError(w http.ResponseWriter, r *http.Request, err error) {
 	errorMsg := fmt.Sprintf("Internal Server Error: %#v", r.RequestURI)
 	http.Error(w, errorMsg, http.StatusInternalServerError)
-	klog.Errorf(err.Error())
+	klog.Error(err.Error())
 }
 
 // requestWatermark is used to track maximal numbers of requests in a particular phase of handling
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go
index a1fc3168c5dd1ad88c0e9d33abf75431c705362c..10f9775efcc19526ba0414ba3c077d0c9af3e2d6 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go
@@ -399,6 +399,10 @@ func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupR
 		StorageObjectCountTracker: f.Options.StorageConfig.StorageObjectCountTracker,
 	}
 
+	if ret.StorageObjectCountTracker == nil {
+		ret.StorageObjectCountTracker = storageConfig.StorageObjectCountTracker
+	}
+
 	if f.Options.EnableWatchCache {
 		sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes)
 		if err != nil {
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/tracing.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/tracing.go
index d56e7df511d74eff5ed0e501237a69d93093af0b..7be62fad04cedd5fef7b277da04405048114689d 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/tracing.go
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/tracing.go
@@ -23,7 +23,9 @@ import (
 	"net"
 
 	"github.com/spf13/pflag"
+	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"go.opentelemetry.io/otel/metric/noop"
 	"go.opentelemetry.io/otel/sdk/resource"
 	"go.opentelemetry.io/otel/semconv/v1.12.0"
 	"google.golang.org/grpc"
@@ -48,6 +50,12 @@ var (
 	codecs    = serializer.NewCodecFactory(cfgScheme)
 )
 
+func init() {
+	// Prevent memory leak from OTel metrics, which we don't use:
+	// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/5190
+	otel.SetMeterProvider(noop.NewMeterProvider())
+}
+
 func init() {
 	install.Install(cfgScheme)
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v4.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v4.go
index 69ca934a0d72b4bbd7348e66f426bafc26f50212..6146bdf12a1827a38a5a34802c3e28afa131080c 100644
--- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v4.go
+++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v4.go
@@ -115,5 +115,5 @@ func (d *errorDecoderV4) decode(message []byte) error {
 		return errors.New("error stream protocol error: unknown error")
 	}
 
-	return fmt.Errorf(status.Message)
+	return errors.New(status.Message)
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
index b17d0a7a157becf5cfaeeb758d9ed09db15a1047..ee437300ffb3070a5931a2100ab5fc86468325a2 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
@@ -720,6 +720,8 @@ type KubeletConfiguration struct {
 	EnableSystemLogHandler *bool `json:"enableSystemLogHandler,omitempty"`
 	// enableSystemLogQuery enables the node log query feature on the /logs endpoint.
 	// EnableSystemLogHandler has to be enabled in addition for this feature to work.
+	// Enabling this feature has security implications. The recommendation is to enable it only when needed for debugging
+	// purposes and to disable it otherwise.
 	// Default: false
 	// +featureGate=NodeLogQuery
 	// +optional
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go
index d8d01b21e02b470c223a20cca7abc0b8ba59451b..90783f3fdf11add3dd3a7a81960d1fb34f38e8d3 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go
@@ -38,6 +38,8 @@ import (
 	jsonpatch "github.com/evanphx/json-patch"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric/noop"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"k8s.io/klog/v2"
@@ -120,6 +122,9 @@ import (
 
 func init() {
 	utilruntime.Must(logsapi.AddFeatureGates(utilfeature.DefaultMutableFeatureGate))
+	// Prevent memory leak from OTel metrics, which we don't use:
+	// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/5190
+	otel.SetMeterProvider(noop.NewMeterProvider())
 }
 
 const (
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go
index 8ad023f242ff6a74c3ba5278294b1b803f2859af..389c16319b7bf51d59fd341af9aa5e313f5cd8d7 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go
@@ -99,6 +99,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae
 						allowedReplacementPods = make([]string, 0, len(nodeToDaemonPods))
 					}
 					allowedReplacementPods = append(allowedReplacementPods, oldPod.Name)
+					numUnavailable++
 				case numUnavailable >= maxUnavailable:
 					// no point considering any other candidates
 					continue
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
index a42852c5ba3e155f216cd2c52cc4e07b6cff60b1..45194393e9bdc904402119975a2ceb5a553f56b3 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
@@ -541,7 +541,8 @@ const (
 	// alpha: v1.27
 	// beta: v1.30
 	//
-	// Enables querying logs of node services using the /logs endpoint
+	// Enables querying logs of node services using the /logs endpoint. Enabling this feature has security implications.
+	// The recommendation is to enable it only when needed for debugging purposes and to disable it otherwise.
 	NodeLogQuery featuregate.Feature = "NodeLogQuery"
 
 	// owner: @xing-yang @sonasingh46
@@ -562,6 +563,12 @@ const (
 	// Permits kubelet to run with swap enabled.
 	NodeSwap featuregate.Feature = "NodeSwap"
 
+	// owner: @cici37
+	// kep: https://kep.k8s.io/5080
+	//
+	// Enables ordered namespace deletion.
+	OrderedNamespaceDeletion featuregate.Feature = "OrderedNamespaceDeletion"
+
 	// owner: @mortent, @atiratree, @ravig
 	// kep: http://kep.k8s.io/3018
 	// alpha: v1.26
@@ -1148,6 +1155,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 
 	NodeSwap: {Default: true, PreRelease: featuregate.Beta},
 
+	OrderedNamespaceDeletion: {Default: false, PreRelease: featuregate.Beta},
+
 	PDBUnhealthyPodEvictionPolicy: {Default: true, PreRelease: featuregate.Beta},
 
 	PersistentVolumeLastPhaseTransitionTime: {Default: true, PreRelease: featuregate.Beta},
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
index dd61ac23fd04d348b79595b59b8314ffbbed34f0..8102dfea2fc87283f75a7d5ce8cca667f4c81dae 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
@@ -408,6 +408,8 @@ type KubeletConfiguration struct {
 	EnableSystemLogHandler bool
 	// EnableSystemLogQuery enables the node log query feature on the /logs endpoint.
 	// EnableSystemLogHandler has to be enabled in addition for this feature to work.
+	// Enabling this feature has security implications. The recommendation is to enable it only when needed for debugging
+	// purposes and to disable it otherwise.
 	// +featureGate=NodeLogQuery
 	// +optional
 	EnableSystemLogQuery bool
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go
index c8baf64186087f9e45ec0fedb73e70fb7aab692c..f79f0cd57aea0a17be5dedbcc50d461daabf639d 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go
@@ -20,6 +20,7 @@ limitations under the License.
 package cm
 
 import (
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -117,7 +118,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
 			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-			return fmt.Errorf(message)
+			return errors.New(message)
 		}
 		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
 	}
@@ -126,7 +127,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
 			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-			return fmt.Errorf(message)
+			return errors.New(message)
 		}
 		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
 	}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal.go
index cd3a02649b2736fa1b4056af8c7a9bec296597f1..25806b892d6401b0a62158f31b6320aa485b14b1 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal.go
@@ -316,7 +316,7 @@ func (n *nodeLogQuery) splitNativeVsFileLoggers(ctx context.Context) ([]string,
 // copyServiceLogs invokes journalctl or Get-WinEvent with the provided args. Note that
 // services are explicitly passed here to account for the heuristics.
 func (n *nodeLogQuery) copyServiceLogs(ctx context.Context, w io.Writer, services []string, previousBoot int) {
-	cmdStr, args, err := getLoggingCmd(n, services)
+	cmdStr, args, cmdEnv, err := getLoggingCmd(n, services)
 	if err != nil {
 		fmt.Fprintf(w, "\nfailed to get logging cmd: %v\n", err)
 		return
@@ -324,6 +324,7 @@ func (n *nodeLogQuery) copyServiceLogs(ctx context.Context, w io.Writer, service
 	cmd := exec.CommandContext(ctx, cmdStr, args...)
 	cmd.Stdout = w
 	cmd.Stderr = w
+	cmd.Env = append(os.Environ(), cmdEnv...)
 
 	if err := cmd.Run(); err != nil {
 		if _, ok := err.(*exec.ExitError); ok {
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_linux.go
index 3e24f3c5fcb060efe51a2aa26b0ce60f6ee35a0c..156320e770b62f40e329bd62f3349ae4fc187e2c 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_linux.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_linux.go
@@ -26,9 +26,13 @@ import (
 )
 
 // getLoggingCmd returns the journalctl cmd and arguments for the given nodeLogQuery and boot. Note that
-// services are explicitly passed here to account for the heuristics
-func getLoggingCmd(n *nodeLogQuery, services []string) (string, []string, error) {
-	args := []string{
+// services are explicitly passed here to account for the heuristics.
+// The return values are:
+// - cmd: the command to be executed
+// - args: arguments to the command
+// - cmdEnv: environment variables to set when the command is executed
+func getLoggingCmd(n *nodeLogQuery, services []string) (cmd string, args []string, cmdEnv []string, err error) {
+	args = []string{
 		"--utc",
 		"--no-pager",
 		"--output=short-precise",
@@ -55,7 +59,7 @@ func getLoggingCmd(n *nodeLogQuery, services []string) (string, []string, error)
 		args = append(args, "--boot", fmt.Sprintf("%d", *n.Boot))
 	}
 
-	return "journalctl", args, nil
+	return "journalctl", args, nil, nil
 }
 
 // checkForNativeLogger checks journalctl output for a service
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_others.go
index 2f9e0ecb1a5dfc658800ed2ee9f1f7eab900c53f..9e4900710ea7ddc6a82b7683d58a6a6918557a9b 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_others.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_others.go
@@ -24,8 +24,8 @@ import (
 )
 
 // getLoggingCmd on unsupported operating systems returns the echo command and a warning message (as strings)
-func getLoggingCmd(n *nodeLogQuery, services []string) (string, []string, error) {
-	return "", []string{}, errors.New("Operating System Not Supported")
+func getLoggingCmd(n *nodeLogQuery, services []string) (cmd string, args []string, cmdEnv []string, err error) {
+	return "", args, cmdEnv, errors.New("Operating System Not Supported")
 }
 
 // checkForNativeLogger on unsupported operating systems returns false
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_windows.go
index a805cfc5453d8112a94230adc16678766bb06265..ffe2df1772da6a704c51d4465182ae304304fde5 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_windows.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_server_journal_windows.go
@@ -27,43 +27,107 @@ import (
 
 const powershellExe = "PowerShell.exe"
 
-// getLoggingCmd returns the powershell cmd and arguments for the given nodeLogQuery and boot
-func getLoggingCmd(n *nodeLogQuery, services []string) (string, []string, error) {
-	args := []string{
+// getLoggingCmd returns the powershell cmd, arguments, and environment variables for the given nodeLogQuery and boot.
+// All string inputs are passed as environment variables to prevent subcommand expressions from being executed.
+// The return values are:
+// - cmd: the command to be executed
+// - args: arguments to the command
+// - cmdEnv: environment variables to set when the command is executed
+func getLoggingCmd(n *nodeLogQuery, services []string) (cmd string, args []string, cmdEnv []string, err error) {
+	cmdEnv = getLoggingCmdEnv(n, services)
+
+	var includeSinceTime, includeUntilTime, includeTailLines, includePattern bool
+	if n.SinceTime != nil {
+		includeSinceTime = true
+	}
+	if n.UntilTime != nil {
+		includeUntilTime = true
+	}
+	if n.TailLines != nil {
+		includeTailLines = true
+	}
+	if len(n.Pattern) > 0 {
+		includePattern = true
+	}
+
+	var includeServices []bool
+	for _, service := range services {
+		includeServices = append(includeServices, len(service) > 0)
+	}
+
+	args = getLoggingCmdArgs(includeSinceTime, includeUntilTime, includeTailLines, includePattern, includeServices)
+
+	return powershellExe, args, cmdEnv, nil
+}
+
+// getLoggingCmdArgs returns arguments that need to be passed to powershellExe
+func getLoggingCmdArgs(includeSinceTime, includeUntilTime, includeTailLines, includePattern bool, services []bool) (args []string) {
+	args = []string{
 		"-NonInteractive",
 		"-ExecutionPolicy", "Bypass",
 		"-Command",
 	}
 
-	psCmd := "Get-WinEvent -FilterHashtable @{LogName='Application'"
-	if n.SinceTime != nil {
-		psCmd += fmt.Sprintf("; StartTime='%s'", n.SinceTime.Format(dateLayout))
+	psCmd := `Get-WinEvent -FilterHashtable @{LogName='Application'`
+
+	if includeSinceTime {
+		psCmd += fmt.Sprintf(`; StartTime="$Env:kubelet_sinceTime"`)
 	}
-	if n.UntilTime != nil {
-		psCmd += fmt.Sprintf("; EndTime='%s'", n.UntilTime.Format(dateLayout))
+	if includeUntilTime {
+		psCmd += fmt.Sprintf(`; EndTime="$Env:kubelet_untilTime"`)
 	}
+
 	var providers []string
-	for _, service := range services {
-		if len(service) > 0 {
-			providers = append(providers, "'"+service+"'")
+	for i := range services {
+		if services[i] {
+			providers = append(providers, fmt.Sprintf("$Env:kubelet_provider%d", i))
 		}
 	}
+
 	if len(providers) > 0 {
 		psCmd += fmt.Sprintf("; ProviderName=%s", strings.Join(providers, ","))
 	}
-	psCmd += "}"
-	if n.TailLines != nil {
-		psCmd += fmt.Sprintf(" -MaxEvents %d", *n.TailLines)
+
+	psCmd += `}`
+	if includeTailLines {
+		psCmd += fmt.Sprint(` -MaxEvents $Env:kubelet_tailLines`)
 	}
-	psCmd += " | Sort-Object TimeCreated"
-	if len(n.Pattern) > 0 {
-		psCmd += fmt.Sprintf(" | Where-Object -Property Message -Match '%s'", n.Pattern)
+	psCmd += ` | Sort-Object TimeCreated`
+
+	if includePattern {
+		psCmd += fmt.Sprintf(` | Where-Object -Property Message -Match "$Env:kubelet_pattern"`)
 	}
-	psCmd += " | Format-Table -AutoSize -Wrap"
+	psCmd += ` | Format-Table -AutoSize -Wrap`
 
 	args = append(args, psCmd)
 
-	return powershellExe, args, nil
+	return args
+}
+
+// getLoggingCmdEnv returns the environment variables that will be present when powershellExe is executed
+func getLoggingCmdEnv(n *nodeLogQuery, services []string) (cmdEnv []string) {
+	if n.SinceTime != nil {
+		cmdEnv = append(cmdEnv, fmt.Sprintf("kubelet_sinceTime=%s", n.SinceTime.Format(dateLayout)))
+	}
+	if n.UntilTime != nil {
+		cmdEnv = append(cmdEnv, fmt.Sprintf("kubelet_untilTime=%s", n.UntilTime.Format(dateLayout)))
+	}
+
+	for i, service := range services {
+		if len(service) > 0 {
+			cmdEnv = append(cmdEnv, fmt.Sprintf("kubelet_provider%d=%s", i, service))
+		}
+	}
+
+	if n.TailLines != nil {
+		cmdEnv = append(cmdEnv, fmt.Sprintf("kubelet_tailLines=%d", *n.TailLines))
+	}
+
+	if len(n.Pattern) > 0 {
+		cmdEnv = append(cmdEnv, fmt.Sprintf("kubelet_pattern=%s", n.Pattern))
+	}
+
+	return cmdEnv
 }
 
 // checkForNativeLogger always returns true for Windows
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go
index fea716b6ac6dc3e3f2499f3faed8e44737bd9966..ddb10a1aac17f7411e2b99faf13a0efa302a0bc7 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go
@@ -263,7 +263,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
 		m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", s.Message())
 		return s.Message(), ErrPreStartHook
 	}
-	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container %s", container.Name))
+	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container: %v", container.Name)
 
 	// Step 3: start the container.
 	err = m.runtimeService.StartContainer(ctx, containerID)
@@ -272,7 +272,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
 		m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
 		return s.Message(), kubecontainer.ErrRunContainer
 	}
-	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, fmt.Sprintf("Started container %s", container.Name))
+	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, "Started container %v", container.Name)
 
 	// Symlink container logs to the legacy container log location for cluster logging
 	// support.
@@ -751,7 +751,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P
 	if len(message) == 0 {
 		message = fmt.Sprintf("Stopping container %s", containerSpec.Name)
 	}
-	m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, message)
+	m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, "%v", message)
 
 	if gracePeriodOverride != nil {
 		gracePeriod = *gracePeriodOverride
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go
index 46d1c7d7653fb8f4fd27cb6d55e5ef5256b45fec..c0456e42fc01838ee617e76cb7180262f6a45cbc 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go
@@ -21,6 +21,7 @@ keep track of registered plugins.
 package cache
 
 import (
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -100,7 +101,7 @@ func (plugin *PluginInfo) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, d
 // that can be used in logs.
 // The msg format follows the pattern "<prefixMsg> <plugin details>: <err> ",
 func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for plugins to register
@@ -108,7 +109,7 @@ func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (de
 // The msg format follows the pattern "<prefixMsg> <plugin details>: <err> ".
 func (plugin *PluginInfo) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := plugin.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 // Generates an error string with the format ": <err>" if err exists
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go
index a630d8076b56726a5288cb3b046455ec83441fb5..9755af60741ba943f99fc2c62322284289773c29 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go
@@ -437,17 +437,6 @@ func (s *Server) InstallDefaultHandlers() {
 	s.restfulCont.Handle(proberMetricsPath,
 		compbasemetrics.HandlerFor(p, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}),
 	)
-
-	// Only enable checkpoint API if the feature is enabled
-	if utilfeature.DefaultFeatureGate.Enabled(features.ContainerCheckpoint) {
-		s.addMetricsBucketMatcher("checkpoint")
-		ws = &restful.WebService{}
-		ws.Path(checkpointPath).Produces(restful.MIME_JSON)
-		ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
-			To(s.checkpoint).
-			Operation("checkpoint"))
-		s.restfulCont.Add(ws)
-	}
 }
 
 // InstallDebuggingHandlers registers the HTTP request patterns that serve logs or run commands/containers
@@ -542,6 +531,17 @@ func (s *Server) InstallDebuggingHandlers() {
 		To(s.getRunningPods).
 		Operation("getRunningPods"))
 	s.restfulCont.Add(ws)
+
+	// Only enable checkpoint API if the feature is enabled
+	if utilfeature.DefaultFeatureGate.Enabled(features.ContainerCheckpoint) {
+		s.addMetricsBucketMatcher("checkpoint")
+		ws = &restful.WebService{}
+		ws.Path(checkpointPath).Produces(restful.MIME_JSON)
+		ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
+			To(s.checkpoint).
+			Operation("checkpoint"))
+		s.restfulCont.Add(ws)
+	}
 }
 
 // InstallDebuggingDisabledHandlers registers the HTTP request patterns that provide better error message
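Note: the two server.go hunks move the feature-gated checkpoint route from InstallDefaultHandlers into InstallDebuggingHandlers, so the endpoint is only registered when the debugging handlers are enabled at all. A self-contained sketch of the same guard shape using net/http as a stand-in for the kubelet's go-restful container (featureEnabled, the paths, and the port are placeholders):

```go
package main

import (
	"fmt"
	"net/http"
)

// featureEnabled stands in for a feature-gate lookup such as
// utilfeature.DefaultFeatureGate.Enabled(features.ContainerCheckpoint).
func featureEnabled(name string) bool { return name == "ContainerCheckpoint" }

// installDebuggingHandlers registers debug-only routes; the checkpoint route
// is added only when both debugging handlers and the feature gate are on.
func installDebuggingHandlers(mux *http.ServeMux, debugEnabled bool) {
	if !debugEnabled {
		return
	}
	mux.HandleFunc("/runningpods/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "running pods")
	})
	if featureEnabled("ContainerCheckpoint") {
		mux.HandleFunc("/checkpoint/", func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "checkpoint requested")
		})
	}
}

func main() {
	mux := http.NewServeMux()
	installDebuggingHandlers(mux, true)
	fmt.Println(http.ListenAndServe("127.0.0.1:10250", mux)) // illustrative port
}
```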
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go
index 2741e459f32cd120f434e7d1eb15e4d742bf5f3c..96ceeb82c5eea79d301e4a71e2dfe9cca80668aa 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go
@@ -168,6 +168,11 @@ type ActualStateOfWorld interface {
 	// or have a mount/unmount operation pending.
 	GetAttachedVolumes() []AttachedVolume
 
+	// GetAttachedVolume returns the volume with the given name that is known to be
+	// attached to the node. If no such volume is found, the second return value
+	// is false.
+	GetAttachedVolume(volumeName v1.UniqueVolumeName) (AttachedVolume, bool)

+
 	// SyncReconstructedVolume check the volume.outerVolumeSpecName in asw and
 	// the one populated from dsw, if they do not match, update this field from the value from dsw.
 	SyncReconstructedVolume(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, outerVolumeSpecName string)
@@ -1104,6 +1109,18 @@ func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
 	return allAttachedVolumes
 }
 
+func (asw *actualStateOfWorld) GetAttachedVolume(volumeName v1.UniqueVolumeName) (AttachedVolume, bool) {
+	asw.RLock()
+	defer asw.RUnlock()
+
+	volumeObj, ok := asw.attachedVolumes[volumeName]
+	if !ok {
+		return AttachedVolume{}, false
+	}
+
+	return asw.newAttachedVolume(&volumeObj), true
+}
+
 func (asw *actualStateOfWorld) GetUnmountedVolumes() []AttachedVolume {
 	asw.RLock()
 	defer asw.RUnlock()
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_common.go
index 4129247658b3d67f076d03a5458da2c4b3f1e634..cd818a32a66cfc017e8089df07e5e9d839cc07a9 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_common.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_common.go
@@ -270,6 +270,11 @@ func (rc *reconciler) unmountDetachDevices() {
 		// Check IsOperationPending to avoid marking a volume as detached if it's in the process of mounting.
 		if !rc.desiredStateOfWorld.VolumeExists(attachedVolume.VolumeName, attachedVolume.SELinuxMountContext) &&
 			!rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName, nestedpendingoperations.EmptyNodeName) {
+
+			// Re-read the actual state of the world; the volume may have been mounted in the meantime.
+			// This is safe because there is no pending operation (checked above) and no new operation
+			// can start in the meantime: the only goroutine that adds new operations is this reconciler.
+			attachedVolume, _ = rc.actualStateOfWorld.GetAttachedVolume(attachedVolume.VolumeName)
 			if attachedVolume.DeviceMayBeMounted() {
 				// Volume is globally mounted to device, unmount it
 				klog.V(5).InfoS(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", ""))
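Note: the previous two files work together — the new GetAttachedVolume accessor lets unmountDetachDevices refresh its copy of the volume right before deciding whether to unmount the device or mark it detached, instead of trusting a snapshot that may have gone stale. A stripped-down sketch of the pattern with simplified stand-in types (not the kubelet cache):

```go
package main

import (
	"fmt"
	"sync"
)

// attachedVolume is a simplified stand-in for the kubelet's AttachedVolume.
type attachedVolume struct {
	Name          string
	DeviceMounted bool
}

// world mimics the actual state of the world: a lock-guarded map plus a
// single-volume lookup that reports whether the volume is still present.
type world struct {
	sync.RWMutex
	volumes map[string]attachedVolume
}

func (w *world) GetAttachedVolume(name string) (attachedVolume, bool) {
	w.RLock()
	defer w.RUnlock()
	v, ok := w.volumes[name]
	return v, ok
}

func main() {
	asw := &world{volumes: map[string]attachedVolume{"vol-1": {Name: "vol-1"}}}

	// Snapshot taken earlier in the reconcile loop.
	stale := attachedVolume{Name: "vol-1", DeviceMounted: false}

	// State changed after the snapshot (e.g. a global mount completed).
	asw.Lock()
	asw.volumes["vol-1"] = attachedVolume{Name: "vol-1", DeviceMounted: true}
	asw.Unlock()

	// Re-read right before acting, as unmountDetachDevices now does, so the
	// decision is based on current state rather than the snapshot.
	if fresh, ok := asw.GetAttachedVolume(stale.Name); ok {
		fmt.Println("device mounted:", fresh.DeviceMounted) // true → unmount the device first
	}
}
```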
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go
index bc33aaafb1cca79705a00bfe23745b74c1e4d2d4..2a4866c4df1e7ad8588a3643374c9664044384be 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go
@@ -19,6 +19,7 @@ package scheduler
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"net/http"
 	"strings"
@@ -287,7 +288,7 @@ func (h *HTTPExtender) Filter(
 		return nil, nil, nil, err
 	}
 	if result.Error != "" {
-		return nil, nil, nil, fmt.Errorf(result.Error)
+		return nil, nil, nil, errors.New(result.Error)
 	}
 
 	if h.nodeCacheCapable && result.NodeNames != nil {
@@ -373,7 +374,7 @@ func (h *HTTPExtender) Bind(binding *v1.Binding) error {
 		return err
 	}
 	if result.Error != "" {
-		return fmt.Errorf(result.Error)
+		return errors.New(result.Error)
 	}
 	return nil
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go
index 4ead255aca6ebb1b4b09fe4ce7e03f95c7f3c212..8594a447480cbaec854d3a591fd6ce49a6d18b51 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go
@@ -191,6 +191,8 @@ func (pl *DefaultPreemption) SelectVictimsOnNode(
 	}
 	var victims []*v1.Pod
 	numViolatingVictim := 0
+	// Sort potentialVictims by pod priority from high to low, so that higher
+	// priority pods are considered for reprieve first.
 	sort.Slice(potentialVictims, func(i, j int) bool { return util.MoreImportantPod(potentialVictims[i].Pod, potentialVictims[j].Pod) })
 	// Try to reprieve as many pods as possible. We first try to reprieve the PDB
 	// violating victims and then other non-violating ones. In both cases, we start
@@ -225,6 +227,11 @@ func (pl *DefaultPreemption) SelectVictimsOnNode(
 			return nil, 0, framework.AsStatus(err)
 		}
 	}
+
+	// Sort victims again after the reprieve passes so that the remaining victims stay ordered by priority from high to low.
+	if len(violatingVictims) != 0 && len(nonViolatingVictims) != 0 {
+		sort.Slice(victims, func(i, j int) bool { return util.MoreImportantPod(victims[i], victims[j]) })
+	}
 	return victims, numViolatingVictim, framework.NewStatus(framework.Success)
 }
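Note: because victims are assembled in two passes (PDB-violating pods first, then non-violating ones), the added sort restores a single priority ordering over the combined slice. A small sketch of the idea with an assumed comparator (moreImportant stands in for util.MoreImportantPod):

```go
package main

import (
	"fmt"
	"sort"
)

type pod struct {
	Name     string
	Priority int32
}

// moreImportant is a simplified stand-in for util.MoreImportantPod:
// higher priority sorts first.
func moreImportant(a, b pod) bool { return a.Priority > b.Priority }

func main() {
	// Victims collected in two passes, each pass internally ordered by priority.
	violating := []pod{{"v-high", 100}, {"v-low", 1}}
	nonViolating := []pod{{"n-mid", 50}}

	victims := append(append([]pod{}, violating...), nonViolating...)

	// Without the final sort the slice is grouped by pass, not globally by priority.
	sort.Slice(victims, func(i, j int) bool { return moreImportant(victims[i], victims[j]) })

	fmt.Println(victims) // [{v-high 100} {n-mid 50} {v-low 1}]
}
```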
 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go
index f6ce916c6bfe1d17f311a0bcd90fdaed7724a1e8..5ab477e1e0f032e6f6e651a3c912e6eac89f0665 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go
@@ -45,7 +45,6 @@ import (
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
-	"k8s.io/kubernetes/pkg/volume/util"
 )
 
 // ConflictReason is used for the special strings which explain why
@@ -127,8 +126,6 @@ type InTreeToCSITranslator interface {
 //  1. The scheduler takes a Pod off the scheduler queue and processes it serially:
 //     a. Invokes all pre-filter plugins for the pod. GetPodVolumeClaims() is invoked
 //     here, pod volume information will be saved in current scheduling cycle state for later use.
-//     If pod has bound immediate PVCs, GetEligibleNodes() is invoked to potentially reduce
-//     down the list of eligible nodes based on the bound PV's NodeAffinity (if any).
 //     b. Invokes all filter plugins, parallelized across nodes.  FindPodVolumes() is invoked here.
 //     c. Invokes all score plugins.  Future/TBD
 //     d. Selects the best node for the Pod.
@@ -151,14 +148,6 @@ type SchedulerVolumeBinder interface {
 	// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
 	GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error)
 
-	// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
-	// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
-	// in subsequent scheduling stages.
-	//
-	// If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made
-	// and all nodes should be considered.
-	GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string])
-
 	// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
 	// node and returns pod's volumes information.
 	//
@@ -381,55 +370,6 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
 	return
 }
 
-// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
-// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
-// in subsequent scheduling stages.
-//
-// Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes
-// should be considered.
-func (b *volumeBinder) GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
-	if len(boundClaims) == 0 {
-		return
-	}
-
-	var errs []error
-	for _, pvc := range boundClaims {
-		pvName := pvc.Spec.VolumeName
-		pv, err := b.pvCache.GetPV(pvName)
-		if err != nil {
-			errs = append(errs, err)
-			continue
-		}
-
-		// if the PersistentVolume is local and has node affinity matching specific node(s),
-		// add them to the eligible nodes
-		nodeNames := util.GetLocalPersistentVolumeNodeNames(pv)
-		if len(nodeNames) != 0 {
-			// on the first found list of eligible nodes for the local PersistentVolume,
-			// insert to the eligible node set.
-			if eligibleNodes == nil {
-				eligibleNodes = sets.New(nodeNames...)
-			} else {
-				// for subsequent finding of eligible nodes for the local PersistentVolume,
-				// take the intersection of the nodes with the existing eligible nodes
-				// for cases if PV1 has node affinity to node1 and PV2 has node affinity to node2,
-				// then the eligible node list should be empty.
-				eligibleNodes = eligibleNodes.Intersection(sets.New(nodeNames...))
-			}
-		}
-	}
-
-	if len(errs) > 0 {
-		logger.V(4).Info("GetEligibleNodes: one or more error occurred finding eligible nodes", "error", errs)
-		return nil
-	}
-
-	if eligibleNodes != nil {
-		logger.V(4).Info("GetEligibleNodes: reduced down eligible nodes", "nodes", eligibleNodes)
-	}
-	return
-}
-
 // AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
 // volume information for the chosen node, and:
 // 1. Update the pvCache with the new prebound PV.
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go
index 667669c65b44cdb36bfe95f4de4957024dc39bc1..f563c3c756372a16a0cdf3a399a3cd2d8c54f9f1 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go
@@ -20,7 +20,6 @@ import (
 	"context"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
 )
 
@@ -55,11 +54,6 @@ func (b *FakeVolumeBinder) GetPodVolumeClaims(_ klog.Logger, pod *v1.Pod) (podVo
 	return &PodVolumeClaims{}, nil
 }
 
-// GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes.
-func (b *FakeVolumeBinder) GetEligibleNodes(_ klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
-	return nil
-}
-
 // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
 func (b *FakeVolumeBinder) FindPodVolumes(_ klog.Logger, pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
 	return nil, b.config.FindReasons, b.config.FindErr
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
index cb56ed9260782e19418b22c472fa4c702c08acad..2bb0fb56b6d7d04367f7a0a9f79a2a28712bbe24 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
@@ -194,14 +194,6 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
 		status.AppendReason("pod has unbound immediate PersistentVolumeClaims")
 		return nil, status
 	}
-	// Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims.
-	var result *framework.PreFilterResult
-	if eligibleNodes := pl.Binder.GetEligibleNodes(logger, podVolumeClaims.boundClaims); eligibleNodes != nil {
-		result = &framework.PreFilterResult{
-			NodeNames: eligibleNodes,
-		}
-	}
-
 	state.Write(stateKey, &stateData{
 		podVolumesByNode: make(map[string]*PodVolumes),
 		podVolumeClaims: &PodVolumeClaims{
@@ -210,7 +202,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
 			unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding,
 		},
 	})
-	return result, nil
+	return nil, nil
 }
 
 // PreFilterExtensions returns prefilter extensions, pod add and remove.
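Note: with GetEligibleNodes removed, VolumeBinding.PreFilter now returns a nil *PreFilterResult, which under the scheduler framework's contract leaves every node eligible for the Filter phase. A toy sketch of that contract with stand-in types (not the framework's own):

```go
package main

import "fmt"

// preFilterResult is a simplified stand-in for the framework's PreFilterResult;
// a nil result leaves every node eligible, while a non-nil NodeNames set
// restricts later phases to that subset.
type preFilterResult struct{ NodeNames map[string]bool }

func nodesToFilter(all []string, r *preFilterResult) []string {
	if r == nil { // the path VolumeBinding.PreFilter now always takes
		return all
	}
	var subset []string
	for _, n := range all {
		if r.NodeNames[n] {
			subset = append(subset, n)
		}
	}
	return subset
}

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	fmt.Println(nodesToFilter(nodes, nil))                                                   // [node-a node-b node-c]
	fmt.Println(nodesToFilter(nodes, &preFilterResult{NodeNames: map[string]bool{"node-b": true}})) // [node-b]
}
```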
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
index 97eaa6936e6fb5581b634c350c33c373bc941807..9973644811583b1c3e0d5cd0023c2ae0f2e21f16 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
@@ -18,6 +18,7 @@ package cache
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -272,7 +273,7 @@ func (cache *cacheImpl) UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapsho
 		// We will try to recover by re-creating the lists for the next scheduling cycle, but still return an
 		// error to surface the problem, the error will likely cause a failure to the current scheduling cycle.
 		cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, true)
-		return fmt.Errorf(errMsg)
+		return errors.New(errMsg)
 	}
 
 	return nil
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go
index b17646320fc99976ffdab6bd5547503a8c7fa938..08dcbf6d505b9eca51b5be2edd8275f355be783c 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go
@@ -269,7 +269,7 @@ func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
 }
 
 func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, deviceMounterArgs volume.DeviceMounterArgs) error {
-	klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
+	klog.V(4).Info(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
 
 	if deviceMountPath == "" {
 		return errors.New(log("attacher.MountDevice failed, deviceMountPath is empty"))
@@ -363,7 +363,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		// finished, we should remove the directory.
 		if err != nil && volumetypes.IsOperationFinishedError(err) {
 			// clean up metadata
-			klog.Errorf(log("attacher.MountDevice failed: %v", err))
+			klog.Error(log("attacher.MountDevice failed: %v", err))
 			if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
 				klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", deviceMountPath, err))
 			}
@@ -377,7 +377,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 	}
 
 	if !stageUnstageSet {
-		klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
+		klog.Info(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
 		// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
 		return nil
 	}
@@ -415,7 +415,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		return err
 	}
 
-	klog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
+	klog.V(4).Info(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
 	return err
 }
 
@@ -604,7 +604,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
 			return nil
 		}
 
-		klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
+		klog.Error(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
 		return err
 	}
 
@@ -627,7 +627,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
 		return errors.New(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
 	}
 	if !stageUnstageSet {
-		klog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
+		klog.Info(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
 		// Just	delete the global directory + json file
 		if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
 			return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
@@ -650,7 +650,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
 		return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
 	}
 
-	klog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
+	klog.V(4).Info(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
 	return nil
 }
 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go
index fa2570b42c1fb390f10d6bd194bb9acf2593d0c1..80c6b088ddc7a9fddec384793bb409127e25291a 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go
@@ -105,7 +105,7 @@ var _ volume.CustomBlockVolumeMapper = &csiBlockMapper{}
 // Example: plugins/kubernetes.io/csi/volumeDevices/{specName}/dev
 func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
 	dir := getVolumeDevicePluginDir(m.specName, m.plugin.host)
-	klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
+	klog.V(4).Info(log("blockMapper.GetGlobalMapPath = %s", dir))
 	return dir, nil
 }
 
@@ -137,7 +137,7 @@ func (m *csiBlockMapper) getPublishPath() string {
 // returns: pods/{podUID}/volumeDevices/kubernetes.io~csi, {specName}
 func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
 	path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, utilstrings.EscapeQualifiedName(CSIPluginName))
-	klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
+	klog.V(4).Info(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
 	return path, m.specName
 }
 
@@ -149,10 +149,10 @@ func (m *csiBlockMapper) stageVolumeForBlock(
 	csiSource *v1.CSIPersistentVolumeSource,
 	attachment *storage.VolumeAttachment,
 ) (string, error) {
-	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock called"))
+	klog.V(4).Info(log("blockMapper.stageVolumeForBlock called"))
 
 	stagingPath := m.GetStagingPath()
-	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
+	klog.V(4).Info(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
 
 	// Check whether "STAGE_UNSTAGE_VOLUME" is set
 	stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
@@ -160,7 +160,7 @@ func (m *csiBlockMapper) stageVolumeForBlock(
 		return "", errors.New(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
 	}
 	if !stageUnstageSet {
-		klog.Infof(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
+		klog.Info(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
 		return "", nil
 	}
 	publishVolumeInfo := map[string]string{}
@@ -200,7 +200,7 @@ func (m *csiBlockMapper) stageVolumeForBlock(
 		return "", err
 	}
 
-	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
+	klog.V(4).Info(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
 	return stagingPath, nil
 }
 
@@ -212,7 +212,7 @@ func (m *csiBlockMapper) publishVolumeForBlock(
 	csiSource *v1.CSIPersistentVolumeSource,
 	attachment *storage.VolumeAttachment,
 ) (string, error) {
-	klog.V(4).Infof(log("blockMapper.publishVolumeForBlock called"))
+	klog.V(4).Info(log("blockMapper.publishVolumeForBlock called"))
 
 	publishVolumeInfo := map[string]string{}
 	if attachment != nil {
@@ -279,7 +279,7 @@ func (m *csiBlockMapper) publishVolumeForBlock(
 
 // SetUpDevice ensures the device is attached returns path where the device is located.
 func (m *csiBlockMapper) SetUpDevice() (string, error) {
-	klog.V(4).Infof(log("blockMapper.SetUpDevice called"))
+	klog.V(4).Info(log("blockMapper.SetUpDevice called"))
 
 	// Get csiSource from spec
 	if m.spec == nil {
@@ -341,7 +341,7 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
 }
 
 func (m *csiBlockMapper) MapPodDevice() (string, error) {
-	klog.V(4).Infof(log("blockMapper.MapPodDevice called"))
+	klog.V(4).Info(log("blockMapper.MapPodDevice called"))
 
 	// Get csiSource from spec
 	if m.spec == nil {
@@ -408,7 +408,7 @@ func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiCli
 	if err := csi.NodeUnpublishVolume(ctx, m.volumeID, publishPath); err != nil {
 		return errors.New(log("blockMapper.unpublishVolumeForBlock failed: %v", err))
 	}
-	klog.V(4).Infof(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
+	klog.V(4).Info(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
 
 	return nil
 }
@@ -421,7 +421,7 @@ func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClien
 		return errors.New(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
 	}
 	if !stageUnstageSet {
-		klog.Infof(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
+		klog.Info(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
 		return nil
 	}
 
@@ -431,7 +431,7 @@ func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClien
 	if err := csi.NodeUnstageVolume(ctx, m.volumeID, stagingPath); err != nil {
 		return errors.New(log("blockMapper.unstageVolumeForBlock failed: %v", err))
 	}
-	klog.V(4).Infof(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))
+	klog.V(4).Info(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))
 
 	// Remove stagingPath directory and its contents
 	if err := os.RemoveAll(stagingPath); err != nil {
@@ -457,7 +457,7 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error
 	stagingPath := m.GetStagingPath()
 	if _, err := os.Stat(stagingPath); err != nil {
 		if os.IsNotExist(err) {
-			klog.V(4).Infof(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
+			klog.V(4).Info(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
 		} else {
 			return err
 		}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
index 21cdf29807bafe5e5e320e27ddf12b04c2f1b928..b31a777c85a495b4adbaa7c792539c24eff125cb 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
@@ -101,7 +101,7 @@ func (c *csiMountMgr) SetUp(mounterArgs volume.MounterArgs) error {
 }
 
 func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
-	klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
+	klog.V(4).Info(log("Mounter.SetUpAt(%s)", dir))
 
 	csi, err := c.csiClientGetter.Get()
 	if err != nil {
@@ -346,7 +346,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *mounterArgs.FsGroup, c.volumeID))
 	}
 
-	klog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
+	klog.V(4).Info(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
 	return nil
 }
 
@@ -358,7 +358,7 @@ func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) {
 	csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
 	if err != nil {
 		if apierrors.IsNotFound(err) {
-			klog.V(5).Infof(log("CSIDriver %q not found, not adding service account token information", c.driverName))
+			klog.V(5).Info(log("CSIDriver %q not found, not adding service account token information", c.driverName))
 			return nil, nil
 		}
 		return nil, err
@@ -394,7 +394,7 @@ func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) {
 		outputs[audience] = tr.Status
 	}
 
-	klog.V(4).Infof(log("Fetched service account token attrs for CSIDriver %q", c.driverName))
+	klog.V(4).Info(log("Fetched service account token attrs for CSIDriver %q", c.driverName))
 	tokens, _ := json.Marshal(outputs)
 	return map[string]string{
 		"csi.storage.k8s.io/serviceAccount.tokens": string(tokens),
@@ -416,7 +416,7 @@ func (c *csiMountMgr) TearDown() error {
 	return c.TearDownAt(c.GetPath())
 }
 func (c *csiMountMgr) TearDownAt(dir string) error {
-	klog.V(4).Infof(log("Unmounter.TearDownAt(%s)", dir))
+	klog.V(4).Info(log("Unmounter.TearDownAt(%s)", dir))
 
 	volID := c.volumeID
 	csi, err := c.csiClientGetter.Get()
@@ -447,7 +447,7 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
 	if err := removeMountDir(c.plugin, dir); err != nil {
 		return errors.New(log("Unmounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
 	}
-	klog.V(4).Infof(log("Unmounter.TearDownAt successfully unmounted dir [%s]", dir))
+	klog.V(4).Info(log("Unmounter.TearDownAt successfully unmounted dir [%s]", dir))
 
 	return nil
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
index 28b2c50e631b0365cc5e2a1353a50c49ab9391ff..c8a317980dd5a1ecb026f40064d8fc6d53f93f74 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
@@ -97,7 +97,7 @@ var PluginHandler = &RegistrationHandler{}
 // ValidatePlugin is called by kubelet's plugin watcher upon detection
 // of a new registration socket opened by CSI Driver registrar side car.
 func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
-	klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s",
+	klog.Info(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s",
 		pluginName, endpoint, strings.Join(versions, ",")))
 
 	_, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions)
@@ -110,7 +110,7 @@ func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string,
 
 // RegisterPlugin is called when a plugin can be registered
 func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string, pluginClientTimeout *time.Duration) error {
-	klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
+	klog.Info(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
 
 	highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions)
 	if err != nil {
@@ -436,7 +436,7 @@ func (p *csiPlugin) NewMounter(
 }
 
 func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
-	klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
+	klog.V(4).Info(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
 
 	kvh, ok := p.host.(volume.KubeletVolumeHost)
 	if !ok {
@@ -705,7 +705,7 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
 }
 
 func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
-	klog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
+	klog.V(4).Info(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
 	unmapper := &csiBlockMapper{
 		plugin:   p,
 		podUID:   podUID,
@@ -847,7 +847,7 @@ func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) {
 	csiDriver, err := p.getCSIDriver(driverName)
 	if err != nil {
 		if apierrors.IsNotFound(err) {
-			klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", driverName))
+			klog.V(4).Info(log("CSIDriver %q not found, not adding pod information", driverName))
 			return false, nil
 		}
 		return false, err
@@ -855,7 +855,7 @@ func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) {
 
 	// if PodInfoOnMount is not set or false we do not set pod attributes
 	if csiDriver.Spec.PodInfoOnMount == nil || *csiDriver.Spec.PodInfoOnMount == false {
-		klog.V(4).Infof(log("CSIDriver %q does not require pod information", driverName))
+		klog.V(4).Info(log("CSIDriver %q does not require pod information", driverName))
 		return false, nil
 	}
 	return true, nil
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/expander.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/expander.go
index d6aae06010131ddeaf4fbf11a14c7352272eb538..8efdae665d4203077142098d52823b08b57e93e8 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/expander.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/expander.go
@@ -36,7 +36,7 @@ func (c *csiPlugin) RequiresFSResize() bool {
 }
 
 func (c *csiPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
-	klog.V(4).Infof(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath))
+	klog.V(4).Info(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath))
 	csiSource, err := getCSISourceFromSpec(resizeOptions.VolumeSpec)
 	if err != nil {
 		return false, errors.New(log("Expander.NodeExpand failed to get CSI persistent source: %v", err))
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go
index ec0e74c3b82efeba9a08d4945697a68804c3b0b1..732d0c9cf70735b9c0660cbbf2312b9174e4272c 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go
@@ -266,7 +266,7 @@ func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) {
 		return nil, errors.New(status.Status)
 	} else if status.Status != StatusSuccess {
 		errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message)
-		klog.Errorf(errMsg)
+		klog.Error(errMsg)
 		return nil, fmt.Errorf("%s", errMsg)
 	}
 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go
index 32b40ecf25713229fbdfcd243a7c54f418c1a9b4..1d201bcf4a1c0f1aa96b90d1f644529677b0a768 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go
@@ -366,7 +366,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
 	kubeClient := host.GetKubeClient()
 	if kubeClient == nil {
 		err := fmt.Errorf("failed to get kubeclient when creating portworx client")
-		klog.Errorf(err.Error())
+		klog.Error(err.Error())
 		return nil, err
 	}
 
@@ -379,7 +379,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
 
 	if svc == nil {
 		err = fmt.Errorf("service: %v not found. Consult Portworx docs to deploy it", pxServiceName)
-		klog.Errorf(err.Error())
+		klog.Error(err.Error())
 		return nil, err
 	}
 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go
index f1d2c9c59ffdf68473b7afe73d4237430348e0d2..3b3b55ba468fb3b4c0bc9d4d70d0bfb61ffaa99f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go
@@ -17,6 +17,7 @@ limitations under the License.
 package secret
 
 import (
+	"errors"
 	"fmt"
 
 	"k8s.io/klog/v2"
@@ -24,7 +25,7 @@ import (
 	utilstrings "k8s.io/utils/strings"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/volume"
@@ -190,7 +191,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs
 	optional := b.source.Optional != nil && *b.source.Optional
 	secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName)
 	if err != nil {
-		if !(errors.IsNotFound(err) && optional) {
+		if !(apierrors.IsNotFound(err) && optional) {
 			klog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err)
 			return err
 		}
@@ -282,8 +283,8 @@ func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32,
 					continue
 				}
 				errMsg := fmt.Sprintf("references non-existent secret key: %s", ktp.Key)
-				klog.Errorf(errMsg)
-				return nil, fmt.Errorf(errMsg)
+				klog.Error(errMsg)
+				return nil, errors.New(errMsg)
 			}
 
 			fileProjection.Data = []byte(content)
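Note: the import hunk renames the apimachinery errors package to apierrors so the standard library errors package can be imported alongside it. A minimal sketch of the resulting shape, assuming the k8s.io/apimachinery module is available on the module path:

```go
package main

import (
	"errors"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// The apimachinery helpers remain available under the apierrors alias...
	notFound := apierrors.NewNotFound(schema.GroupResource{Resource: "secrets"}, "my-secret")
	fmt.Println(apierrors.IsNotFound(notFound)) // true

	// ...while the bare errors identifier now refers to the standard library,
	// so an already-formatted message can be stored verbatim.
	fmt.Println(errors.New("references non-existent secret key: foo"))
}
```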
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go
index 3ac4dcffe9a90988238b6261377186281db6c82b..a6837fa65e9ba2c33cae4b96c35086cc70dfd373 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go
@@ -444,7 +444,8 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
 		if fileProjection.FsUser == nil {
 			continue
 		}
-		if err := os.Chown(fullPath, int(*fileProjection.FsUser), -1); err != nil {
+
+		if err := w.chown(fullPath, int(*fileProjection.FsUser), -1); err != nil {
 			klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err)
 			return err
 		}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..c12a0f4cd317e268fff538e79e9cb5b330f36546
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer_linux.go
@@ -0,0 +1,27 @@
+//go:build linux
+// +build linux
+
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import "os"
+
+// chown changes the numeric uid and gid of the named file.
+func (w *AtomicWriter) chown(name string, uid, gid int) error {
+	return os.Chown(name, uid, gid)
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..cdfb83e639725c1ab426a06bf50a2e1ebb45bb39
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer_unsupported.go
@@ -0,0 +1,33 @@
+//go:build !linux
+// +build !linux
+
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"runtime"
+
+	"k8s.io/klog/v2"
+)
+
+// chown changes the numeric uid and gid of the named file.
+// This is a no-op on unsupported platforms.
+func (w *AtomicWriter) chown(name string, uid, _ /* gid */ int) error {
+	klog.Warningf("%s: skipping change of Linux owner %v for file %s; unsupported on %s", w.logContext, uid, name, runtime.GOOS)
+	return nil
+}
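Note: the two new files replace the unconditional os.Chown call in writePayloadToDir with a build-tag pair — a Linux implementation and a no-op elsewhere, presumably because os.Chown is not usable on platforms such as Windows. A generic sketch of the same layout; the two blocks below represent two separate illustrative files, and the package and file names are hypothetical:

```go
//go:build linux
// +build linux

// chown_linux.go (illustrative name): on Linux, delegate to os.Chown,
// mirroring atomic_writer_linux.go above.
package fileutil

import "os"

func chownFile(name string, uid, gid int) error {
	return os.Chown(name, uid, gid)
}
```

```go
//go:build !linux
// +build !linux

// chown_other.go (illustrative name): skip ownership changes on platforms
// where they do not apply, mirroring atomic_writer_unsupported.go.
package fileutil

func chownFile(name string, uid, gid int) error {
	return nil
}
```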
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go
index 66ac77835c3e780b6c9de8e7fed5067ddfdf665f..18cbec072f420d0b2328b5f0eda3aec11f66702d 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go
@@ -31,8 +31,13 @@ import (
 	"k8s.io/klog/v2"
 )
 
-// FindMultipathDeviceForDevice given a device name like /dev/sdx, find the devicemapper parent
+// FindMultipathDeviceForDevice, given a device name like /dev/sdx, finds the devicemapper parent. If the device is
+// already a devicemapper node, it is returned unchanged.
 func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
+	if strings.HasPrefix(device, "/dev/dm-") {
+		return device
+	}
+
 	io := handler.getIo
 	disk, err := findDeviceForPath(device, io)
 	if err != nil {
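Note: the added guard makes FindMultipathDeviceForDevice idempotent — a path that is already a device-mapper node is returned as-is instead of being resolved again. A tiny sketch of the guard in isolation (resolveParent is a placeholder for the real sysfs lookup):

```go
package main

import (
	"fmt"
	"strings"
)

// resolveParent is a placeholder for the real sysfs walk that maps /dev/sdX
// to its device-mapper parent.
func resolveParent(device string) string { return "/dev/dm-0" }

func findMultipathDevice(device string) string {
	// Already a device-mapper node: nothing to resolve.
	if strings.HasPrefix(device, "/dev/dm-") {
		return device
	}
	return resolveParent(device)
}

func main() {
	fmt.Println(findMultipathDevice("/dev/sdb"))  // /dev/dm-0
	fmt.Println(findMultipathDevice("/dev/dm-3")) // /dev/dm-3 (unchanged)
}
```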
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/node_expander.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/node_expander.go
index cf9c57504e8572d6e57e4d9a824008539e9f1134..6730e689ac8895aa607ba87f158d4bbf495f665f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/node_expander.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/node_expander.go
@@ -116,7 +116,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
 
 		if err != nil {
 			msg := ne.vmt.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed to mark node expansion in progress: %v", err)
-			klog.Errorf(msg.Error())
+			klog.Error(msg.Error())
 			return false, err, testResponseData{}
 		}
 	}
@@ -126,7 +126,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
 			var markFailedError error
 			ne.pvc, markFailedError = util.MarkNodeExpansionFailed(ne.pvc, ne.kubeClient)
 			if markFailedError != nil {
-				klog.Errorf(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
+				klog.Error(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
 			}
 		}
 
@@ -135,7 +135,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
 		// expansion operation should not block mounting
 		if volumetypes.IsFailedPreconditionError(resizeErr) {
 			ne.actualStateOfWorld.MarkForInUseExpansionError(ne.vmt.VolumeName)
-			klog.Errorf(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed with %v", resizeErr).Error())
+			klog.Error(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed with %v", resizeErr).Error())
 			return false, nil, testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true}
 		}
 		return false, resizeErr, testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go
index 9d5aa77947ce923f6462f16f42cbe09d6e07309a..d46d411606a9fcfbc0fb321cbb022e82063b7ea2 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go
@@ -358,13 +358,13 @@ func (volume *VolumeToAttach) GenerateMsg(prefixMsg, suffixMsg string) (simpleMs
 
 // GenerateErrorDetailed returns detailed errors for volumes to attach
 func (volume *VolumeToAttach) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for volumes to attach
 func (volume *VolumeToAttach) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 // String combines key fields of the volume for logging in text format.
@@ -523,13 +523,13 @@ func (volume *VolumeToMount) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg
 
 // GenerateErrorDetailed returns detailed errors for volumes to mount
 func (volume *VolumeToMount) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for volumes to mount
 func (volume *VolumeToMount) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 // AttachedVolume represents a volume that is attached to a node.
@@ -585,13 +585,13 @@ func (volume *AttachedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMs
 
 // GenerateErrorDetailed returns detailed errors for attached volumes
 func (volume *AttachedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for attached volumes
 func (volume *AttachedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 // String combines key fields of the volume for logging in text format.
@@ -757,13 +757,13 @@ func (volume *MountedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg
 
 // GenerateErrorDetailed returns simple and detailed errors for mounted volumes
 func (volume *MountedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for mounted volumes
 func (volume *MountedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 type operationExecutor struct {
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go
index 10e9a83c2e79e3d9960b152e62ebbaa042a2a50e..69a78394a774dc1c9ce81a73496deab3f77a8955 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go
@@ -202,7 +202,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
 		volumePlugin, err :=
 			og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec)
 		if err != nil || volumePlugin == nil {
-			klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
+			klog.Error(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
 			continue
 		}
 		volumeSpecList, pluginExists := volumesPerPlugin[volumePlugin.GetPluginName()]
@@ -397,7 +397,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
 		for _, pod := range volumeToAttach.ScheduledPods {
 			og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg)
 		}
-		klog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))
+		klog.Info(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))
 
 		// Update actual state of world
 		addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
@@ -517,7 +517,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
 			return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
 		}
 
-		klog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))
+		klog.Info(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))
 
 		// Update actual state of world
 		actualStateOfWorld.MarkVolumeAsDetached(
@@ -731,7 +731,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
 			// Considering the above situations, we mark volume as uncertain here so that reconciler will trigger
 			// volume tear down when pod is deleted, and also makes sure pod will not start using it.
 			if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts); err != nil {
-				klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
+				klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
 			}
 			return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
 		}
@@ -789,7 +789,7 @@ func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount,
 		// Only devices which were uncertain can be marked as unmounted
 		markDeviceUnmountError := actualStateOfWorld.MarkDeviceAsUnmounted(volumeToMount.VolumeName)
 		if markDeviceUnmountError != nil {
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error())
 		}
 		return
 	}
@@ -800,7 +800,7 @@ func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount,
 		// which was previously marked as mounted here as uncertain.
 		markDeviceUncertainError := actualStateOfWorld.MarkDeviceAsUncertain(volumeToMount.VolumeName, devicePath, deviceMountPath, volumeToMount.SELinuxLabel)
 		if markDeviceUncertainError != nil {
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error())
 		}
 	}
 
@@ -818,7 +818,7 @@ func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount,
 
 		t := actualStateOfWorld.MarkVolumeAsUnmounted(volumeToMount.PodName, volumeToMount.VolumeName)
 		if t != nil {
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error())
 		}
 		return
 
@@ -828,7 +828,7 @@ func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount,
 		actualStateOfWorld.GetVolumeMountState(volumeToMount.VolumeName, markOpts.PodName) == VolumeNotMounted {
 		t := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts)
 		if t != nil {
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error())
 		}
 	}
 }
@@ -876,7 +876,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
 			markMountUncertainErr := actualStateOfWorld.MarkVolumeMountAsUncertain(opts)
 			if markMountUncertainErr != nil {
 				// There is nothing else we can do. Hope that UnmountVolume will be re-tried shortly.
-				klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error())
+				klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error())
 			}
 
 			// On failure, return error. Caller will log and retry.
@@ -899,7 +899,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
 			volumeToUnmount.PodName, volumeToUnmount.VolumeName)
 		if markVolMountedErr != nil {
 			// On failure, just log and exit
-			klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
+			klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
 		}
 
 		return volumetypes.NewOperationContext(nil, nil, migrated)
@@ -950,7 +950,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
 			// If the mount path could not be found, don't fail the unmount, but instead log a warning and proceed,
 			// using the value from deviceToDetach.DeviceMountPath, so that the device can be marked as unmounted
 			deviceMountPath = deviceToDetach.DeviceMountPath
-			klog.Warningf(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf(
+			klog.Warning(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf(
 				"GetDeviceMountPath failed, but unmount operation will proceed using deviceMountPath=%s: %v", deviceMountPath, err), ""))
 		}
 		refs, err := deviceMountableVolumePlugin.GetDeviceMountRefs(deviceMountPath)
@@ -969,7 +969,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
 			markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext)
 			if markDeviceUncertainErr != nil {
 				// There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly.
-				klog.Errorf(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
+				klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
 			}
 
 			// On failure, return error. Caller will log and retry.
@@ -990,7 +990,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
 			markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext)
 			if markDeviceUncertainErr != nil {
 				// There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly.
-				klog.Errorf(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
+				klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
 			}
 			eventErr, detailedErr := deviceToDetach.GenerateError(
 				"UnmountDevice failed",
@@ -1236,7 +1236,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
 			// Considering the above situations, we mark volume as uncertain here so that reconciler will trigger
 			// volume tear down when pod is deleted, and also makes sure pod will not start using it.
 			if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markVolumeOpts); err != nil {
-				klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
+				klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
 			}
 			return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
 		}
@@ -1355,7 +1355,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc(
 			volumeToUnmount.PodName, volumeToUnmount.VolumeName)
 		if markVolUnmountedErr != nil {
 			// On failure, just log and exit
-			klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
+			klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
 		}
 
 		return volumetypes.NewOperationContext(nil, nil, migrated)
@@ -1469,7 +1469,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
 			return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
 		}
 
-		klog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))
+		klog.Info(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))
 
 		// Update actual state of world
 		markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted(
@@ -1604,7 +1604,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach(
 	node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(volumeToDetach.NodeName), metav1.GetOptions{})
 	if fetchErr != nil {
 		if errors.IsNotFound(fetchErr) {
-			klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
+			klog.Warning(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
 			return nil
 		}
 
@@ -1621,7 +1621,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach(
 	}
 
 	// Volume is not marked as in use by node
-	klog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", ""))
+	klog.Info(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", ""))
 	return nil
 }
 
@@ -2045,7 +2045,7 @@ func (og *operationGenerator) doOnlineExpansion(volumeToMount VolumeToMount,
 	resizeDone, err := og.nodeExpandVolume(volumeToMount, actualStateOfWorld, resizeOptions)
 	if err != nil {
 		e1, e2 := volumeToMount.GenerateError("NodeExpandVolume.NodeExpandVolume failed", err)
-		klog.Errorf(e2.Error())
+		klog.Error(e2.Error())
 		return false, e1, e2
 	}
 	if resizeDone {
@@ -2076,7 +2076,7 @@ func (og *operationGenerator) expandVolumeDuringMount(volumeToMount VolumeToMoun
 		if pvcStatusCap.Cmp(pvSpecCap) < 0 {
 			if volumeToMount.VolumeSpec.ReadOnly {
 				simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
-				klog.Warningf(detailedMsg)
+				klog.Warning(detailedMsg)
 				og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
 				og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
 				return true, nil
@@ -2141,7 +2141,7 @@ func (og *operationGenerator) nodeExpandVolume(
 
 			if volumeToMount.VolumeSpec.ReadOnly {
 				simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
-				klog.Warningf(detailedMsg)
+				klog.Warning(detailedMsg)
 				og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
 				og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
 				return true, nil
@@ -2181,7 +2181,7 @@ func (og *operationGenerator) checkForRecoveryFromExpansion(pvc *v1.PersistentVo
 	// and hence we are going to keep expanding using older logic.
 	if resizeStatus == "" && allocatedResource == nil {
 		_, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume running with", "older external resize controller")
-		klog.Warningf(detailedMsg)
+		klog.Warning(detailedMsg)
 		return false
 	}
 	return true
@@ -2223,7 +2223,7 @@ func (og *operationGenerator) legacyCallNodeExpandOnPlugin(resizeOp nodeResizeOp
 		// expansion operation should not block mounting
 		if volumetypes.IsFailedPreconditionError(resizeErr) {
 			actualStateOfWorld.MarkForInUseExpansionError(volumeToMount.VolumeName)
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed", resizeErr).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed", resizeErr).Error())
 			return true, nil
 		}
 		return false, resizeErr
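
Note on the hunks above: every klog.Errorf(x.Error()) / klog.Infof(msg) / klog.Warningf(msg) call is switched to its non-formatting counterpart because the argument is a generated message, not a constant format string. Logging it verbatim means a stray '%' in a volume name or error text can no longer be misparsed as a printf verb, and it sidesteps the non-constant-format-string warning newer go vet versions raise for printf wrappers. A minimal, self-contained sketch of the difference; the error text and flag name are invented for illustration:

    package main

    import (
    	"errors"

    	"k8s.io/klog/v2"
    )

    func main() {
    	// A hypothetical error whose text happens to contain a printf verb.
    	err := errors.New("unexpected flag %q in mount options")

    	// Old pattern: the message is used as a format string with no
    	// arguments, so the stray verb is rendered as "%!q(MISSING)".
    	klog.Errorf(err.Error())

    	// New pattern: the message is logged verbatim.
    	klog.Error(err.Error())

    	klog.Flush()
    }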
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go
index b7197dbdfe4e476f7a6158e508a50de0acd9138f..e438ba21e46def5908cb82e2f4776e3aa5682350 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go
@@ -18,11 +18,12 @@ package recyclerclient
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/watch"
@@ -72,7 +73,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
 	// Start the pod
 	_, err = recyclerClient.CreatePod(pod)
 	if err != nil {
-		if errors.IsAlreadyExists(err) {
+		if apierrors.IsAlreadyExists(err) {
 			deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
 			if deleteErr != nil {
 				return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
@@ -128,7 +129,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E
 				}
 				if pod.Status.Phase == v1.PodFailed {
 					if pod.Status.Message != "" {
-						return fmt.Errorf(pod.Status.Message)
+						return errors.New(pod.Status.Message)
 					}
 					return fmt.Errorf("pod failed, pod.Status.Message unknown")
 				}
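
The same concern drives the recycler change just above and the equivalent edits in test/utils further below: pod.Status.Message (or a label name, or a wait reason) is arbitrary text, so it must be wrapped with errors.New rather than passed to fmt.Errorf as a format string. Renaming the apimachinery import to apierrors makes room for the standard library errors package alongside it. A small illustrative sketch, with an invented status message:

    package main

    import (
    	"errors"
    	"fmt"
    )

    func main() {
    	// Arbitrary, externally supplied text (think pod.Status.Message).
    	msg := "disk /dev/sdb is 95%full"

    	// Old: fmt.Errorf treats msg as a format string, so "%f" is
    	// consumed as a verb and rendered as "%!f(MISSING)".
    	fmt.Println(fmt.Errorf(msg))

    	// New: errors.New keeps the text exactly as supplied.
    	fmt.Println(errors.New(msg))
    }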
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go
index 601dc64601348bdcdfea25a55e53a4e649ac625f..d1691cd806ef908a15228c10bbcda20acde1adb4 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go
@@ -575,44 +575,6 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool {
 		volume.ConfigMap != nil
 }
 
-// GetLocalPersistentVolumeNodeNames returns the node affinity node name(s) for
-// local PersistentVolumes. nil is returned if the PV does not have any
-// specific node affinity node selector terms and match expressions.
-// PersistentVolume with node affinity has select and match expressions
-// in the form of:
-//
-//	nodeAffinity:
-//	  required:
-//	    nodeSelectorTerms:
-//	    - matchExpressions:
-//	      - key: kubernetes.io/hostname
-//	        operator: In
-//	        values:
-//	        - <node1>
-//	        - <node2>
-func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string {
-	if pv == nil || pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
-		return nil
-	}
-
-	var result sets.Set[string]
-	for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
-		var nodes sets.Set[string]
-		for _, matchExpr := range term.MatchExpressions {
-			if matchExpr.Key == v1.LabelHostname && matchExpr.Operator == v1.NodeSelectorOpIn {
-				if nodes == nil {
-					nodes = sets.New(matchExpr.Values...)
-				} else {
-					nodes = nodes.Intersection(sets.New(matchExpr.Values...))
-				}
-			}
-		}
-		result = result.Union(nodes)
-	}
-
-	return sets.List(result)
-}
-
 // GetPodVolumeNames returns names of volumes that are used in a pod,
 // either as filesystem mount or raw block device, together with list
 // of all SELinux contexts of all containers that use the volumes.
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go
index 23917ad9f3d87202f9af80c718e6ec339e5c343d..e0747c489483f519fea9427310c9b0091d4a2f9f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go
@@ -18,6 +18,7 @@ package utils
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"strings"
 	"time"
@@ -99,7 +100,7 @@ func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []str
 	}
 	for _, labelKey := range labelKeys {
 		if node.Labels != nil && len(node.Labels[labelKey]) != 0 {
-			return fmt.Errorf("Failed removing label " + labelKey + " of the node " + nodeName)
+			return errors.New("Failed removing label " + labelKey + " of the node " + nodeName)
 		}
 	}
 	return nil
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go
index 60f20751a8a5640efedd9f282c51189fe5975345..3c287b0b7a85f08b389b15ad68a1342073275fe6 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go
@@ -18,6 +18,7 @@ package utils
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"time"
 
@@ -226,7 +227,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 	})
 	if wait.Interrupted(err) {
 		LogReplicaSetsOfDeployment(deployment, nil, newRS, logf)
-		err = fmt.Errorf(reason)
+		err = errors.New(reason)
 	}
 	if newRS == nil {
 		return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go
index 3fb9b58dcc177400aef0311f37c394e1ba2be860..f5fa5c2a048cc0ff584d87e1e91385c02949155f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go
@@ -825,7 +825,7 @@ func (config *RCConfig) start(ctx context.Context) error {
 			*config.CreatedPods = startupStatus.Created
 		}
 		if !config.Silent {
-			config.RCConfigLog(startupStatus.String(config.Name))
+			config.RCConfigLog("%s", startupStatus.String(config.Name))
 		}
 
 		if config.PodStatusFile != nil {
@@ -849,8 +849,8 @@ func (config *RCConfig) start(ctx context.Context) error {
 		if podDeletionsCount > config.MaxAllowedPodDeletions {
 			// Number of pods which disappeared is over threshold
 			err := fmt.Errorf("%d pods disappeared for %s: %v", podDeletionsCount, config.Name, strings.Join(deletedPods, ", "))
-			config.RCConfigLog(err.Error())
-			config.RCConfigLog(diff.String(sets.NewString()))
+			config.RCConfigLog("%s", err.Error())
+			config.RCConfigLog("%s", diff.String(sets.NewString()))
 			return err
 		}
 
diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_linux.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_linux.go
index 07ce76de1986a72c1417a6cc0d34b7edf2a98ed4..65a00d706551c6c947894379930ecd223b1036db 100644
--- a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_linux.go
+++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_linux.go
@@ -393,7 +393,7 @@ func (*Mounter) List() ([]MountPoint, error) {
 
 func statx(file string) (unix.Statx_t, error) {
 	var stat unix.Statx_t
-	if err := unix.Statx(0, file, unix.AT_STATX_DONT_SYNC, 0, &stat); err != nil {
+	if err := unix.Statx(unix.AT_FDCWD, file, unix.AT_STATX_DONT_SYNC, 0, &stat); err != nil {
 		if err == unix.ENOSYS {
 			return stat, errStatxNotSupport
 		}
@@ -577,7 +577,7 @@ func (mounter *SafeFormatAndMount) formatAndMountSensitive(source string, target
 			sensitiveOptionsLog := sanitizedOptionsForLogging(options, sensitiveOptions)
 			detailedErr := fmt.Sprintf("format of disk %q failed: type:(%q) target:(%q) options:(%q) errcode:(%v) output:(%v) ", source, fstype, target, sensitiveOptionsLog, err, string(output))
 			klog.Error(detailedErr)
-			return NewMountError(FormatFailed, detailedErr)
+			return NewMountError(FormatFailed, "%s", detailedErr)
 		}
 
 		klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target)
@@ -600,7 +600,7 @@ func (mounter *SafeFormatAndMount) formatAndMountSensitive(source string, target
 	// Mount the disk
 	klog.V(4).Infof("Attempting to mount disk %s in %s format at %s", source, fstype, target)
 	if err := mounter.MountSensitive(source, target, fstype, options, sensitiveOptions); err != nil {
-		return NewMountError(mountErrorValue, err.Error())
+		return NewMountError(mountErrorValue, "%s", err.Error())
 	}
 
 	return nil
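
Two things change in mount_linux.go above. The NewMountError calls gain an explicit "%s" for the same non-constant-format-string reason as the logging fixes earlier. The statx fix is behavioral: the dirfd argument was 0, so a relative path would have been resolved against file descriptor 0 rather than the current working directory; unix.AT_FDCWD restores the expected semantics (absolute paths are unaffected). A rough, Linux-only sketch of the dirfd behavior, using an invented relative path:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	var stat unix.Statx_t

    	// With AT_FDCWD, a relative path is resolved against the process
    	// working directory, which is what callers of statx() expect.
    	err := unix.Statx(unix.AT_FDCWD, "some/relative/path",
    		unix.AT_STATX_DONT_SYNC, 0, &stat)
    	fmt.Println(err)

    	// Passing dirfd=0 would instead resolve relative paths against
    	// file descriptor 0 (usually stdin), which is almost never intended.
    }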
diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt
index 087711a0bd4164b02bce760d9f4c88937511a510..91cb04c23e4ca796126c60d04975f6c1c8814bc2 100644
--- a/cluster-autoscaler/vendor/modules.txt
+++ b/cluster-autoscaler/vendor/modules.txt
@@ -384,7 +384,7 @@ github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
 github.com/golang/protobuf/ptypes/wrappers
-# github.com/google/cadvisor v0.49.0
+# github.com/google/cadvisor v0.49.2
 ## explicit; go 1.19
 github.com/google/cadvisor/cache/memory
 github.com/google/cadvisor/collector
@@ -818,6 +818,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
 ## explicit; go 1.20
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
+go.opentelemetry.io/otel/metric/noop
 # go.opentelemetry.io/otel/sdk v1.20.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/sdk
@@ -1139,7 +1140,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.30.5 => k8s.io/api v0.30.5
+# k8s.io/api v0.30.11 => k8s.io/api v0.30.11
 ## explicit; go 1.22.0
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -1197,10 +1198,10 @@ k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
 k8s.io/api/storagemigration/v1alpha1
-# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.30.5
+# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.30.11
 ## explicit; go 1.22.0
 k8s.io/apiextensions-apiserver/pkg/features
-# k8s.io/apimachinery v0.30.5 => k8s.io/apimachinery v0.30.5
+# k8s.io/apimachinery v0.30.11 => k8s.io/apimachinery v0.30.11
 ## explicit; go 1.22.0
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -1264,7 +1265,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.30.5 => k8s.io/apiserver v0.30.5
+# k8s.io/apiserver v0.30.11 => k8s.io/apiserver v0.30.11
 ## explicit; go 1.22.0
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
@@ -1426,7 +1427,7 @@ k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/informers/e
 k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/informers/externalversions/autoscaling.x-k8s.io/v1beta1
 k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/informers/externalversions/internalinterfaces
 k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/listers/autoscaling.x-k8s.io/v1beta1
-# k8s.io/client-go v0.30.5 => k8s.io/client-go v0.30.5
+# k8s.io/client-go v0.30.11 => k8s.io/client-go v0.30.11
 ## explicit; go 1.22.0
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -1761,7 +1762,7 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.30.5 => k8s.io/cloud-provider v0.30.5
+# k8s.io/cloud-provider v0.30.11 => k8s.io/cloud-provider v0.30.11
 ## explicit; go 1.22.0
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
@@ -1784,7 +1785,7 @@ k8s.io/cloud-provider/volume/helpers
 # k8s.io/cloud-provider-aws v1.27.0
 ## explicit; go 1.20
 k8s.io/cloud-provider-aws/pkg/providers/v1
-# k8s.io/code-generator v0.30.5 => k8s.io/code-generator v0.30.5
+# k8s.io/code-generator v0.30.11 => k8s.io/code-generator v0.30.11
 ## explicit; go 1.22.0
 k8s.io/code-generator
 k8s.io/code-generator/cmd/applyconfiguration-gen
@@ -1820,7 +1821,7 @@ k8s.io/code-generator/cmd/register-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
 k8s.io/code-generator/third_party/forked/golang/reflect
-# k8s.io/component-base v0.30.5 => k8s.io/component-base v0.30.5
+# k8s.io/component-base v0.30.11 => k8s.io/component-base v0.30.11
 ## explicit; go 1.22.0
 k8s.io/component-base/cli/flag
 k8s.io/component-base/codec
@@ -1850,7 +1851,7 @@ k8s.io/component-base/tracing
 k8s.io/component-base/tracing/api/v1
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/component-helpers v0.30.5 => k8s.io/component-helpers v0.30.5
+# k8s.io/component-helpers v0.30.11 => k8s.io/component-helpers v0.30.11
 ## explicit; go 1.22.0
 k8s.io/component-helpers/apimachinery/lease
 k8s.io/component-helpers/node/topology
@@ -1860,7 +1861,7 @@ k8s.io/component-helpers/scheduling/corev1
 k8s.io/component-helpers/scheduling/corev1/nodeaffinity
 k8s.io/component-helpers/storage/ephemeral
 k8s.io/component-helpers/storage/volume
-# k8s.io/controller-manager v0.30.5 => k8s.io/controller-manager v0.30.5
+# k8s.io/controller-manager v0.30.11 => k8s.io/controller-manager v0.30.11
 ## explicit; go 1.22.0
 k8s.io/controller-manager/config
 k8s.io/controller-manager/config/v1
@@ -1872,16 +1873,16 @@ k8s.io/controller-manager/pkg/features
 k8s.io/controller-manager/pkg/features/register
 k8s.io/controller-manager/pkg/leadermigration/config
 k8s.io/controller-manager/pkg/leadermigration/options
-# k8s.io/cri-api v0.30.5 => k8s.io/cri-api v0.30.5
+# k8s.io/cri-api v0.30.11 => k8s.io/cri-api v0.30.11
 ## explicit; go 1.22.0
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1
 k8s.io/cri-api/pkg/errors
-# k8s.io/csi-translation-lib v0.27.0 => k8s.io/csi-translation-lib v0.30.5
+# k8s.io/csi-translation-lib v0.27.0 => k8s.io/csi-translation-lib v0.30.11
 ## explicit; go 1.22.0
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
-# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.30.5
+# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.30.11
 ## explicit; go 1.22.0
 k8s.io/dynamic-resource-allocation/resourceclaim
 k8s.io/dynamic-resource-allocation/structured/namedresources/cel
@@ -1903,7 +1904,7 @@ k8s.io/klog/v2/internal/severity
 k8s.io/klog/v2/internal/sloghandler
 k8s.io/klog/v2/internal/verbosity
 k8s.io/klog/v2/textlogger
-# k8s.io/kms v0.30.5 => k8s.io/kms v0.30.5
+# k8s.io/kms v0.30.11 => k8s.io/kms v0.30.11
 ## explicit; go 1.22.0
 k8s.io/kms/apis/v1beta1
 k8s.io/kms/apis/v2
@@ -1935,14 +1936,14 @@ k8s.io/kube-openapi/pkg/validation/errors
 k8s.io/kube-openapi/pkg/validation/spec
 k8s.io/kube-openapi/pkg/validation/strfmt
 k8s.io/kube-openapi/pkg/validation/strfmt/bson
-# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.30.5
+# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.30.11
 ## explicit; go 1.22.0
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.28.0 => k8s.io/kubectl v0.30.5
+# k8s.io/kubectl v0.28.0 => k8s.io/kubectl v0.30.11
 ## explicit; go 1.22.0
 k8s.io/kubectl/pkg/scale
-# k8s.io/kubelet v0.30.5 => k8s.io/kubelet v0.30.5
+# k8s.io/kubelet v0.30.11 => k8s.io/kubelet v0.30.11
 ## explicit; go 1.22.0
 k8s.io/kubelet/config/v1
 k8s.io/kubelet/config/v1alpha1
@@ -1964,7 +1965,7 @@ k8s.io/kubelet/pkg/cri/streaming
 k8s.io/kubelet/pkg/cri/streaming/portforward
 k8s.io/kubelet/pkg/cri/streaming/remotecommand
 k8s.io/kubelet/pkg/types
-# k8s.io/kubernetes v1.30.5
+# k8s.io/kubernetes v1.30.11
 ## explicit; go 1.22.0
 k8s.io/kubernetes/cmd/kubelet/app
 k8s.io/kubernetes/cmd/kubelet/app/options
@@ -2209,11 +2210,11 @@ k8s.io/kubernetes/pkg/volume/validation
 k8s.io/kubernetes/pkg/windows/service
 k8s.io/kubernetes/test/utils
 k8s.io/kubernetes/third_party/forked/golang/expansion
-# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.30.5
+# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.30.11
 ## explicit; go 1.22.0
 k8s.io/legacy-cloud-providers/gce
 k8s.io/legacy-cloud-providers/gce/gcpcredential
-# k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.30.5
+# k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.30.11
 ## explicit; go 1.22.0
 k8s.io/mount-utils
 # k8s.io/utils v0.0.0-20231127182322-b307cd553661
@@ -2364,34 +2365,34 @@ sigs.k8s.io/yaml/goyaml.v2
 # github.com/aws/aws-sdk-go/service/eks => github.com/aws/aws-sdk-go/service/eks v1.38.49
 # github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0
 # github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0
-# k8s.io/api => k8s.io/api v0.30.5
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.30.5
-# k8s.io/apimachinery => k8s.io/apimachinery v0.30.5
-# k8s.io/apiserver => k8s.io/apiserver v0.30.5
-# k8s.io/cli-runtime => k8s.io/cli-runtime v0.30.5
-# k8s.io/client-go => k8s.io/client-go v0.30.5
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.30.5
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.30.5
-# k8s.io/code-generator => k8s.io/code-generator v0.30.5
-# k8s.io/component-base => k8s.io/component-base v0.30.5
-# k8s.io/component-helpers => k8s.io/component-helpers v0.30.5
-# k8s.io/controller-manager => k8s.io/controller-manager v0.30.5
-# k8s.io/cri-api => k8s.io/cri-api v0.30.5
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.30.5
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.30.5
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.30.5
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.30.5
-# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.30.5
-# k8s.io/kubectl => k8s.io/kubectl v0.30.5
-# k8s.io/kubelet => k8s.io/kubelet v0.30.5
-# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.5
-# k8s.io/metrics => k8s.io/metrics v0.30.5
-# k8s.io/mount-utils => k8s.io/mount-utils v0.30.5
-# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.30.5
-# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.30.5
-# k8s.io/sample-controller => k8s.io/sample-controller v0.30.5
-# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.30.5
-# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.30.5
-# k8s.io/kms => k8s.io/kms v0.30.5
-# k8s.io/endpointslice => k8s.io/endpointslice v0.30.5
+# k8s.io/api => k8s.io/api v0.30.11
+# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.30.11
+# k8s.io/apimachinery => k8s.io/apimachinery v0.30.11
+# k8s.io/apiserver => k8s.io/apiserver v0.30.11
+# k8s.io/cli-runtime => k8s.io/cli-runtime v0.30.11
+# k8s.io/client-go => k8s.io/client-go v0.30.11
+# k8s.io/cloud-provider => k8s.io/cloud-provider v0.30.11
+# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.30.11
+# k8s.io/code-generator => k8s.io/code-generator v0.30.11
+# k8s.io/component-base => k8s.io/component-base v0.30.11
+# k8s.io/component-helpers => k8s.io/component-helpers v0.30.11
+# k8s.io/controller-manager => k8s.io/controller-manager v0.30.11
+# k8s.io/cri-api => k8s.io/cri-api v0.30.11
+# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.30.11
+# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.30.11
+# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.30.11
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.30.11
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.30.11
+# k8s.io/kubectl => k8s.io/kubectl v0.30.11
+# k8s.io/kubelet => k8s.io/kubelet v0.30.11
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.11
+# k8s.io/metrics => k8s.io/metrics v0.30.11
+# k8s.io/mount-utils => k8s.io/mount-utils v0.30.11
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.30.11
+# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.30.11
+# k8s.io/sample-controller => k8s.io/sample-controller v0.30.11
+# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.30.11
+# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.30.11
+# k8s.io/kms => k8s.io/kms v0.30.11
+# k8s.io/endpointslice => k8s.io/endpointslice v0.30.11
 # k8s.io/autoscaler/cluster-autoscaler/apis => ./apis
diff --git a/cluster-autoscaler/version/version.go b/cluster-autoscaler/version/version.go
index 16e4214af60f55c8685d6d2ce9fa072bce083f5d..88b2f8591ca16ce9a8408006f0f7418907e75465 100644
--- a/cluster-autoscaler/version/version.go
+++ b/cluster-autoscaler/version/version.go
@@ -17,4 +17,4 @@ limitations under the License.
 package version
 
 // ClusterAutoscalerVersion contains version of CA.
-const ClusterAutoscalerVersion = "1.30.3"
+const ClusterAutoscalerVersion = "1.30.4"