diff --git a/cluster-autoscaler/apis/go.mod b/cluster-autoscaler/apis/go.mod
index 1cef2407a657d55d1285cc96be3c4eed5647fa18..162dacc4bf439becf307c333836d1cfd70da4578 100644
--- a/cluster-autoscaler/apis/go.mod
+++ b/cluster-autoscaler/apis/go.mod
@@ -5,9 +5,9 @@ go 1.24.0
 require (
 	github.com/onsi/ginkgo/v2 v2.21.0
 	github.com/onsi/gomega v1.35.1
-	k8s.io/apimachinery v0.33.0-beta.0
-	k8s.io/client-go v0.33.0-beta.0
-	k8s.io/code-generator v0.33.0-beta.0
+	k8s.io/apimachinery v0.34.0-alpha.0
+	k8s.io/client-go v0.34.0-alpha.0
+	k8s.io/code-generator v0.34.0-alpha.0
 	sigs.k8s.io/structured-merge-diff/v4 v4.6.0
 )
 
@@ -35,22 +35,22 @@ require (
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/mod v0.21.0 // indirect
-	golang.org/x/net v0.33.0 // indirect
+	golang.org/x/net v0.38.0 // indirect
 	golang.org/x/oauth2 v0.27.0 // indirect
-	golang.org/x/sync v0.11.0 // indirect
-	golang.org/x/sys v0.30.0 // indirect
-	golang.org/x/term v0.29.0 // indirect
-	golang.org/x/text v0.22.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/text v0.23.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
 	google.golang.org/protobuf v1.36.5 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.33.0-beta.0 // indirect
+	k8s.io/api v0.34.0-alpha.0 // indirect
 	k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 // indirect
+	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
diff --git a/cluster-autoscaler/apis/go.sum b/cluster-autoscaler/apis/go.sum
index f67423488470312fcbc8626becea955a19be8fbf..8d44dc9b160a04b9fd17c270ef9f2c055d27281b 100644
--- a/cluster-autoscaler/apis/go.sum
+++ b/cluster-autoscaler/apis/go.sum
@@ -93,26 +93,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
 golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
-golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
 golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -137,20 +137,20 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.33.0-beta.0 h1:/sAUrfXsjKPST2mZjpWhjRdzSR6SD5KlJpiOgCQQhAQ=
-k8s.io/api v0.33.0-beta.0/go.mod h1:TYyCgedkG4OVS4+4D2n25BdbMcexMSLx6Y7OkAzkxLQ=
-k8s.io/apimachinery v0.33.0-beta.0 h1:vLDBChfQwyimk6AbuT7OZOIqxSg/44JlXuxqBk85j68=
-k8s.io/apimachinery v0.33.0-beta.0/go.mod h1:S2OIkExGqJOXYSYcAJwQ9zWcc6BkBUdTJUu4M7z0cvo=
-k8s.io/client-go v0.33.0-beta.0 h1:xRGKK5hU39pb6CFDCDOOlG+LEenB93/RK9hoP4eyAsU=
-k8s.io/client-go v0.33.0-beta.0/go.mod h1:RF6hSu+FncpgHQs1zA1UfGbMq8gxay89r37bCQe+Mj4=
-k8s.io/code-generator v0.33.0-beta.0 h1:QYiWYFUT9G7lnF1ucDYr/sZUaG/kptrooX2PJxEL+Go=
-k8s.io/code-generator v0.33.0-beta.0/go.mod h1:RBvFpvqtyQygCBjMayNyYqdzy+89LdzqAx0Th+dgmzQ=
+k8s.io/api v0.34.0-alpha.0 h1:plVaaO0yCTOGvWjEiEvvecQOPpf/IYdLnVMsfGfGMQo=
+k8s.io/api v0.34.0-alpha.0/go.mod h1:brriDRpq4yMP4PN4P48NfXVLwWSwaIFSe0+pOajiwjQ=
+k8s.io/apimachinery v0.34.0-alpha.0 h1:arymqm+uCpPEAVWBCvNF+yq01AJzsoUeUd2DYpoHuzc=
+k8s.io/apimachinery v0.34.0-alpha.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/client-go v0.34.0-alpha.0 h1:+hfihZ7vffuzoS4BoYg2nWs+9Bc1hXpZ7+iev2ISCo0=
+k8s.io/client-go v0.34.0-alpha.0/go.mod h1:0sClwbFRpXuYhqaJEqLiy+e9dlC7FOhFHc9ZdvLDAbU=
+k8s.io/code-generator v0.34.0-alpha.0 h1:aM4APBz/eAR8Qw4RWiCpfocZ2O2UUTi0UqTfvalouHc=
+k8s.io/code-generator v0.34.0-alpha.0/go.mod h1:lwzb0eIHnmHnkhcHbxXf87XR512Xm7mF2RHtDKEW71c=
 k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
 k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 h1:t0huyHnz6HsokckRxAF1bY0cqPFwzINKCL7yltEjZQc=
-k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
diff --git a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml
index ff1105e43ca7354028d6e4ef0a3dbbcdbb246372..92997423ae7dd41784d2943f0190288f2dc61e91 100644
--- a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml
+++ b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml
@@ -51,7 +51,7 @@ rules:
     resources: ["statefulsets", "replicasets", "daemonsets"]
     verbs: ["watch", "list", "get"]
   - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
+    resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
     verbs: ["watch", "list", "get"]
   - apiGroups: ["batch", "extensions"]
     resources: ["jobs"]
@@ -146,7 +146,7 @@ spec:
           type: RuntimeDefault
       serviceAccountName: cluster-autoscaler
       containers:
-        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
+        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.1
           name: cluster-autoscaler
           resources:
             limits:
diff --git a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-multi-asg.yaml b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-multi-asg.yaml
index 1eef576adc770a2544507f08a41d757833cbf3b5..e1407b9ab2699124e17b08abc9dace0cd9f10548 100644
--- a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-multi-asg.yaml
+++ b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-multi-asg.yaml
@@ -51,7 +51,7 @@ rules:
     resources: ["statefulsets", "replicasets", "daemonsets"]
     verbs: ["watch", "list", "get"]
   - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
+    resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
     verbs: ["watch", "list", "get"]
   - apiGroups: ["batch", "extensions"]
     resources: ["jobs"]
@@ -146,7 +146,7 @@ spec:
           type: RuntimeDefault
       serviceAccountName: cluster-autoscaler
       containers:
-        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
+        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.1
           name: cluster-autoscaler
           resources:
             limits:
diff --git a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-one-asg.yaml b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-one-asg.yaml
index 67d57bc4dcdc7c40b424255d693a6f58f6ebab41..6b5daec774df0a2951bef8e7b9f9fe3bb70f1704 100644
--- a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-one-asg.yaml
+++ b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-one-asg.yaml
@@ -51,7 +51,7 @@ rules:
     resources: ["statefulsets", "replicasets", "daemonsets"]
     verbs: ["watch", "list", "get"]
   - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
+    resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
     verbs: ["watch", "list", "get"]
   - apiGroups: ["batch", "extensions"]
     resources: ["jobs"]
@@ -146,7 +146,7 @@ spec:
           type: RuntimeDefault
       serviceAccountName: cluster-autoscaler
       containers:
-        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
+        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.1
           name: cluster-autoscaler
           resources:
             limits:
diff --git a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-control-plane.yaml b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-control-plane.yaml
index 1bc986ce19d5f7d9e55e140ff7cceb85a22308be..81f54347f4e07709cc8913d10b32412a62c3029d 100644
--- a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-control-plane.yaml
+++ b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-control-plane.yaml
@@ -51,7 +51,7 @@ rules:
     resources: ["statefulsets", "replicasets", "daemonsets"]
     verbs: ["watch", "list", "get"]
   - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
+    resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
     verbs: ["watch", "list", "get"]
   - apiGroups: ["batch", "extensions"]
     resources: ["jobs"]
@@ -153,7 +153,7 @@ spec:
       nodeSelector:
         kubernetes.io/role: control-plane
       containers:
-        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
+        - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.1
           name: cluster-autoscaler
           resources:
             limits:
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go
index c4d0007a322c5595d6a314cf8228fe7c321b74c3..cd4739423622c3fc17411419d849c80932913419 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go
@@ -20,7 +20,6 @@ import (
 	"context"
 	"fmt"
 	"net/http"
-	"strings"
 	"testing"
 	"time"
 
@@ -422,8 +421,7 @@ func TestDeleteInstances(t *testing.T) {
 		},
 	}, nil)
 	err = as.DeleteInstances(instances)
-	expectedErrStr := "The specified account is disabled."
-	assert.True(t, strings.Contains(err.Error(), expectedErrStr))
+	assert.Error(t, err)
 }
 
 func TestAgentPoolDeleteNodes(t *testing.T) {
@@ -478,8 +476,7 @@ func TestAgentPoolDeleteNodes(t *testing.T) {
 			ObjectMeta: v1.ObjectMeta{Name: "node"},
 		},
 	})
-	expectedErrStr := "The specified account is disabled."
-	assert.True(t, strings.Contains(err.Error(), expectedErrStr))
+	assert.Error(t, err)
 
 	as.minSize = 3
 	err = as.DeleteNodes([]*apiv1.Node{})
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/README.md b/cluster-autoscaler/cloudprovider/clusterapi/README.md
index 00a9c1fba2f1eb15935b4e531c46428b40922a5b..686230fb49967946bf39c02863a072a30221bb61 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/README.md
+++ b/cluster-autoscaler/cloudprovider/clusterapi/README.md
@@ -214,6 +214,11 @@ autoscaler about the sizing of the nodes in the node group. At the minimum,
 you must specify the CPU and memory annotations, these annotations should
 match the expected capacity of the nodes created from the infrastructure.
 
+> Note: The scale-from-zero annotations override any capacity information
+> supplied by the Cluster API provider in the infrastructure machine templates.
+> If both the annotations and the provider-supplied capacity information are
+> present, the annotations take precedence.
+
 For example, if my MachineDeployment will create nodes that have "16000m" CPU,
 "128G" memory, "100Gi" ephemeral disk storage, 2 NVidia GPUs, and can support
 200 max pods, the following annotations will instruct the autoscaler how to
@@ -290,6 +295,12 @@ metadata:
     capacity.cluster-autoscaler.kubernetes.io/taints: "key1=value1:NoSchedule,key2=value2:NoExecute"
 ```
 
+> Note: The labels supplied through the capacity annotation are combined with
+> the labels propagated from the scalable Cluster API resource; the annotation
+> does not override the labels set on the scalable resource.
+> See the [Cluster API Book chapter on Metadata propagation](https://cluster-api.sigs.k8s.io/reference/api/metadata-propagation)
+> for more information.
+
 #### Per-NodeGroup autoscaling options
 
 Custom autoscaling options per node group (MachineDeployment/MachinePool/MachineSet) can be specified as annoations with a common prefix:
@@ -406,8 +417,6 @@ spec:
        ## replicas: 1
 ```
 
-**Warning**: If the Autoscaler is enabled **and** the replicas field is set for a `MachineDeployment` or `MachineSet` the Cluster may enter a broken state where replicas become unpredictable.
-
 If the replica field is unset in the Cluster definition Autoscaling can be enabled [as described above](#enabling-autoscaling)
 
 ## Special note on GPU instances
diff --git a/cluster-autoscaler/config/flags/flags.go b/cluster-autoscaler/config/flags/flags.go
index 11533a0822176bbb43fa85d767d5f21d0a1677d8..8012fde264a7f2fea40c1be305c61cbbfd180293 100644
--- a/cluster-autoscaler/config/flags/flags.go
+++ b/cluster-autoscaler/config/flags/flags.go
@@ -34,7 +34,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/utils/units"
 
 	"k8s.io/client-go/rest"
-	klog "k8s.io/klog/v2"
+	"k8s.io/klog/v2"
 	kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	scheduler_config "k8s.io/kubernetes/pkg/scheduler/apis/config"
 )
@@ -269,12 +269,12 @@ func createAutoscalingOptions() config.AutoscalingOptions {
 		klog.Fatalf("Failed to get scheduler config: %v", err)
 	}
 
-	if isFlagPassed("drain-priority-config") && isFlagPassed("max-graceful-termination-sec") {
+	if pflag.CommandLine.Changed("drain-priority-config") && pflag.CommandLine.Changed("max-graceful-termination-sec") {
 		klog.Fatalf("Invalid configuration, could not use --drain-priority-config together with --max-graceful-termination-sec")
 	}
 
 	var drainPriorityConfigMap []kubelet_config.ShutdownGracePeriodByPodPriority
-	if isFlagPassed("drain-priority-config") {
+	if pflag.CommandLine.Changed("drain-priority-config") {
 		drainPriorityConfigMap = parseShutdownGracePeriodsAndPriorities(*drainPriorityConfig)
 		if len(drainPriorityConfigMap) == 0 {
 			klog.Fatalf("Invalid configuration, parsing --drain-priority-config")
@@ -409,16 +409,6 @@ func createAutoscalingOptions() config.AutoscalingOptions {
 	}
 }
 
-func isFlagPassed(name string) bool {
-	found := false
-	flag.Visit(func(f *flag.Flag) {
-		if f.Name == name {
-			found = true
-		}
-	})
-	return found
-}
-
 func minMaxFlagString(min, max int64) string {
 	return fmt.Sprintf("%v:%v", min, max)
 }
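
Note on the flags.go change above: the removed isFlagPassed helper relied on flag.Visit, which only reports flags explicitly set on the standard library flag set. The autoscaler's flags are registered there but, as the new test below also does via AddGoFlagSet, the actual parsing presumably goes through pflag, so flag.Visit could silently miss flags that were passed. pflag's FlagSet.Changed reports them reliably. A minimal, standalone sketch of that behavior - not part of the patch, and the flag name is only borrowed from the diff for illustration:

package main

import (
	"flag"
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// A flag defined on the standard library flag set, mirroring how the autoscaler registers flags.
	_ = flag.Int("max-graceful-termination-sec", 600, "example flag")

	// pflag wraps the Go flag set and performs the actual parsing.
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	_ = pflag.CommandLine.Parse([]string{"--max-graceful-termination-sec=60"})

	// flag.Visit sees nothing, because the standard flag set itself was never parsed...
	visited := false
	flag.Visit(func(f *flag.Flag) { visited = true })

	// ...while pflag records that the flag was explicitly changed.
	fmt.Println(visited, pflag.CommandLine.Changed("max-graceful-termination-sec"))
	// Expected under this assumption: false true
}
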
diff --git a/cluster-autoscaler/config/flags/flags_test.go b/cluster-autoscaler/config/flags/flags_test.go
index a10678426bf608310769f1104311313dcb99092b..3851a9a5daa7216377dd1615b8a0990fa5bf18b6 100644
--- a/cluster-autoscaler/config/flags/flags_test.go
+++ b/cluster-autoscaler/config/flags/flags_test.go
@@ -17,11 +17,15 @@ limitations under the License.
 package flags
 
 import (
+	"flag"
 	"testing"
 
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
 
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/spf13/pflag"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -146,3 +150,47 @@ func TestParseShutdownGracePeriodsAndPriorities(t *testing.T) {
 		})
 	}
 }
+
+func TestCreateAutoscalingOptions(t *testing.T) {
+	for _, tc := range []struct {
+		testName            string
+		flags               []string
+		wantOptionsAsserter func(t *testing.T, gotOptions config.AutoscalingOptions)
+	}{
+		{
+			testName: "DrainPriorityConfig defaults to an empty list when the flag isn't passed",
+			flags:    []string{},
+			wantOptionsAsserter: func(t *testing.T, gotOptions config.AutoscalingOptions) {
+				if diff := cmp.Diff([]kubelet_config.ShutdownGracePeriodByPodPriority{}, gotOptions.DrainPriorityConfig, cmpopts.EquateEmpty()); diff != "" {
+					t.Errorf("createAutoscalingOptions(): unexpected DrainPriorityConfig field (-want +got): %s", diff)
+				}
+			},
+		},
+		{
+			testName: "DrainPriorityConfig is parsed correctly when the flag passed",
+			flags:    []string{"--drain-priority-config", "5000:60,3000:50,0:40"},
+			wantOptionsAsserter: func(t *testing.T, gotOptions config.AutoscalingOptions) {
+				wantConfig := []kubelet_config.ShutdownGracePeriodByPodPriority{
+					{Priority: 5000, ShutdownGracePeriodSeconds: 60},
+					{Priority: 3000, ShutdownGracePeriodSeconds: 50},
+					{Priority: 0, ShutdownGracePeriodSeconds: 40},
+				}
+				if diff := cmp.Diff(wantConfig, gotOptions.DrainPriorityConfig); diff != "" {
+					t.Errorf("createAutoscalingOptions(): unexpected DrainPriorityConfig field (-want +got): %s", diff)
+				}
+			},
+		},
+	} {
+		t.Run(tc.testName, func(t *testing.T) {
+			pflag.CommandLine = pflag.NewFlagSet("test", pflag.ExitOnError)
+			pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+			err := pflag.CommandLine.Parse(tc.flags)
+			if err != nil {
+				t.Errorf("pflag.CommandLine.Parse() got unexpected error: %v", err)
+			}
+
+			gotOptions := createAutoscalingOptions()
+			tc.wantOptionsAsserter(t, gotOptions)
+		})
+	}
+}
diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
index 1ac006c6d06564f42f57dc2c59bef6cdf685204f..242caeb1a6b40b2a4d675e63dcffaaf0ab7653bf 100644
--- a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
+++ b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
@@ -26,7 +26,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -111,7 +110,7 @@ func TestFilterOutExpendable(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			processor := NewFilterOutExpendablePodListProcessor()
 			snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-			err := snapshot.SetClusterState(tc.nodes, nil, drasnapshot.Snapshot{})
+			err := snapshot.SetClusterState(tc.nodes, nil, nil)
 			assert.NoError(t, err)
 
 			pods, err := processor.Process(&context.AutoscalingContext{
diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
index 61303ed3a8d154abb3b78538dc60df6857abfc2e..5eb7db7b64e51ba791f723fb3db8d6c77dc43d91 100644
--- a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
+++ b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
@@ -27,7 +27,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/store"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -281,7 +280,7 @@ func BenchmarkFilterOutSchedulable(b *testing.B) {
 				}
 
 				clusterSnapshot := snapshotFactory()
-				if err := clusterSnapshot.SetClusterState(nodes, scheduledPods, drasnapshot.Snapshot{}); err != nil {
+				if err := clusterSnapshot.SetClusterState(nodes, scheduledPods, nil); err != nil {
 					assert.NoError(b, err)
 				}
 
diff --git a/cluster-autoscaler/core/scaledown/actuation/actuator.go b/cluster-autoscaler/core/scaledown/actuation/actuator.go
index 2542b383667505d1c5634bae73935786b3f36fdc..55ef2e5a8fa66a1367965ac3aa1b6b286604659a 100644
--- a/cluster-autoscaler/core/scaledown/actuation/actuator.go
+++ b/cluster-autoscaler/core/scaledown/actuation/actuator.go
@@ -406,7 +406,7 @@ func (a *Actuator) createSnapshot(nodes []*apiv1.Node) (clustersnapshot.ClusterS
 	scheduledPods := kube_util.ScheduledPods(pods)
 	nonExpendableScheduledPods := utils.FilterOutExpendablePods(scheduledPods, a.ctx.ExpendablePodsPriorityCutoff)
 
-	var draSnapshot drasnapshot.Snapshot
+	var draSnapshot *drasnapshot.Snapshot
 	if a.ctx.DynamicResourceAllocationEnabled && a.ctx.DraProvider != nil {
 		draSnapshot, err = a.ctx.DraProvider.Snapshot()
 		if err != nil {
diff --git a/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go b/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
index de00c8bb1180a4bf10640fd728b424da469d5b0b..41544693574b9496788e2dbdd15b3fca2a101a68 100644
--- a/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
+++ b/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
@@ -42,7 +42,7 @@ type testCase struct {
 	desc                        string
 	nodes                       []*apiv1.Node
 	pods                        []*apiv1.Pod
-	draSnapshot                 drasnapshot.Snapshot
+	draSnapshot                 *drasnapshot.Snapshot
 	draEnabled                  bool
 	wantUnneeded                []string
 	wantUnremovable             []*simulator.UnremovableNode
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
index 1ba94b3d6b7e0eb3bf171026cc22239b3abc9d9b..70be8d036a16f9616b2147ae30b7ba32aa8d7296 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
@@ -46,7 +46,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
 	processorstest "k8s.io/autoscaler/cluster-autoscaler/processors/test"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -1044,7 +1043,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
 	// build orchestrator
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods), drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods), nil)
 	assert.NoError(t, err)
 	nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
 		Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
@@ -1154,7 +1153,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
 	}
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1197,7 +1196,7 @@ func TestBinpackingLimiter(t *testing.T) {
 
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
 		Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
@@ -1257,7 +1256,7 @@ func TestScaleUpNoHelp(t *testing.T) {
 	}
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1412,7 +1411,7 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
 			listers := kube_util.NewListerRegistry(nil, nil, kube_util.NewTestPodLister(nil), nil, nil, nil, nil, nil, nil)
 			ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{BalanceSimilarNodeGroups: tc.balancingEnabled}, &fake.Clientset{}, listers, provider, nil, nil)
 			assert.NoError(t, err)
-			err = ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 			assert.NoError(t, err)
 			nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 			clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1496,7 +1495,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 			}
 			context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 			assert.NoError(t, err)
-			err = context.ClusterSnapshot.SetClusterState(nodes, podList, drasnapshot.Snapshot{})
+			err = context.ClusterSnapshot.SetClusterState(nodes, podList, nil)
 			assert.NoError(t, err)
 			nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 			clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1672,7 +1671,7 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
 	assert.NoError(t, err)
 
 	nodes := []*apiv1.Node{n1, n2}
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 	processors := processorstest.NewTestProcessors(&context)
diff --git a/cluster-autoscaler/core/scaleup/resource/manager_test.go b/cluster-autoscaler/core/scaleup/resource/manager_test.go
index 280942601ef3822a2db7790f33894575bd595578..7d4573afed790dda206a1a912ae2900e0ee42676 100644
--- a/cluster-autoscaler/core/scaleup/resource/manager_test.go
+++ b/cluster-autoscaler/core/scaleup/resource/manager_test.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/core/test"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
 	processorstest "k8s.io/autoscaler/cluster-autoscaler/processors/test"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
 	utils_test "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -72,7 +71,7 @@ func TestDeltaForNode(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -115,7 +114,7 @@ func TestResourcesLeft(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		_, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -168,7 +167,7 @@ func TestApplyLimits(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -235,7 +234,7 @@ func TestResourceManagerWithGpuResource(t *testing.T) {
 	assert.NoError(t, err)
 
 	nodes := []*corev1.Node{n1}
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go
index 95285b828923de14703ab66410a2c918f83aa9d4..365bc8526f967805c3384afae2b558befe34b2f9 100644
--- a/cluster-autoscaler/core/static_autoscaler.go
+++ b/cluster-autoscaler/core/static_autoscaler.go
@@ -276,8 +276,17 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 
 	stateUpdateStart := time.Now()
 
+	var draSnapshot *drasnapshot.Snapshot
+	if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.AutoscalingContext.DraProvider != nil {
+		var err error
+		draSnapshot, err = a.AutoscalingContext.DraProvider.Snapshot()
+		if err != nil {
+			return caerrors.ToAutoscalerError(caerrors.ApiCallError, err)
+		}
+	}
+
 	// Get nodes and pods currently living on cluster
-	allNodes, readyNodes, typedErr := a.obtainNodeLists()
+	allNodes, readyNodes, typedErr := a.obtainNodeLists(draSnapshot)
 	if typedErr != nil {
 		klog.Errorf("Failed to get node list: %v", typedErr)
 		return typedErr
@@ -302,6 +311,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 		klog.Errorf("Failed to get daemonset list: %v", err)
 		return caerrors.ToAutoscalerError(caerrors.ApiCallError, err)
 	}
+
 	// Snapshot scale-down actuation status before cache refresh.
 	scaleDownActuationStatus := a.scaleDownActuator.CheckStatus()
 	// Call CloudProvider.Refresh before any other calls to cloud provider.
@@ -335,14 +345,6 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 	}
 	nonExpendableScheduledPods := core_utils.FilterOutExpendablePods(originalScheduledPods, a.ExpendablePodsPriorityCutoff)
 
-	var draSnapshot drasnapshot.Snapshot
-	if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.AutoscalingContext.DraProvider != nil {
-		draSnapshot, err = a.AutoscalingContext.DraProvider.Snapshot()
-		if err != nil {
-			return caerrors.ToAutoscalerError(caerrors.ApiCallError, err)
-		}
-	}
-
 	if err := a.ClusterSnapshot.SetClusterState(allNodes, nonExpendableScheduledPods, draSnapshot); err != nil {
 		return caerrors.ToAutoscalerError(caerrors.InternalError, err).AddPrefix("failed to initialize ClusterSnapshot: ")
 	}
@@ -980,7 +982,7 @@ func (a *StaticAutoscaler) ExitCleanUp() {
 	a.clusterStateRegistry.Stop()
 }
 
-func (a *StaticAutoscaler) obtainNodeLists() ([]*apiv1.Node, []*apiv1.Node, caerrors.AutoscalerError) {
+func (a *StaticAutoscaler) obtainNodeLists(draSnapshot *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node, caerrors.AutoscalerError) {
 	allNodes, err := a.AllNodeLister().List()
 	if err != nil {
 		klog.Errorf("Failed to list all nodes: %v", err)
@@ -998,7 +1000,7 @@ func (a *StaticAutoscaler) obtainNodeLists() ([]*apiv1.Node, []*apiv1.Node, caer
 	// Treat those nodes as unready until GPU actually becomes available and let
 	// our normal handling for booting up nodes deal with this.
 	// TODO: Remove this call when we handle dynamically provisioned resources.
-	allNodes, readyNodes = a.processors.CustomResourcesProcessor.FilterOutNodesWithUnreadyResources(a.AutoscalingContext, allNodes, readyNodes)
+	allNodes, readyNodes = a.processors.CustomResourcesProcessor.FilterOutNodesWithUnreadyResources(a.AutoscalingContext, allNodes, readyNodes, draSnapshot)
 	allNodes, readyNodes = taints.FilterOutNodesWithStartupTaints(a.taintConfig, allNodes, readyNodes)
 	return allNodes, readyNodes, nil
 }
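
A consequence of the static_autoscaler.go change worth spelling out: the DRA snapshot is now taken before the node lists are obtained, and drasnapshot.Snapshot is handled as a pointer, with nil evidently standing in for "no DRA data" - which is why tests elsewhere in this patch pass nil where they previously passed drasnapshot.Snapshot{}. A condensed sketch of the resulting RunOnce flow, using only names that appear in the diff (not compilable in isolation; unrelated steps elided):

// Condensed sketch of the new RunOnce flow; unrelated steps are elided.
var draSnapshot *drasnapshot.Snapshot // stays nil when DRA is disabled or no provider is configured
if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.AutoscalingContext.DraProvider != nil {
	snap, err := a.AutoscalingContext.DraProvider.Snapshot()
	if err != nil {
		return caerrors.ToAutoscalerError(caerrors.ApiCallError, err)
	}
	draSnapshot = snap
}

// Taken before the node lists, the snapshot can now inform DRA readiness filtering...
allNodes, readyNodes, typedErr := a.obtainNodeLists(draSnapshot)
if typedErr != nil {
	return typedErr
}
_ = readyNodes // used further down in the real function

// ...and the same (possibly nil) snapshot later seeds the cluster snapshot.
if err := a.ClusterSnapshot.SetClusterState(allNodes, nonExpendableScheduledPods, draSnapshot); err != nil {
	return caerrors.ToAutoscalerError(caerrors.InternalError, err).AddPrefix("failed to initialize ClusterSnapshot: ")
}
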
diff --git a/cluster-autoscaler/core/static_autoscaler_dra_test.go b/cluster-autoscaler/core/static_autoscaler_dra_test.go
index 21e77d0c143f84d9ae4d2438e88ccd4002cd67af..8545b3c1448646d4591bf7054879de9d0351099b 100644
--- a/cluster-autoscaler/core/static_autoscaler_dra_test.go
+++ b/cluster-autoscaler/core/static_autoscaler_dra_test.go
@@ -181,8 +181,8 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
 	req1Nic := testDeviceRequest{name: "req1Nic", count: 1, selectors: singleAttrSelector(exampleDriver, nicAttribute, nicTypeA)}
 	req1Global := testDeviceRequest{name: "req1Global", count: 1, selectors: singleAttrSelector(exampleDriver, globalDevAttribute, globalDevTypeA)}
 
-	sharedGpuBClaim := testResourceClaim("sharedGpuBClaim", nil, "", []testDeviceRequest{req1GpuB}, nil, nil)
-	sharedAllocatedGlobalClaim := testResourceClaim("sharedGlobalClaim", nil, "", []testDeviceRequest{req1Global}, []testAllocation{{request: req1Global.name, driver: exampleDriver, pool: "global-pool", device: globalDevice + "-0"}}, nil)
+	sharedGpuBClaim := testResourceClaim("sharedGpuBClaim", nil, "", []testDeviceRequest{req1GpuB}, nil)
+	sharedAllocatedGlobalClaim := testResourceClaim("sharedGlobalClaim", nil, "", []testDeviceRequest{req1Global}, []testAllocation{{request: req1Global.name, driver: exampleDriver, pool: "global-pool", device: globalDevice + "-0"}})
 
 	testCases := map[string]struct {
 		nodeGroups           map[*testNodeGroupDef]int
@@ -250,10 +250,8 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
 			expectedScaleUps: map[string]int{node1Gpu1Nic1slice.name: 3},
 		},
 		"scale-up: scale from 0 nodes in a node group": {
-			nodeGroups: map[*testNodeGroupDef]int{node1Gpu1Nic1slice: 0},
-			pods: append(
-				unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
-			),
+			nodeGroups:       map[*testNodeGroupDef]int{node1Gpu1Nic1slice: 0},
+			pods:             unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
 			expectedScaleUps: map[string]int{node1Gpu1Nic1slice.name: 3},
 		},
 		"scale-up: scale from 0 nodes in a node group, with pods on the template nodes consuming DRA resources": {
@@ -264,9 +262,7 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
 					scheduledPod(baseSmallPod, "template-1", node3GpuA1slice.name+"-template", map[*testDeviceRequest][]string{&req1GpuA: {gpuDevice + "-1"}}),
 				},
 			},
-			pods: append(
-				unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}),
-			),
+			pods:             unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}),
 			expectedScaleUps: map[string]int{node3GpuA1slice.name: 3},
 		},
 		"scale-up: scale from 0 nodes in a node group, with pods on the template nodes consuming DRA resources, including shared claims": {
@@ -278,16 +274,12 @@ func TestStaticAutoscalerDynamicResources(t *testing.T) {
 					scheduledPod(baseSmallPod, "template-1", node3GpuA1slice.name+"-template", map[*testDeviceRequest][]string{&req1GpuA: {gpuDevice + "-1"}}, sharedAllocatedGlobalClaim),
 				},
 			},
-			pods: append(
-				unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}, sharedAllocatedGlobalClaim),
-			),
+			pods:             unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA}, sharedAllocatedGlobalClaim),
 			expectedScaleUps: map[string]int{node3GpuA1slice.name: 3},
 		},
 		"no scale-up: pods requesting multiple different devices, but they're on different nodes": {
 			nodeGroups: map[*testNodeGroupDef]int{node1GpuA1slice: 1, node1Nic1slice: 1},
-			pods: append(
-				unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
-			),
+			pods:       unscheduledPods(baseSmallPod, "unschedulable", 3, []testDeviceRequest{req1GpuA, req1Nic}),
 		},
 		"scale-up: pods requesting a shared, unallocated claim": {
 			extraResourceClaims: []*resourceapi.ResourceClaim{sharedGpuBClaim},
@@ -597,13 +589,13 @@ func resourceClaimsForPod(pod *apiv1.Pod, nodeName string, claimCount int, reque
 			}
 		}
 
-		claims = append(claims, testResourceClaim(name, pod, nodeName, claimRequests, claimAllocations, nil))
+		claims = append(claims, testResourceClaim(name, pod, nodeName, claimRequests, claimAllocations))
 	}
 
 	return claims
 }
 
-func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string, requests []testDeviceRequest, allocations []testAllocation, reservedFor []*apiv1.Pod) *resourceapi.ResourceClaim {
+func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string, requests []testDeviceRequest, allocations []testAllocation) *resourceapi.ResourceClaim {
 	var deviceRequests []resourceapi.DeviceRequest
 	for _, request := range requests {
 		var selectors []resourceapi.DeviceSelector
@@ -673,15 +665,6 @@ func testResourceClaim(claimName string, owningPod *apiv1.Pod, nodeName string,
 					UID:      owningPod.UID,
 				},
 			}
-		} else {
-			for _, pod := range podReservations {
-				podReservations = append(podReservations, resourceapi.ResourceClaimConsumerReference{
-					APIGroup: "",
-					Resource: "pods",
-					Name:     pod.Name,
-					UID:      pod.UID,
-				})
-			}
 		}
 		claim.Status = resourceapi.ResourceClaimStatus{
 			Allocation: &resourceapi.AllocationResult{
diff --git a/cluster-autoscaler/estimator/binpacking_estimator.go b/cluster-autoscaler/estimator/binpacking_estimator.go
index a8bb78cc1fe25765e01a33ecf9389f3acfba332b..10ed1dfa3d635f3e3ccfae03d77d10bfdcba8f3f 100644
--- a/cluster-autoscaler/estimator/binpacking_estimator.go
+++ b/cluster-autoscaler/estimator/binpacking_estimator.go
@@ -20,6 +20,8 @@ import (
 	"fmt"
 	"strconv"
 
+	"slices"
+
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
@@ -27,6 +29,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/klog/v2"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
 )
 
 // BinpackingNodeEstimator estimates the number of needed nodes to handle the given amount of pods.
@@ -171,7 +174,8 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
 
 		if estimationState.lastNodeName != "" {
 			// Try to schedule the pod on only newly created node.
-			if err := e.clusterSnapshot.SchedulePod(pod, estimationState.lastNodeName); err == nil {
+			err := e.clusterSnapshot.SchedulePod(pod, estimationState.lastNodeName)
+			if err == nil {
 				// The pod was scheduled on the newly created node.
 				found = true
 				estimationState.trackScheduledPod(pod, estimationState.lastNodeName)
@@ -180,6 +184,24 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
 				return false, err
 			}
 			// The pod can't be scheduled on the newly created node because of scheduling predicates.
+
+			// Check whether scheduling failed because of pod topology spread constraints.
+			if isPodUsingHostNameTopologyKey(pod) && hasTopologyConstraintError(err) {
+				// The pod was rejected by the newest node because of hostname-based topology spread constraints.
+				// Before opening yet another node, try the nodes already added in this estimation - with hostname spreading the pod often still fits on an earlier, less loaded node.
+				nodeName, err := e.clusterSnapshot.SchedulePodOnAnyNodeMatching(pod, func(nodeInfo *framework.NodeInfo) bool {
+					return nodeInfo.Node().Name != estimationState.lastNodeName // only skip the last node that failed scheduling
+				})
+				if err != nil && err.Type() == clustersnapshot.SchedulingInternalError {
+					// Unexpected error.
+					return false, err
+				}
+				if nodeName != "" {
+					// The pod was scheduled on a different node, so we can continue binpacking.
+					found = true
+					estimationState.trackScheduledPod(pod, nodeName)
+				}
+			}
 		}
 
 		if !found {
@@ -240,6 +262,33 @@ func (e *BinpackingNodeEstimator) addNewNodeToSnapshot(
 	return nil
 }
 
+// hasTopologyConstraintError reports whether a scheduling error was caused by
+// pod topology spread constraints, based on the failing predicate reasons.
+func hasTopologyConstraintError(err clustersnapshot.SchedulingError) bool {
+	if err == nil {
+		return false
+	}
+
+	// Check whether any failing predicate reason matches the pod topology spread constraint error.
+	return slices.Contains(err.FailingPredicateReasons(), podtopologyspread.ErrReasonConstraintsNotMatch)
+}
+
+// isPodUsingHostNameTopologyKey returns true if the pod has any topology spread
+// constraint that uses the kubernetes.io/hostname topology key.
+func isPodUsingHostNameTopologyKey(pod *apiv1.Pod) bool {
+	if pod == nil || pod.Spec.TopologySpreadConstraints == nil {
+		return false
+	}
+
+	for _, constraint := range pod.Spec.TopologySpreadConstraints {
+		if constraint.TopologyKey == apiv1.LabelHostname {
+			return true
+		}
+	}
+
+	return false
+}
+
 func observeBinpackingHeterogeneity(podsEquivalenceGroups []PodEquivalenceGroup, nodeTemplate *framework.NodeInfo) {
 	node := nodeTemplate.Node()
 	var instanceType, cpuCount string
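
For context on the estimator change above: with hostname-based spreading, a pod rejected by the most recently added node can often still be placed on one of the nodes created earlier in the same estimation pass, so the estimator now retries those nodes before opening another one - the new "schedules pods retroactively" test case below exercises exactly this. A self-contained sketch of the kind of pod spec that takes the new path; the literal is illustrative and not taken from the tests:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A pod spread over individual nodes: TopologyKey is apiv1.LabelHostname
	// ("kubernetes.io/hostname"), which is what isPodUsingHostNameTopologyKey checks for.
	pod := &apiv1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "estimatee", Labels: map[string]string{"app": "estimatee"}},
		Spec: apiv1.PodSpec{
			TopologySpreadConstraints: []apiv1.TopologySpreadConstraint{{
				MaxSkew:           1,
				TopologyKey:       apiv1.LabelHostname,
				WhenUnsatisfiable: apiv1.DoNotSchedule,
				LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "estimatee"}},
			}},
		},
	}

	// When SchedulePod rejects such a pod on the newest node with the
	// podtopologyspread "constraints not match" reason, the estimator now falls
	// back to SchedulePodOnAnyNodeMatching over the other nodes in the snapshot.
	fmt.Println(pod.Spec.TopologySpreadConstraints[0].TopologyKey)
}
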
diff --git a/cluster-autoscaler/estimator/binpacking_estimator_test.go b/cluster-autoscaler/estimator/binpacking_estimator_test.go
index ac205f16ba46e6874806c352ac5bb48463856088..00a32dbf5d0ce4f4aeb4b582b0dbc0d1fd38ba9b 100644
--- a/cluster-autoscaler/estimator/binpacking_estimator_test.go
+++ b/cluster-autoscaler/estimator/binpacking_estimator_test.go
@@ -178,13 +178,13 @@ func TestBinpackingEstimate(t *testing.T) {
 			podsEquivalenceGroup: []PodEquivalenceGroup{makePodEquivalenceGroup(
 				BuildTestPod(
 					"estimatee",
-					20,
-					100,
+					200,
+					200,
 					WithNamespace("universe"),
 					WithLabels(map[string]string{
 						"app": "estimatee",
 					}),
-					WithMaxSkew(2, "kubernetes.io/hostname")), 8)},
+					WithMaxSkew(2, "kubernetes.io/hostname", 1)), 8)},
 			expectNodeCount: 4,
 			expectPodCount:  8,
 		},
@@ -201,10 +201,27 @@ func TestBinpackingEstimate(t *testing.T) {
 					WithLabels(map[string]string{
 						"app": "estimatee",
 					}),
-					WithMaxSkew(2, "topology.kubernetes.io/zone")), 8)},
+					WithMaxSkew(2, "topology.kubernetes.io/zone", 1)), 8)},
 			expectNodeCount: 1,
 			expectPodCount:  2,
 		},
+		{
+			name:       "hostname topology spreading with maxSkew=1 and a large scale-up schedules pods retroactively onto earlier nodes",
+			millicores: 1000,
+			memory:     5000,
+			podsEquivalenceGroup: []PodEquivalenceGroup{makePodEquivalenceGroup(
+				BuildTestPod(
+					"estimatee",
+					20,
+					100,
+					WithNamespace("universe"),
+					WithLabels(map[string]string{
+						"app": "estimatee",
+					}),
+					WithMaxSkew(1, "kubernetes.io/hostname", 3)), 12)},
+			expectNodeCount: 3,
+			expectPodCount:  12,
+		},
 	}
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod
index 80507bc7274523b27b638ffca861f8a905a54ae9..771b594de94ee525c3a79e5ab35d5da3626f675b 100644
--- a/cluster-autoscaler/go.mod
+++ b/cluster-autoscaler/go.mod
@@ -32,29 +32,29 @@ require (
 	github.com/stretchr/testify v1.10.0
 	github.com/vburenin/ifacemaker v1.2.1
 	go.uber.org/mock v0.4.0
-	golang.org/x/crypto v0.35.0
-	golang.org/x/net v0.33.0
+	golang.org/x/crypto v0.36.0
+	golang.org/x/net v0.38.0
 	golang.org/x/oauth2 v0.27.0
-	golang.org/x/sys v0.30.0
+	golang.org/x/sys v0.31.0
 	google.golang.org/api v0.151.0
 	google.golang.org/grpc v1.68.1
 	google.golang.org/protobuf v1.36.5
 	gopkg.in/gcfg.v1 v1.2.3
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.33.0-beta.0
-	k8s.io/apimachinery v0.33.0-beta.0
-	k8s.io/apiserver v0.33.0-beta.0
+	k8s.io/api v0.34.0-alpha.0
+	k8s.io/apimachinery v0.34.0-alpha.0
+	k8s.io/apiserver v0.34.0-alpha.0
 	k8s.io/autoscaler/cluster-autoscaler/apis v0.0.0-20240627115740-d52e4b9665d7
-	k8s.io/client-go v0.33.0-beta.0
+	k8s.io/client-go v0.34.0-alpha.0
 	k8s.io/cloud-provider v0.30.1
 	k8s.io/cloud-provider-aws v1.27.0
 	k8s.io/cloud-provider-gcp/providers v0.28.2
-	k8s.io/component-base v0.33.0-beta.0
-	k8s.io/component-helpers v0.33.0-beta.0
+	k8s.io/component-base v0.34.0-alpha.0
+	k8s.io/component-helpers v0.34.0-alpha.0
 	k8s.io/dynamic-resource-allocation v0.0.0
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/kubelet v0.33.0-beta.0
-	k8s.io/kubernetes v1.33.0-beta.0
+	k8s.io/kubelet v0.34.0-alpha.0
+	k8s.io/kubernetes v1.34.0-alpha.0
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 	sigs.k8s.io/cloud-provider-azure v1.29.4
 	sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13
@@ -122,7 +122,7 @@ require (
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
@@ -134,7 +134,7 @@ require (
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
 	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
-	github.com/gorilla/websocket v1.5.3 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -171,9 +171,9 @@ require (
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.16 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.16 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.21 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.21 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 // indirect
@@ -190,9 +190,9 @@ require (
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
 	golang.org/x/mod v0.21.0 // indirect
-	golang.org/x/sync v0.11.0 // indirect
-	golang.org/x/term v0.29.0 // indirect
-	golang.org/x/text v0.22.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/text v0.23.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
@@ -203,14 +203,14 @@ require (
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiextensions-apiserver v0.0.0 // indirect
-	k8s.io/code-generator v0.33.0-beta.0 // indirect
-	k8s.io/controller-manager v0.33.0-beta.0 // indirect
-	k8s.io/cri-api v0.33.0-beta.0 // indirect
+	k8s.io/code-generator v0.34.0-alpha.0 // indirect
+	k8s.io/controller-manager v0.34.0-alpha.0 // indirect
+	k8s.io/cri-api v0.34.0-alpha.0 // indirect
 	k8s.io/cri-client v0.0.0 // indirect
 	k8s.io/csi-translation-lib v0.27.0 // indirect
 	k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
-	k8s.io/kms v0.33.0-beta.0 // indirect
-	k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 // indirect
+	k8s.io/kms v0.34.0-alpha.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
 	k8s.io/kube-scheduler v0.0.0 // indirect
 	k8s.io/kubectl v0.28.0 // indirect
 	k8s.io/mount-utils v0.26.0-alpha.0 // indirect
@@ -227,66 +227,66 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0
 
 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0
 
-replace k8s.io/api => k8s.io/api v0.33.0-beta.0
+replace k8s.io/api => k8s.io/api v0.34.0-alpha.0
 
-replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.0-beta.0
+replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.34.0-alpha.0
 
-replace k8s.io/apimachinery => k8s.io/apimachinery v0.33.0-beta.0
+replace k8s.io/apimachinery => k8s.io/apimachinery v0.34.0-alpha.0
 
-replace k8s.io/apiserver => k8s.io/apiserver v0.33.0-beta.0
+replace k8s.io/apiserver => k8s.io/apiserver v0.34.0-alpha.0
 
-replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.33.0-beta.0
+replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.34.0-alpha.0
 
-replace k8s.io/client-go => k8s.io/client-go v0.33.0-beta.0
+replace k8s.io/client-go => k8s.io/client-go v0.34.0-alpha.0
 
-replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.33.0-beta.0
+replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.34.0-alpha.0
 
-replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.0-beta.0
+replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.34.0-alpha.0
 
-replace k8s.io/code-generator => k8s.io/code-generator v0.33.0-beta.0
+replace k8s.io/code-generator => k8s.io/code-generator v0.34.0-alpha.0
 
-replace k8s.io/component-base => k8s.io/component-base v0.33.0-beta.0
+replace k8s.io/component-base => k8s.io/component-base v0.34.0-alpha.0
 
-replace k8s.io/component-helpers => k8s.io/component-helpers v0.33.0-beta.0
+replace k8s.io/component-helpers => k8s.io/component-helpers v0.34.0-alpha.0
 
-replace k8s.io/controller-manager => k8s.io/controller-manager v0.33.0-beta.0
+replace k8s.io/controller-manager => k8s.io/controller-manager v0.34.0-alpha.0
 
-replace k8s.io/cri-api => k8s.io/cri-api v0.33.0-beta.0
+replace k8s.io/cri-api => k8s.io/cri-api v0.34.0-alpha.0
 
-replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.0-beta.0
+replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.34.0-alpha.0
 
-replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.33.0-beta.0
+replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.34.0-alpha.0
 
-replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.33.0-beta.0
+replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.34.0-alpha.0
 
-replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.33.0-beta.0
+replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.34.0-alpha.0
 
-replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.0-beta.0
+replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.34.0-alpha.0
 
-replace k8s.io/kubectl => k8s.io/kubectl v0.33.0-beta.0
+replace k8s.io/kubectl => k8s.io/kubectl v0.34.0-alpha.0
 
-replace k8s.io/kubelet => k8s.io/kubelet v0.33.0-beta.0
+replace k8s.io/kubelet => k8s.io/kubelet v0.34.0-alpha.0
 
-replace k8s.io/metrics => k8s.io/metrics v0.33.0-beta.0
+replace k8s.io/metrics => k8s.io/metrics v0.34.0-alpha.0
 
-replace k8s.io/mount-utils => k8s.io/mount-utils v0.33.0-beta.0
+replace k8s.io/mount-utils => k8s.io/mount-utils v0.34.0-alpha.0
 
-replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.33.0-beta.0
+replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.34.0-alpha.0
 
-replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.33.0-beta.0
+replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.34.0-alpha.0
 
-replace k8s.io/sample-controller => k8s.io/sample-controller v0.33.0-beta.0
+replace k8s.io/sample-controller => k8s.io/sample-controller v0.34.0-alpha.0
 
-replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.0-beta.0
+replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.34.0-alpha.0
 
-replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.0-beta.0
+replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.34.0-alpha.0
 
-replace k8s.io/kms => k8s.io/kms v0.33.0-beta.0
+replace k8s.io/kms => k8s.io/kms v0.34.0-alpha.0
 
-replace k8s.io/endpointslice => k8s.io/endpointslice v0.33.0-beta.0
+replace k8s.io/endpointslice => k8s.io/endpointslice v0.34.0-alpha.0
 
 replace k8s.io/autoscaler/cluster-autoscaler/apis => ./apis
 
-replace k8s.io/cri-client => k8s.io/cri-client v0.33.0-beta.0
+replace k8s.io/cri-client => k8s.io/cri-client v0.34.0-alpha.0
 
-replace k8s.io/externaljwt => k8s.io/externaljwt v0.33.0-beta.0
+replace k8s.io/externaljwt => k8s.io/externaljwt v0.34.0-alpha.0
diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum
index cc439664e2a2d05c90101faa93cb49816b55d8b9..eb5c4c7b21646d2571e3f5b76b10a19a6ce0ec39 100644
--- a/cluster-autoscaler/go.sum
+++ b/cluster-autoscaler/go.sum
@@ -191,8 +191,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
-github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -244,8 +244,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
 github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
 github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
-github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
-github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
@@ -384,20 +384,20 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
 go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
-go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
-go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
-go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
-go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
-go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
-go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
-go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
-go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
-go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
-go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
-go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
-go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
+go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8=
+go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
+go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc=
+go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
+go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA=
+go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8=
+go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
+go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
+go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk=
+go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU=
+go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk=
+go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs=
+go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU=
+go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
@@ -439,8 +439,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
-golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -471,8 +471,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
@@ -486,8 +486,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -504,16 +504,16 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
-golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -522,8 +522,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
 golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -597,56 +597,56 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.33.0-beta.0 h1:/sAUrfXsjKPST2mZjpWhjRdzSR6SD5KlJpiOgCQQhAQ=
-k8s.io/api v0.33.0-beta.0/go.mod h1:TYyCgedkG4OVS4+4D2n25BdbMcexMSLx6Y7OkAzkxLQ=
-k8s.io/apiextensions-apiserver v0.33.0-beta.0 h1:3oqBvfd26IOekt96KEfE8A0wA/k1wDSBfTPirkRun1Q=
-k8s.io/apiextensions-apiserver v0.33.0-beta.0/go.mod h1:TKTeoFcmGvtiDNV+wj8wJfZhamZNOhvi9yOIE2d1iWs=
-k8s.io/apimachinery v0.33.0-beta.0 h1:vLDBChfQwyimk6AbuT7OZOIqxSg/44JlXuxqBk85j68=
-k8s.io/apimachinery v0.33.0-beta.0/go.mod h1:S2OIkExGqJOXYSYcAJwQ9zWcc6BkBUdTJUu4M7z0cvo=
-k8s.io/apiserver v0.33.0-beta.0 h1:EGjNQ4ocOGEq/KaYFuBS6MiUxZL9WmySu+QpMz+sBrk=
-k8s.io/apiserver v0.33.0-beta.0/go.mod h1:6gxw8BX1YZxi2NtOsFIoURP9bVRkP3sNqle0KVXz1cA=
-k8s.io/client-go v0.33.0-beta.0 h1:xRGKK5hU39pb6CFDCDOOlG+LEenB93/RK9hoP4eyAsU=
-k8s.io/client-go v0.33.0-beta.0/go.mod h1:RF6hSu+FncpgHQs1zA1UfGbMq8gxay89r37bCQe+Mj4=
-k8s.io/cloud-provider v0.33.0-beta.0 h1:SIeUrmbGz8dZZ0B0zIFPpEpV+5oGqVAFDhJ7YiFxZTU=
-k8s.io/cloud-provider v0.33.0-beta.0/go.mod h1:n5dF1uxbcax4W1WSMMUoP/S7Hjs6W1R5BdCjW0skZM8=
+k8s.io/api v0.34.0-alpha.0 h1:plVaaO0yCTOGvWjEiEvvecQOPpf/IYdLnVMsfGfGMQo=
+k8s.io/api v0.34.0-alpha.0/go.mod h1:brriDRpq4yMP4PN4P48NfXVLwWSwaIFSe0+pOajiwjQ=
+k8s.io/apiextensions-apiserver v0.34.0-alpha.0 h1:2rk3mhFhb3X7LqNThP2V82UAp2SvLv7UHAn4LcPDPD0=
+k8s.io/apiextensions-apiserver v0.34.0-alpha.0/go.mod h1:aRq9qS8T7BKsEZKuwNWitDeO+iKpe04vYK2xHU4NNQ0=
+k8s.io/apimachinery v0.34.0-alpha.0 h1:arymqm+uCpPEAVWBCvNF+yq01AJzsoUeUd2DYpoHuzc=
+k8s.io/apimachinery v0.34.0-alpha.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/apiserver v0.34.0-alpha.0 h1:/di3tEpcgVmRQ+8eBHogwdBR5vehH+hRnMH32ndEtOY=
+k8s.io/apiserver v0.34.0-alpha.0/go.mod h1:Cc7sDNMFnR5BghLjQpJmUsq4aUNp7iWCZ55RrKW04Y8=
+k8s.io/client-go v0.34.0-alpha.0 h1:+hfihZ7vffuzoS4BoYg2nWs+9Bc1hXpZ7+iev2ISCo0=
+k8s.io/client-go v0.34.0-alpha.0/go.mod h1:0sClwbFRpXuYhqaJEqLiy+e9dlC7FOhFHc9ZdvLDAbU=
+k8s.io/cloud-provider v0.34.0-alpha.0 h1:+iUGvbyWM4CHiKbmdkt/iyVFtDHjF1rOsyQft5UlFac=
+k8s.io/cloud-provider v0.34.0-alpha.0/go.mod h1:VJFyCyldpNmF2xzBJE+821KExRBcuNBRiiHD8WqOYLI=
 k8s.io/cloud-provider-aws v1.27.0 h1:PF8YrH8QcN6JoXB3Xxlaz84SBDYMPunJuCc0cPuCWXA=
 k8s.io/cloud-provider-aws v1.27.0/go.mod h1:9vUb5mnVnReSRDBWcBxB1b0HOeEc472iOPmrnwpN9SA=
 k8s.io/cloud-provider-gcp/providers v0.28.2 h1:I65pFTLNMQSj7YuW3Mg3pZIXmw0naCmF6TGAuz4/sZE=
 k8s.io/cloud-provider-gcp/providers v0.28.2/go.mod h1:P8dxRvvLtX7xUwVUzA/QOqv8taCzBaVsVMnjnpjmYXE=
-k8s.io/code-generator v0.33.0-beta.0 h1:QYiWYFUT9G7lnF1ucDYr/sZUaG/kptrooX2PJxEL+Go=
-k8s.io/code-generator v0.33.0-beta.0/go.mod h1:RBvFpvqtyQygCBjMayNyYqdzy+89LdzqAx0Th+dgmzQ=
-k8s.io/component-base v0.33.0-beta.0 h1:EEEzTLuzO1Li+YNHcDLQJgxX6AhfxAZqusYRGbIHfhg=
-k8s.io/component-base v0.33.0-beta.0/go.mod h1:J9MYu3hIiNSNAhjiax9ktqplTpXPLP2RLXhzfJj1ahY=
-k8s.io/component-helpers v0.33.0-beta.0 h1:K0ehdYOLgvS0o7pNJ1fxn1IvDT7Vnnc5IjQde9FioOk=
-k8s.io/component-helpers v0.33.0-beta.0/go.mod h1:YiRLGS1YwfPKgRty5KPVgJdNgROn8btJ2KmfiDiIbxw=
-k8s.io/controller-manager v0.33.0-beta.0 h1:iDBYPfvB1U5Z+E2baBwcU1ZBEPIMQ1Vna5ZODiuMtQU=
-k8s.io/controller-manager v0.33.0-beta.0/go.mod h1:9YW7KwP/UENsnNGaD9+AoW+A4qorwzOj2HjO3+C1L0s=
-k8s.io/cri-api v0.33.0-beta.0 h1:kUm8dbrFJhq433uCHbPUcPigcMhr+pJSQLssDQQ6qvU=
-k8s.io/cri-api v0.33.0-beta.0/go.mod h1:AWeYLzfWgDAsuMDuL4Cdv4QN6w8I38Skhl7VL5Kt88Y=
-k8s.io/cri-client v0.33.0-beta.0 h1:qXUQfgZjVOJJZi5da3sSLpSrzRhr0Fl3D9nUZNYjxRk=
-k8s.io/cri-client v0.33.0-beta.0/go.mod h1:9+kkuFbEClFEziyj3+Ia4Tt3cGQpdL1QMAn5Jxp7i6Q=
-k8s.io/csi-translation-lib v0.33.0-beta.0 h1:JMdubqoa2pBK7aT/7V7rbHEJZPjOWiy0rsl2EiT3Pi0=
-k8s.io/csi-translation-lib v0.33.0-beta.0/go.mod h1:WAdrPAfrlpX+JzvZp03eBu0k+vxc+KztWOmQ83bXdC0=
-k8s.io/dynamic-resource-allocation v0.33.0-beta.0 h1:RS+awbea4tBwdPSJCy+eOrBU7CU243uaQ78MYjIjPbU=
-k8s.io/dynamic-resource-allocation v0.33.0-beta.0/go.mod h1:TMF5neWMSImYXSKmtGcBxi8+3r0r6zTKmWDq09uu354=
+k8s.io/code-generator v0.34.0-alpha.0 h1:aM4APBz/eAR8Qw4RWiCpfocZ2O2UUTi0UqTfvalouHc=
+k8s.io/code-generator v0.34.0-alpha.0/go.mod h1:lwzb0eIHnmHnkhcHbxXf87XR512Xm7mF2RHtDKEW71c=
+k8s.io/component-base v0.34.0-alpha.0 h1:K/EyE1SX52rDrb+cpRn4MYh2pIJNzxMVqiiJbss5gFo=
+k8s.io/component-base v0.34.0-alpha.0/go.mod h1:AwuvLTXn5h+ijia1CUBszmsbDNkOkipcwqz0IjGwUuU=
+k8s.io/component-helpers v0.34.0-alpha.0 h1:/XBER9s8XN1dZdMjArYj+WvwKLy1U8pKBA5YLYyGC8Q=
+k8s.io/component-helpers v0.34.0-alpha.0/go.mod h1:7v3yLCKYXbXWZV2Zx19k3WzKgmmjJaHJKyUiUuWr3vg=
+k8s.io/controller-manager v0.34.0-alpha.0 h1:vdxEOA97ADUjIIXklwRAK/eWVJYLaqoCVDOXAEIo7YE=
+k8s.io/controller-manager v0.34.0-alpha.0/go.mod h1:N+4fMmhvvwStvBV0cRA4fDWO41dRnFx7WKtnvZz6PpM=
+k8s.io/cri-api v0.34.0-alpha.0 h1:s0rfuGqBJObds8cWrq5uNPWcUrC707NQv+JPb9x61Es=
+k8s.io/cri-api v0.34.0-alpha.0/go.mod h1:OLQvT45OpIA+tv91ZrpuFIGY+Y2Ho23poS7n115Aocs=
+k8s.io/cri-client v0.34.0-alpha.0 h1:rYxCLNZlecNrrUp++MGUSZa9vxacVcgYBHDlzEWjG9s=
+k8s.io/cri-client v0.34.0-alpha.0/go.mod h1:Kd0X9qvWgiH1t4R43Br69c/GnFinKKDeah8dKU1NJAk=
+k8s.io/csi-translation-lib v0.34.0-alpha.0 h1:IY8WTHF4tShtdq4Bhhz9MK55YETZediGLGjeqFNw4p4=
+k8s.io/csi-translation-lib v0.34.0-alpha.0/go.mod h1:K8LDx63jcQWhWpgsl9LXzz3epM08y+BqB2hIC7vjLrs=
+k8s.io/dynamic-resource-allocation v0.34.0-alpha.0 h1:xsDXrWpd++6RZM9Hv6dN7OWHWfbEXyI8kx9u0R/tzeI=
+k8s.io/dynamic-resource-allocation v0.34.0-alpha.0/go.mod h1:3XaIVAbt0QApbx+AZRSvnJ5pN7N/ipXMo1KY1eCU86M=
 k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
 k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.33.0-beta.0 h1:vuDqh9eIXJ8GdAekbWBTJ1zbGpZmn5455QE9W+Ynl1c=
-k8s.io/kms v0.33.0-beta.0/go.mod h1:Y4hMZ7VWEUZIR0X6fX4HcoOaIFL7k/1sJUJp1kVu8ig=
-k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 h1:t0huyHnz6HsokckRxAF1bY0cqPFwzINKCL7yltEjZQc=
-k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
-k8s.io/kube-scheduler v0.33.0-beta.0 h1:h79DT1YNh7K8LGhsmZpRCPIuC086APmoKnFHvCEXOUo=
-k8s.io/kube-scheduler v0.33.0-beta.0/go.mod h1:X1icvrNj/YJL5nG2p4nvgntJfz9++bRircE/dTHmofc=
-k8s.io/kubectl v0.33.0-beta.0 h1:iUj3GRRJoNYDs+33Ty6OO+dk/oMui8g3fgyenx2H0as=
-k8s.io/kubectl v0.33.0-beta.0/go.mod h1:nq1RKFPc1wBQKbqV4hKRfCCt+ND46a0Q1moiCm3+EE4=
-k8s.io/kubelet v0.33.0-beta.0 h1:Lvv7unOCBbq2Pat3nhos7G7IO8FAxnV9ipu72t8wDHU=
-k8s.io/kubelet v0.33.0-beta.0/go.mod h1:ZEs0MMzwF9iz3h+jYtRPRWzVWCDPyV2yBBzWgRB+gQw=
-k8s.io/kubernetes v1.33.0-beta.0 h1:h27iikxaUwfS4Hx+owBk1XMkBGKWdW3HKNguibK+kjM=
-k8s.io/kubernetes v1.33.0-beta.0/go.mod h1:/Ko9OUJBoF0BzbR/kPMr88qES8PeZ5Uw6H0yyKc/U+Y=
-k8s.io/mount-utils v0.33.0-beta.0 h1:WfUvSZ+UX1jC1LAo2S53fa8PsnD+awl5owAOatxiX8s=
-k8s.io/mount-utils v0.33.0-beta.0/go.mod h1:eYstRv7n35VQLbGB31hX5DepuowfDaOM/FulA1GTn9A=
+k8s.io/kms v0.34.0-alpha.0 h1:URGYBkKIJW9+RzS3ayTKbELow8pfDrCxwnLzW8Nfsqk=
+k8s.io/kms v0.34.0-alpha.0/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/kube-scheduler v0.34.0-alpha.0 h1:KNKwFwyQWKj4CvIQiXyNhZThBYfQFOkn4XGZme1e4M8=
+k8s.io/kube-scheduler v0.34.0-alpha.0/go.mod h1:8QwR6p8Gn64s5q24o4aDylieHJVkSdi7o+lrtsdBM3U=
+k8s.io/kubectl v0.34.0-alpha.0 h1:j9e39A0PtVRTtkl5CvAmpowKD/ZT3dL6KgF/DNBHQ14=
+k8s.io/kubectl v0.34.0-alpha.0/go.mod h1:Gte7DASB26vNZKWwipr8DDCxscefflco+uaW16/TLZ8=
+k8s.io/kubelet v0.34.0-alpha.0 h1:N0VWnJB3XRLpdRy3FX4/CUD/nODpjyI/7ab0HvrK1Pc=
+k8s.io/kubelet v0.34.0-alpha.0/go.mod h1:51ZqBsNuEIg/L675e4ddKY+gXxibR1e6jBAvwEmQW4M=
+k8s.io/kubernetes v1.34.0-alpha.0 h1:CQgvuSXe2bBsVVnANySuiwHe/nW7orvxrLhMTUHWji0=
+k8s.io/kubernetes v1.34.0-alpha.0/go.mod h1:0n2XbxETvcqjlkOAxsWMdi82xaVVrbv9iDm1IB4EkW4=
+k8s.io/mount-utils v0.34.0-alpha.0 h1:iVBW1y5GZHFWyN811CBj1QOIUSE3SkvFkYWgnBRpLeo=
+k8s.io/mount-utils v0.34.0-alpha.0/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go
index 2c1272525bcaf273631f5d6b37a476ef2e4ca3cd..7d090a7416ce304ac739de48be685c3f6d698393 100644
--- a/cluster-autoscaler/main.go
+++ b/cluster-autoscaler/main.go
@@ -118,12 +118,6 @@ func buildAutoscaler(context ctx.Context, debuggingSnapshotter debuggingsnapshot
 	drainabilityRules := rules.Default(deleteOptions)
 
 	var snapshotStore clustersnapshot.ClusterSnapshotStore = store.NewDeltaSnapshotStore(autoscalingOptions.ClusterSnapshotParallelism)
-	if autoscalingOptions.DynamicResourceAllocationEnabled {
-		// TODO(DRA): Remove this once DeltaSnapshotStore is integrated with DRA.
-		klog.Warningf("Using BasicSnapshotStore instead of DeltaSnapshotStore because DRA is enabled. Autoscaling performance/scalability might be decreased.")
-		snapshotStore = store.NewBasicSnapshotStore()
-	}
-
 	opts := core.AutoscalerOptions{
 		AutoscalingOptions:   autoscalingOptions,
 		FrameworkHandle:      fwHandle,
diff --git a/cluster-autoscaler/processors/customresources/custom_resources_processor.go b/cluster-autoscaler/processors/customresources/custom_resources_processor.go
index 052be2dd8407ad23a582d473c0e9014e4386eb87..d68444a3a561be49f351b1da6d605cf5a0cfa3b1 100644
--- a/cluster-autoscaler/processors/customresources/custom_resources_processor.go
+++ b/cluster-autoscaler/processors/customresources/custom_resources_processor.go
@@ -20,6 +20,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
+	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 )
 
@@ -35,14 +36,9 @@ type CustomResourceTarget struct {
 type CustomResourcesProcessor interface {
 	// FilterOutNodesWithUnreadyResources removes nodes that should have a custom resource, but don't have
 	// it in allocatable from ready nodes list and updates their status to unready on all nodes list.
-	FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node) ([]*apiv1.Node, []*apiv1.Node)
+	FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, draSnapshot *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node)
 	// GetNodeResourceTargets returns mapping of resource names to their targets.
 	GetNodeResourceTargets(context *context.AutoscalingContext, node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError)
 	// CleanUp cleans up processor's internal structures.
 	CleanUp()
 }
-
-// NewDefaultCustomResourcesProcessor returns a default instance of CustomResourcesProcessor.
-func NewDefaultCustomResourcesProcessor() CustomResourcesProcessor {
-	return &GpuCustomResourcesProcessor{}
-}
diff --git a/cluster-autoscaler/processors/customresources/default_custom_processor.go b/cluster-autoscaler/processors/customresources/default_custom_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..3a8c8464206b1aca2574e8807af23ee3a3dbe618
--- /dev/null
+++ b/cluster-autoscaler/processors/customresources/default_custom_processor.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package customresources
+
+import (
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+)
+
+// DefaultCustomResourcesProcessor handles multiple custom resource processors and
+// executes them in order.
+type DefaultCustomResourcesProcessor struct {
+	customResourcesProcessors []CustomResourcesProcessor
+}
+
+// NewDefaultCustomResourcesProcessor returns an instance of DefaultCustomResourcesProcessor.
+func NewDefaultCustomResourcesProcessor(draEnabled bool) CustomResourcesProcessor {
+	customProcessors := []CustomResourcesProcessor{&GpuCustomResourcesProcessor{}}
+	if draEnabled {
+		customProcessors = append(customProcessors, &DraCustomResourcesProcessor{})
+	}
+	return &DefaultCustomResourcesProcessor{customProcessors}
+}
+
+// FilterOutNodesWithUnreadyResources calls the corresponding method for internal custom resources processors in order.
+func (p *DefaultCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, draSnapshot *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
+	newAllNodes := allNodes
+	newReadyNodes := readyNodes
+	for _, processor := range p.customResourcesProcessors {
+		newAllNodes, newReadyNodes = processor.FilterOutNodesWithUnreadyResources(context, newAllNodes, newReadyNodes, draSnapshot)
+	}
+	return newAllNodes, newReadyNodes
+}
+
+// GetNodeResourceTargets calls the corresponding method for internal custom resources processors in order.
+func (p *DefaultCustomResourcesProcessor) GetNodeResourceTargets(context *context.AutoscalingContext, node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError) {
+	customResourcesTargets := []CustomResourceTarget{}
+	for _, processor := range p.customResourcesProcessors {
+		targets, err := processor.GetNodeResourceTargets(context, node, nodeGroup)
+		if err != nil {
+			return nil, err
+		}
+		customResourcesTargets = append(customResourcesTargets, targets...)
+	}
+	return customResourcesTargets, nil
+}
+
+// CleanUp cleans up all internal custom resources processors.
+func (p *DefaultCustomResourcesProcessor) CleanUp() {
+	for _, processor := range p.customResourcesProcessors {
+		processor.CleanUp()
+	}
+}
diff --git a/cluster-autoscaler/processors/customresources/default_custom_processor_test.go b/cluster-autoscaler/processors/customresources/default_custom_processor_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5946f009231aa38ee443e2e946a8f0cdae2a0db8
--- /dev/null
+++ b/cluster-autoscaler/processors/customresources/default_custom_processor_test.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package customresources
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	utils "k8s.io/autoscaler/cluster-autoscaler/utils/test"
+)
+
+func TestDefaultProcessorFilterOut(t *testing.T) {
+	processor := DefaultCustomResourcesProcessor{[]CustomResourcesProcessor{
+		&mockCustomResourcesProcessor{nodeMark: "p1"},
+		&mockCustomResourcesProcessor{nodeMark: "p2"},
+		&mockCustomResourcesProcessor{nodeMark: "p3"},
+	}}
+
+	testCases := map[string]struct {
+		allNodes              []*apiv1.Node
+		nodesInitialReadiness map[string]bool
+		expectedReadyNodes    map[string]bool
+	}{
+		"filtering one node by one processor": {
+			allNodes: []*apiv1.Node{
+				utils.BuildTestNode("p1_node_1", 500, 100),
+				utils.BuildTestNode("node_2", 500, 100),
+			},
+			nodesInitialReadiness: map[string]bool{
+				"p1_node_1": true,
+				"node_2":    true,
+			},
+			expectedReadyNodes: map[string]bool{
+				"node_2": true,
+			},
+		},
+		"filtering multiple nodes by one processor": {
+			allNodes: []*apiv1.Node{
+				utils.BuildTestNode("p1_node_1", 500, 100),
+				utils.BuildTestNode("p1_node_2", 500, 100),
+				utils.BuildTestNode("node_3", 500, 100),
+			},
+			nodesInitialReadiness: map[string]bool{
+				"p1_node_1": true,
+				"p1_node_2": true,
+				"node_3":    false,
+			},
+			expectedReadyNodes: map[string]bool{},
+		},
+		"filtering one node by multiple processors": {
+			allNodes: []*apiv1.Node{
+				utils.BuildTestNode("p1_p3_node_1", 500, 100),
+				utils.BuildTestNode("p1_node_2", 500, 100),
+				utils.BuildTestNode("node_3", 500, 100),
+			},
+			nodesInitialReadiness: map[string]bool{
+				"p1_p3_node_1": true,
+				"p1_node_2":    false,
+				"node_3":       false,
+			},
+			expectedReadyNodes: map[string]bool{},
+		},
+		"filtering multiple nodes by multiple processor": {
+			allNodes: []*apiv1.Node{
+				utils.BuildTestNode("p1_node_1", 500, 100),
+				utils.BuildTestNode("p1_node_2", 500, 100),
+				utils.BuildTestNode("node_3", 500, 100),
+				utils.BuildTestNode("node_4", 500, 100),
+				utils.BuildTestNode("p2_node_5", 500, 100),
+				utils.BuildTestNode("p3_node_6", 500, 100),
+			},
+			nodesInitialReadiness: map[string]bool{
+				"p1_node_1": false,
+				"p1_node_2": true,
+				"node_3":    false,
+				"node_4":    true,
+				"p2_node_5": true,
+				"p3_node_6": true,
+			},
+			expectedReadyNodes: map[string]bool{
+				"node_4": true,
+			},
+		},
+	}
+	for tcName, tc := range testCases {
+		t.Run(tcName, func(t *testing.T) {
+			readyNodes := []*apiv1.Node{}
+			for _, node := range tc.allNodes {
+				if tc.nodesInitialReadiness[node.Name] {
+					readyNodes = append(readyNodes, node)
+				}
+			}
+			resultedAllNodes, resultedReadyNodes := processor.FilterOutNodesWithUnreadyResources(nil, tc.allNodes, readyNodes, nil)
+			assert.ElementsMatch(t, tc.allNodes, resultedAllNodes)
+			assert.Len(t, resultedReadyNodes, len(tc.expectedReadyNodes))
+			for _, node := range resultedReadyNodes {
+				assert.True(t, tc.expectedReadyNodes[node.Name])
+			}
+
+		})
+	}
+
+}
+
+func TestDefaultProcessorGetNodeResourceTargets(t *testing.T) {
+	processor := DefaultCustomResourcesProcessor{[]CustomResourcesProcessor{
+		&mockCustomResourcesProcessor{nodeMark: "p1", customResourceTargetsToAdd: []string{"p1_R1", "p1_R2"}, customResourceTargetsQuantity: 1},
+		&mockCustomResourcesProcessor{nodeMark: "p2", customResourceTargetsToAdd: []string{"p2_R1"}, customResourceTargetsQuantity: 2},
+		&mockCustomResourcesProcessor{nodeMark: "p3", customResourceTargetsToAdd: []string{"p3_R1"}, customResourceTargetsQuantity: 3},
+	}}
+
+	testCases := map[string]struct {
+		node              *apiv1.Node
+		expectedResources []CustomResourceTarget
+	}{
+		"single processor": {
+			node: utils.BuildTestNode("p1", 500, 100),
+			expectedResources: []CustomResourceTarget{
+				{ResourceType: "p1_R1", ResourceCount: 1},
+				{ResourceType: "p1_R2", ResourceCount: 1},
+			},
+		},
+		"many processors": {
+			node: utils.BuildTestNode("p1_p3", 500, 100),
+			expectedResources: []CustomResourceTarget{
+				{ResourceType: "p1_R1", ResourceCount: 1},
+				{ResourceType: "p1_R2", ResourceCount: 1},
+				{ResourceType: "p3_R1", ResourceCount: 3},
+			},
+		},
+		"all processors": {
+			node: utils.BuildTestNode("p1_p2_p3", 500, 100),
+			expectedResources: []CustomResourceTarget{
+				{ResourceType: "p1_R1", ResourceCount: 1},
+				{ResourceType: "p1_R2", ResourceCount: 1},
+				{ResourceType: "p2_R1", ResourceCount: 2},
+				{ResourceType: "p3_R1", ResourceCount: 3},
+			},
+		},
+	}
+	for tcName, tc := range testCases {
+		t.Run(tcName, func(t *testing.T) {
+			customResourceTarget, _ := processor.GetNodeResourceTargets(nil, tc.node, nil)
+			assert.ElementsMatch(t, customResourceTarget, tc.expectedResources)
+		})
+	}
+}
+
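+// mockCustomResourcesProcessor treats any node whose name contains nodeMark as having unready
+// custom resources, and reports the configured resource targets only for such nodes.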
+type mockCustomResourcesProcessor struct {
+	nodeMark                      string
+	customResourceTargetsToAdd    []string
+	customResourceTargetsQuantity int64
+}
+
+func (m *mockCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(_ *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, _ *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
+	filteredReadyNodes := []*apiv1.Node{}
+	for _, node := range readyNodes {
+		if !strings.Contains(node.Name, m.nodeMark) {
+			filteredReadyNodes = append(filteredReadyNodes, node)
+		}
+	}
+	return allNodes, filteredReadyNodes
+}
+
+func (m *mockCustomResourcesProcessor) GetNodeResourceTargets(_ *context.AutoscalingContext, node *apiv1.Node, _ cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError) {
+	result := []CustomResourceTarget{}
+	if strings.Contains(node.Name, m.nodeMark) {
+		for _, rt := range m.customResourceTargetsToAdd {
+			result = append(result, CustomResourceTarget{ResourceType: rt, ResourceCount: m.customResourceTargetsQuantity})
+		}
+	}
+	return result, nil
+}
+
+func (m *mockCustomResourcesProcessor) CleanUp() {
+}
diff --git a/cluster-autoscaler/processors/customresources/dra_processor.go b/cluster-autoscaler/processors/customresources/dra_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..06d6e2759bce4ba87ce2280b4529106b3fea0e0e
--- /dev/null
+++ b/cluster-autoscaler/processors/customresources/dra_processor.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package customresources
+
+import (
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/api/resource/v1beta1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
+	"k8s.io/klog/v2"
+)
+
+// DraCustomResourcesProcessor handles DRA custom resources. It assumes that
+// DRA resources may not become allocatable immediately after node creation.
+type DraCustomResourcesProcessor struct {
+}
+
+// FilterOutNodesWithUnreadyResources removes from the ready nodes list nodes that should expose DRA
+// resource slices but have not yet published the slices expected from their node group template, and
+// updates their status to unready on the all nodes list.
+func (p *DraCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, draSnapshot *snapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
+	newAllNodes := make([]*apiv1.Node, 0)
+	newReadyNodes := make([]*apiv1.Node, 0)
+	nodesWithUnreadyDraResources := make(map[string]*apiv1.Node)
+	if draSnapshot == nil {
+		klog.Warningf("Cannot filter out nodes with unready DRA resources. The DRA snapshot is nil. Processing will be skipped.")
+		return allNodes, readyNodes
+	}
+
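+	// Compare the resource slices each ready node publishes with the slices expected from its node
+	// group template; nodes whose slices do not match yet are excluded from the ready list.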
+	for _, node := range readyNodes {
+		ng, err := context.CloudProvider.NodeGroupForNode(node)
+		if err != nil {
+			newReadyNodes = append(newReadyNodes, node)
+			klog.Warningf("Failed to get node group for node %s, skipping DRA readiness check and keeping the node in the ready list. Error: %v", node.Name, err)
+			continue
+		}
+		if ng == nil {
+			newReadyNodes = append(newReadyNodes, node)
+			continue
+		}
+
+		nodeInfo, err := ng.TemplateNodeInfo()
+		if err != nil {
+			newReadyNodes = append(newReadyNodes, node)
+			klog.Warningf("Failed to get template node info for node group %s with error: %v", ng.Id(), err)
+			continue
+		}
+
+		nodeResourcesSlices, _ := draSnapshot.NodeResourceSlices(node.Name)
+		if isEqualResourceSlices(nodeResourcesSlices, nodeInfo.LocalResourceSlices) {
+			newReadyNodes = append(newReadyNodes, node)
+		} else {
+			nodesWithUnreadyDraResources[node.Name] = kubernetes.GetUnreadyNodeCopy(node, kubernetes.ResourceUnready)
+		}
+	}
+
+	// Override any node with unready DRA resources with its "unready" copy
+	for _, node := range allNodes {
+		if newNode, found := nodesWithUnreadyDraResources[node.Name]; found {
+			newAllNodes = append(newAllNodes, newNode)
+		} else {
+			newAllNodes = append(newAllNodes, node)
+		}
+	}
+	return newAllNodes, newReadyNodes
+}
+
+type resourceSliceSpecs struct {
+	driver string
+	pool   string
+}
+
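+// isEqualResourceSlices reports whether every device set expected from the template slices is matched
+// by a node slice with the same driver and an identical set of device names. Pool names are not
+// compared, and extra slices published by the node are tolerated.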
+func isEqualResourceSlices(nodeResourcesSlices []*v1beta1.ResourceSlice, templateResourcesSlices []*v1beta1.ResourceSlice) bool {
+	tempSlicesByPools := getDevicesBySpecs(templateResourcesSlices)
+	nodeSlicesByPools := getDevicesBySpecs(nodeResourcesSlices)
+
+	for templSpecs, tempDevicesSet := range tempSlicesByPools {
+		matched := false
+		for nodeSpecs, nodeDevicesSet := range nodeSlicesByPools {
+			if templSpecs.driver == nodeSpecs.driver && nodeDevicesSet.Equal(tempDevicesSet) {
+				delete(nodeSlicesByPools, nodeSpecs)
+				matched = true
+				break
+			}
+		}
+		if !matched {
+			return false
+		}
+	}
+
+	return true
+}
+
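+// getDevicesBySpecs groups the device names of the given resource slices by their (driver, pool) pair.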
+func getDevicesBySpecs(resourcesSlices []*v1beta1.ResourceSlice) map[resourceSliceSpecs]sets.Set[string] {
+	slicesGroupedByPoolAndDriver := make(map[resourceSliceSpecs]sets.Set[string])
+	for _, rs := range resourcesSlices {
+		rsSpecs := resourceSliceSpecs{
+			pool:   rs.Spec.Pool.Name,
+			driver: rs.Spec.Driver,
+		}
+		slicesGroupedByPoolAndDriver[rsSpecs] = getResourceSliceDevicesSet(rs)
+	}
+	return slicesGroupedByPoolAndDriver
+}
+
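+// getResourceSliceDevicesSet returns the set of device names published in a single resource slice.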
+func getResourceSliceDevicesSet(resourcesSlice *v1beta1.ResourceSlice) sets.Set[string] {
+	devices := sets.New[string]()
+	for _, device := range resourcesSlice.Spec.Devices {
+		devices.Insert(device.Name)
+	}
+	return devices
+}
+
+// GetNodeResourceTargets returns the resource targets for DRA resource slices. It is not implemented yet.
+func (p *DraCustomResourcesProcessor) GetNodeResourceTargets(_ *context.AutoscalingContext, _ *apiv1.Node, _ cloudprovider.NodeGroup) ([]CustomResourceTarget, errors.AutoscalerError) {
+	// TODO(DRA): Figure out resource limits for DRA here.
+	return []CustomResourceTarget{}, nil
+}
+
+// CleanUp cleans up processor's internal structures.
+func (p *DraCustomResourcesProcessor) CleanUp() {
+}
diff --git a/cluster-autoscaler/processors/customresources/dra_processor_test.go b/cluster-autoscaler/processors/customresources/dra_processor_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..64b86c227fada2e50816a63ebff93cfa4d55ccbe
--- /dev/null
+++ b/cluster-autoscaler/processors/customresources/dra_processor_test.go
@@ -0,0 +1,399 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package customresources
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	resourceapi "k8s.io/api/resource/v1beta1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/store"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
+	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
+
+	"github.com/stretchr/testify/assert"
+	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	utils "k8s.io/autoscaler/cluster-autoscaler/utils/test"
+)
+
+func TestFilterOutNodesWithUnreadyDRAResources(t *testing.T) {
+	testCases := map[string]struct {
+		nodeGroupsAllNodes        map[string][]*apiv1.Node
+		nodeGroupsTemplatesSlices map[string][]*resourceapi.ResourceSlice
+		nodesSlices               map[string][]*resourceapi.ResourceSlice
+		expectedNodesReadiness    map[string]bool
+	}{
+		"1 DRA node group all totally ready": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+					buildTestNode("node_2_Dra_Ready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{1, 1}),
+				"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 1}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready": true,
+				"node_2_Dra_Ready": true,
+			},
+		},
+		"1 DRA node group, one initially unready": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+					buildTestNode("node_2_Dra_Ready", false),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{1, 1}),
+				"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 1}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready": true,
+				"node_2_Dra_Ready": false,
+			},
+		},
+		"1 DRA node group, one initially ready with unready resource": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+					buildTestNode("node_2_Dra_Ready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{1, 1}),
+				"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 0}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready": true,
+				"node_2_Dra_Ready": false,
+			},
+		},
+		"1 DRA node group, one initially ready with more resources than expected": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+					buildTestNode("node_2_Dra_Ready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready": createNodeResourceSlices("node_1_Dra_Ready", []int{1, 1}),
+				"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 3}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready": true,
+				"node_2_Dra_Ready": false,
+			},
+		},
+		"1 DRA node group, one initially ready with no slices": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+					buildTestNode("node_2_Dra_Ready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": createNodeResourceSlices("ng1_template", []int{1, 1}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready": {},
+				"node_2_Dra_Ready": createNodeResourceSlices("node_2_Dra_Ready", []int{1, 3}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready": false,
+				"node_2_Dra_Ready": false,
+			},
+		},
+		"1 DRA node group, single driver multiple pools, only one published": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": buildNodeResourceSlices("ng1_template", "driver", []int{2, 2, 2}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready": buildNodeResourceSlices("node_1_Dra_Ready", "driver", []int{2}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready": false,
+			},
+		},
+		"1 DRA node group, single driver multiple pools, more pools published including template pools": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_2_Dra_Ready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": buildNodeResourceSlices("ng1_template", "driver", []int{2, 2, 2}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_2_Dra_Ready": buildNodeResourceSlices("node_2_Dra_Ready", "driver", []int{2, 2, 2, 2}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_2_Dra_Ready": true,
+			},
+		},
+		"1 DRA node group, single driver multiple pools, more pools published not including template pools": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": buildNodeResourceSlices("ng1_template", "driver", []int{2, 2, 2}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready": buildNodeResourceSlices("node_1_Dra_Ready", "driver", []int{2, 2, 1, 2}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready": false,
+			},
+		},
+		"2 node groups, one DRA with 1 resource unready node": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+					buildTestNode("node_2_Dra_Ready", true),
+					buildTestNode("node_3_Dra_Unready", true),
+				},
+				"ng2": {
+					buildTestNode("node_4_NonDra_Ready", true),
+					buildTestNode("node_5_NonDra_Unready", false),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": createNodeResourceSlices("ng1_template", []int{2, 2}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready":   createNodeResourceSlices("node_1_Dra_Ready", []int{2, 2}),
+				"node_2_Dra_Ready":   createNodeResourceSlices("node_2_Dra_Ready", []int{2, 2}),
+				"node_3_Dra_Unready": createNodeResourceSlices("node_3_Dra_Unready", []int{2, 1}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready":      true,
+				"node_2_Dra_Ready":      true,
+				"node_3_Dra_Unready":    false,
+				"node_4_NonDra_Ready":   true,
+				"node_5_NonDra_Unready": false,
+			},
+		},
+		"2 DRA node groups, each with 1 resource unready node": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+					buildTestNode("node_2_Dra_Ready", true),
+					buildTestNode("node_3_Dra_Unready", true),
+				},
+				"ng2": {
+					buildTestNode("node_4_Dra_Ready", true),
+					buildTestNode("node_5_Dra_Unready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": createNodeResourceSlices("ng1_template", []int{2, 2}),
+				"ng2": createNodeResourceSlices("ng2_template", []int{3, 3}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready":   createNodeResourceSlices("node_1_Dra_Ready", []int{2, 2}),
+				"node_2_Dra_Ready":   createNodeResourceSlices("node_2_Dra_Ready", []int{2, 2}),
+				"node_3_Dra_Unready": createNodeResourceSlices("node_3_Dra_Unready", []int{2, 1}),
+				"node_4_Dra_Ready":   createNodeResourceSlices("node_4_Dra_Ready", []int{3, 3}),
+				"node_5_Dra_Unready": createNodeResourceSlices("node_5_Dra_Unready", []int{2, 1}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready":   true,
+				"node_2_Dra_Ready":   true,
+				"node_3_Dra_Unready": false,
+				"node_4_Dra_Ready":   true,
+				"node_5_Dra_Unready": false,
+			},
+		},
+		"2 DRA node group, single driver multiple pools, more pools published including template pools": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1_Dra_Ready", true),
+					buildTestNode("node_2_Dra_Ready", true),
+				},
+				"ng2": {
+					buildTestNode("node_3_Dra_Ready", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": buildNodeResourceSlices("ng1_template", "driver", []int{2, 2, 2}),
+				"ng2": buildNodeResourceSlices("ng2_template", "driver", []int{1, 1}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1_Dra_Ready": buildNodeResourceSlices("node_1_Dra_Ready", "driver", []int{2, 2, 2, 2}),
+				"node_2_Dra_Ready": buildNodeResourceSlices("node_2_Dra_Ready", "driver", []int{2, 2, 2}),
+				"node_3_Dra_Ready": buildNodeResourceSlices("node_3_Dra_Ready", "driver", []int{1, 1, 1}),
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1_Dra_Ready": true,
+				"node_2_Dra_Ready": true,
+				"node_3_Dra_Ready": true,
+			},
+		},
+		"All together": {
+			nodeGroupsAllNodes: map[string][]*apiv1.Node{
+				"ng1": {
+					buildTestNode("node_1", true),
+					buildTestNode("node_2", true),
+					buildTestNode("node_3", true),
+				},
+				"ng2": {
+					buildTestNode("node_4", false),
+					buildTestNode("node_5", true),
+				},
+				"ng3": {
+					buildTestNode("node_6", false),
+					buildTestNode("node_7", true),
+				},
+			},
+			nodeGroupsTemplatesSlices: map[string][]*resourceapi.ResourceSlice{
+				"ng1": createNodeResourceSlices("ng1_template", []int{2, 2}),
+				"ng2": createNodeResourceSlices("ng2_template", []int{3, 3}),
+			},
+			nodesSlices: map[string][]*resourceapi.ResourceSlice{
+				"node_1": createNodeResourceSlices("node_1", []int{2, 2, 2}),
+				"node_2": createNodeResourceSlices("node_2", []int{1}),
+				"node_3": createNodeResourceSlices("node_3", []int{1, 2}),
+				"node_4": createNodeResourceSlices("node_4", []int{3, 3}),
+				"node_5": {},
+			},
+			expectedNodesReadiness: map[string]bool{
+				"node_1": true,
+				"node_2": false,
+				"node_3": false,
+				"node_4": false,
+				"node_5": false,
+				"node_6": false,
+				"node_7": true,
+			},
+		},
+	}
+
+	for tcName, tc := range testCases {
+		t.Run(tcName, func(t *testing.T) {
+			provider := testprovider.NewTestCloudProviderBuilder().Build()
+			machineTemplates := map[string]*framework.NodeInfo{}
+			initialAllNodes := []*apiv1.Node{}
+			initialReadyNodes := []*apiv1.Node{}
+			for ng, nodes := range tc.nodeGroupsAllNodes {
+				machineName := fmt.Sprintf("%s_machine_template", ng)
+				if rs, found := tc.nodeGroupsTemplatesSlices[ng]; found {
+					machineTemplates[machineName] = framework.NewNodeInfo(buildTestNode(fmt.Sprintf("%s_template", ng), true), rs)
+				} else {
+					machineTemplates[machineName] = framework.NewTestNodeInfo(buildTestNode(fmt.Sprintf("%s_template", ng), true))
+				}
+				provider.AddAutoprovisionedNodeGroup(ng, 0, 20, len(nodes), machineName)
+				for _, node := range nodes {
+					initialAllNodes = append(initialAllNodes, node)
+					if getNodeReadiness(node) {
+						initialReadyNodes = append(initialReadyNodes, node)
+					}
+					provider.AddNode(ng, node)
+				}
+			}
+			provider.SetMachineTemplates(machineTemplates)
+			draSnapshot := drasnapshot.NewSnapshot(nil, tc.nodesSlices, nil, nil)
+			clusterSnapshotStore := store.NewBasicSnapshotStore()
+			clusterSnapshotStore.SetClusterState([]*apiv1.Node{}, []*apiv1.Pod{}, draSnapshot)
+			clusterSnapshot, _, _ := testsnapshot.NewCustomTestSnapshotAndHandle(clusterSnapshotStore)
+
+			ctx := &context.AutoscalingContext{CloudProvider: provider, ClusterSnapshot: clusterSnapshot}
+			processor := DraCustomResourcesProcessor{}
+			newAllNodes, newReadyNodes := processor.FilterOutNodesWithUnreadyResources(ctx, initialAllNodes, initialReadyNodes, draSnapshot)
+
+			readyNodes := make(map[string]bool)
+			for _, node := range newReadyNodes {
+				readyNodes[node.Name] = true
+			}
+
+			assert.Len(t, newAllNodes, len(initialAllNodes), "Total number of nodes should not change")
+			for _, node := range newAllNodes {
+				gotReadiness := getNodeReadiness(node)
+				assert.Equal(t, tc.expectedNodesReadiness[node.Name], gotReadiness)
+				assert.Equal(t, gotReadiness, readyNodes[node.Name])
+			}
+
+		})
+	}
+
+}
+
+func createNodeResourceSlices(nodeName string, numberOfDevicesInSlices []int) []*resourceapi.ResourceSlice {
+	return buildNodeResourceSlices(nodeName, "", numberOfDevicesInSlices)
+}
+
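+// buildNodeResourceSlices creates one resource slice per entry of numberOfDevicesInSlices for the given
+// node, each slice in its own pool; an empty driverName falls back to a generated driver name.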
+func buildNodeResourceSlices(nodeName, driverName string, numberOfDevicesInSlices []int) []*resourceapi.ResourceSlice {
+	numberOfSlices := len(numberOfDevicesInSlices)
+	resourceSlices := []*resourceapi.ResourceSlice{}
+	for sliceIndex := range numberOfSlices {
+		devices := []resourceapi.Device{}
+		for deviceIndex := range numberOfDevicesInSlices[sliceIndex] {
+			devices = append(devices, resourceapi.Device{Name: fmt.Sprintf("%d_%d", sliceIndex, deviceIndex)})
+		}
+		if driverName == "" {
+			driverName = fmt.Sprintf("driver_%d", sliceIndex)
+		}
+		spec := resourceapi.ResourceSliceSpec{
+			NodeName: nodeName,
+			Driver:   driverName,
+			Pool:     resourceapi.ResourcePool{Name: fmt.Sprintf("%s_pool_%d", nodeName, sliceIndex)},
+			Devices:  devices,
+		}
+		resourceSlices = append(resourceSlices, &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: nodeName}, Spec: spec})
+	}
+	return resourceSlices
+}
+
+func buildTestNode(nodeName string, ready bool) *apiv1.Node {
+	node := utils.BuildTestNode(nodeName, 500, 100)
+	utils.SetNodeReadyState(node, ready, time.Now().Add(-5*time.Minute))
+	return node
+}
+
+func getNodeReadiness(node *apiv1.Node) bool {
+	for i := range node.Status.Conditions {
+		if node.Status.Conditions[i].Type == apiv1.NodeReady {
+			return node.Status.Conditions[i].Status == apiv1.ConditionTrue
+		}
+	}
+	return false
+}
diff --git a/cluster-autoscaler/processors/customresources/gpu_processor.go b/cluster-autoscaler/processors/customresources/gpu_processor.go
index 1ca572f2dbdc3430e0fb145e07e8a40426156538..cb449cd4fd5edc0accc5b4312a3718e4dbd82fae 100644
--- a/cluster-autoscaler/processors/customresources/gpu_processor.go
+++ b/cluster-autoscaler/processors/customresources/gpu_processor.go
@@ -20,6 +20,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
+	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -36,7 +37,7 @@ type GpuCustomResourcesProcessor struct {
 // it in allocatable from ready nodes list and updates their status to unready on all nodes list.
 // This is a hack/workaround for nodes with GPU coming up without installed drivers, resulting
 // in GPU missing from their allocatable and capacity.
-func (p *GpuCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node) ([]*apiv1.Node, []*apiv1.Node) {
+func (p *GpuCustomResourcesProcessor) FilterOutNodesWithUnreadyResources(context *context.AutoscalingContext, allNodes, readyNodes []*apiv1.Node, _ *drasnapshot.Snapshot) ([]*apiv1.Node, []*apiv1.Node) {
 	newAllNodes := make([]*apiv1.Node, 0)
 	newReadyNodes := make([]*apiv1.Node, 0)
 	nodesWithUnreadyGpu := make(map[string]*apiv1.Node)
diff --git a/cluster-autoscaler/processors/customresources/gpu_processor_test.go b/cluster-autoscaler/processors/customresources/gpu_processor_test.go
index b76a38034a7a731775d23c7e1d05558df7708acf..0ae6853f4596f6f88b6358583ba7d947825a1964 100644
--- a/cluster-autoscaler/processors/customresources/gpu_processor_test.go
+++ b/cluster-autoscaler/processors/customresources/gpu_processor_test.go
@@ -170,10 +170,10 @@ func TestFilterOutNodesWithUnreadyResources(t *testing.T) {
 		nodeNoGpuUnready,
 	}
 
-	processor := NewDefaultCustomResourcesProcessor()
+	processor := GpuCustomResourcesProcessor{}
 	provider := testprovider.NewTestCloudProviderBuilder().Build()
 	ctx := &context.AutoscalingContext{CloudProvider: provider}
-	newAllNodes, newReadyNodes := processor.FilterOutNodesWithUnreadyResources(ctx, initialAllNodes, initialReadyNodes)
+	newAllNodes, newReadyNodes := processor.FilterOutNodesWithUnreadyResources(ctx, initialAllNodes, initialReadyNodes, nil)
 
 	foundInReady := make(map[string]bool)
 	for _, node := range newReadyNodes {
diff --git a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
index c28933637663f63c610a965fa0a2f382664473e8..9a28ea1ae3093c6d68ecaa1d4ca9bbfea13a4adc 100644
--- a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
+++ b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
@@ -28,7 +28,6 @@ import (
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
@@ -86,7 +85,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
 
 	nodes := []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1, ready7, readyToBeDeleted6}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	ctx := context.AutoscalingContext{
@@ -173,7 +172,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 
 	nodes := []*apiv1.Node{unready4, unready3, ready2, ready1}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	// Fill cache
@@ -264,7 +263,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
 
 	nodes := []*apiv1.Node{ready1}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	ctx := context.AutoscalingContext{
diff --git a/cluster-autoscaler/processors/processors.go b/cluster-autoscaler/processors/processors.go
index b391fed789b2375c14fc57066e2f4fc1321a53bf..3285590d323dee6dc5b138512979812b55e2f661 100644
--- a/cluster-autoscaler/processors/processors.go
+++ b/cluster-autoscaler/processors/processors.go
@@ -97,7 +97,7 @@ func DefaultProcessors(options config.AutoscalingOptions) *AutoscalingProcessors
 		NodeGroupManager:            nodegroups.NewDefaultNodeGroupManager(),
 		AsyncNodeGroupStateChecker:  asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker(),
 		NodeGroupConfigProcessor:    nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults),
-		CustomResourcesProcessor:    customresources.NewDefaultCustomResourcesProcessor(),
+		CustomResourcesProcessor:    customresources.NewDefaultCustomResourcesProcessor(options.DynamicResourceAllocationEnabled),
 		ActionableClusterProcessor:  actionablecluster.NewDefaultActionableClusterProcessor(),
 		TemplateNodeInfoProvider:    nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false),
 		ScaleDownCandidatesNotifier: scaledowncandidates.NewObserversList(),
diff --git a/cluster-autoscaler/processors/test/common.go b/cluster-autoscaler/processors/test/common.go
index 065b06d92f99cf753ea12fa6d7813249c97c8b2b..5afa5b2c8d770bfb75967866df0487f79380ccb2 100644
--- a/cluster-autoscaler/processors/test/common.go
+++ b/cluster-autoscaler/processors/test/common.go
@@ -52,7 +52,7 @@ func NewTestProcessors(context *context.AutoscalingContext) *processors.Autoscal
 		NodeGroupManager:            nodegroups.NewDefaultNodeGroupManager(),
 		TemplateNodeInfoProvider:    nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false),
 		NodeGroupConfigProcessor:    nodegroupconfig.NewDefaultNodeGroupConfigProcessor(context.NodeGroupDefaults),
-		CustomResourcesProcessor:    customresources.NewDefaultCustomResourcesProcessor(),
+		CustomResourcesProcessor:    customresources.NewDefaultCustomResourcesProcessor(true),
 		ActionableClusterProcessor:  actionablecluster.NewDefaultActionableClusterProcessor(),
 		ScaleDownCandidatesNotifier: scaledowncandidates.NewObserversList(),
 		ScaleStateNotifier:          nodegroupchange.NewNodeGroupChangeObserversList(),
diff --git a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
index fb6b98efec260397a40d0f4a8ccc256e151b9423..d184637a9026a2da6a9f7cd8d509f68f3697ebe2 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
@@ -77,7 +77,7 @@ type ClusterSnapshotStore interface {
 
 	// SetClusterState resets the snapshot to an unforked state and replaces the contents of the snapshot
 	// with the provided data. scheduledPods are correlated to their Nodes based on spec.NodeName.
-	SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error
+	SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error
 
 	// ForceAddPod adds the given Pod to the Node with the given nodeName inside the snapshot without checking scheduler predicates.
 	ForceAddPod(pod *apiv1.Pod, nodeName string) error
@@ -93,7 +93,7 @@ type ClusterSnapshotStore interface {
 	RemoveSchedulerNodeInfo(nodeName string) error
 
 	// DraSnapshot returns an interface that allows accessing and modifying the DRA objects in the snapshot.
-	DraSnapshot() drasnapshot.Snapshot
+	DraSnapshot() *drasnapshot.Snapshot
 
 	// Fork creates a fork of snapshot state. All modifications can later be reverted to moment of forking via Revert().
 	// Use WithForkedSnapshot() helper function instead if possible.
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
index 8c4f359327563a84094116f4c7d1ab5978516cc4..d347e4e7d2107a4f2aa94c7f7bf4dd21d0d989ae 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
@@ -56,6 +56,7 @@ func (s *PredicateSnapshot) GetNodeInfo(nodeName string) (*framework.NodeInfo, e
 	if err != nil {
 		return nil, err
 	}
+
 	if s.draEnabled {
 		return s.ClusterSnapshotStore.DraSnapshot().WrapSchedulerNodeInfo(schedNodeInfo)
 	}
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
index 1e4c5fbf885c9c695bd81c57c50b94ef9cdb2938..b384bcecc02f7637b7a9647aa3ccf05538ab8c52 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
@@ -23,7 +23,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -40,7 +39,7 @@ func BenchmarkAddNodeInfo(b *testing.B) {
 			b.Run(fmt.Sprintf("%s: AddNodeInfo() %d", snapshotName, tc), func(b *testing.B) {
 				for i := 0; i < b.N; i++ {
 					b.StopTimer()
-					assert.NoError(b, clusterSnapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+					assert.NoError(b, clusterSnapshot.SetClusterState(nil, nil, nil))
 					b.StartTimer()
 					for _, node := range nodes {
 						err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node))
@@ -62,7 +61,7 @@ func BenchmarkListNodeInfos(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc)
 			clusterSnapshot, err := snapshotFactory()
 			assert.NoError(b, err)
-			err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 			if err != nil {
 				assert.NoError(b, err)
 			}
@@ -92,14 +91,14 @@ func BenchmarkAddPods(b *testing.B) {
 			clustersnapshot.AssignTestPodsToNodes(pods, nodes)
 			clusterSnapshot, err := snapshotFactory()
 			assert.NoError(b, err)
-			err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 			assert.NoError(b, err)
 			b.ResetTimer()
 			b.Run(fmt.Sprintf("%s: ForceAddPod() 30*%d", snapshotName, tc), func(b *testing.B) {
 				for i := 0; i < b.N; i++ {
 					b.StopTimer()
 
-					err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+					err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 					if err != nil {
 						assert.NoError(b, err)
 					}
@@ -128,7 +127,7 @@ func BenchmarkForkAddRevert(b *testing.B) {
 				clustersnapshot.AssignTestPodsToNodes(pods, nodes)
 				clusterSnapshot, err := snapshotFactory()
 				assert.NoError(b, err)
-				err = clusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+				err = clusterSnapshot.SetClusterState(nodes, pods, nil)
 				assert.NoError(b, err)
 				tmpNode1 := BuildTestNode("tmp-1", 2000, 2000000)
 				tmpNode2 := BuildTestNode("tmp-2", 2000, 2000000)
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_dra_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_dra_benchmark_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..581e7ed5d10124df4f80f27a7ed603426b143ba4
--- /dev/null
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_dra_benchmark_test.go
@@ -0,0 +1,397 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package predicate
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/google/uuid"
+	apiv1 "k8s.io/api/core/v1"
+	resourceapi "k8s.io/api/resource/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apiserver/pkg/util/feature"
+	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
+	drautils "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/utils"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
+	featuretesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/kubernetes/pkg/features"
+
+	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
+)
+
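+// createTestResourceSlice returns a randomly named ResourceSlice for the given node, exposing
+// devicesPerSlice copies of the provided device template through a node-local pool that
+// advertises slicesPerNode slices for the given driver.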
+func createTestResourceSlice(nodeName string, devicesPerSlice int, slicesPerNode int, driver string, device resourceapi.BasicDevice) *resourceapi.ResourceSlice {
+	sliceId := uuid.New().String()
+	name := fmt.Sprintf("rs-%s", sliceId)
+	uid := types.UID(fmt.Sprintf("rs-%s-uid", sliceId))
+	devices := make([]resourceapi.Device, devicesPerSlice)
+	for deviceIndex := 0; deviceIndex < devicesPerSlice; deviceIndex++ {
+		deviceName := fmt.Sprintf("rs-dev-%s-%d", sliceId, deviceIndex)
+		deviceCopy := device
+		devices[deviceIndex] = resourceapi.Device{Name: deviceName, Basic: &deviceCopy}
+	}
+
+	return &resourceapi.ResourceSlice{
+		ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid},
+		Spec: resourceapi.ResourceSliceSpec{
+			NodeName: nodeName,
+			Driver:   driver,
+			Pool: resourceapi.ResourcePool{
+				Name:               nodeName,
+				ResourceSliceCount: int64(slicesPerNode),
+			},
+			Devices: devices,
+		},
+	}
+}
+
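+// createTestResourceClaim returns an unallocated ResourceClaim in the default namespace with
+// requestsPerClaim device requests, each asking for devicesPerRequest devices of the given
+// device class and restricted to the given driver via a CEL selector.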
+func createTestResourceClaim(requestsPerClaim int, devicesPerRequest int, driver string, deviceClass string) *resourceapi.ResourceClaim {
+	claimId := uuid.New().String()
+	name := fmt.Sprintf("claim-%s", claimId)
+	uid := types.UID(fmt.Sprintf("claim-%s-uid", claimId))
+	expression := fmt.Sprintf(`device.driver == "%s"`, driver)
+
+	requests := make([]resourceapi.DeviceRequest, requestsPerClaim)
+	for requestIndex := 0; requestIndex < requestsPerClaim; requestIndex++ {
+		requests[requestIndex] = resourceapi.DeviceRequest{
+			Name:            fmt.Sprintf("deviceRequest-%d", requestIndex),
+			DeviceClassName: deviceClass,
+			Selectors:       []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: expression}}},
+			AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
+			Count:           int64(devicesPerRequest),
+		}
+	}
+
+	return &resourceapi.ResourceClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid, Namespace: "default"},
+		Spec: resourceapi.ResourceClaimSpec{
+			Devices: resourceapi.DeviceClaim{Requests: requests},
+		},
+	}
+}
+
+// allocateResourceSlicesForClaim attempts to allocate devices from the provided ResourceSlices
+// to satisfy the requests in the given ResourceClaim. It iterates through the claim's device
+// requests and, for each request, tries to find enough available devices in the provided slices.
+//
+// The function returns a new ResourceClaim object with the allocation result (if successful)
+// and a boolean indicating whether all requests in the claim were satisfied.
+//
+// If not all requests can be satisfied with the given slices, the returned ResourceClaim will
+// have a partial or empty allocation, and the boolean will be false.
+// The original ResourceClaim object is not modified.
+func allocateResourceSlicesForClaim(claim *resourceapi.ResourceClaim, nodeName string, slices ...*resourceapi.ResourceSlice) (*resourceapi.ResourceClaim, bool) {
+	allocatedDevices := make([]resourceapi.DeviceRequestAllocationResult, 0, len(claim.Spec.Devices.Requests))
+	sliceIndex, deviceIndex := 0, 0
+	requestSatisfied := true
+
+allocationLoop:
+	for _, request := range claim.Spec.Devices.Requests {
+		for devicesRequired := request.Count; devicesRequired > 0; devicesRequired-- {
+			// Skipping resource slices until we find one with at least a single device available
+			for sliceIndex < len(slices) && deviceIndex >= len(slices[sliceIndex].Spec.Devices) {
+				sliceIndex++
+				deviceIndex = 0
+			}
+
+			// If the previous loop did not find a resource slice with at least a single available
+			// device and there is still a pending device request from the resource claim, terminate
+			// the allocation loop and indicate that the request wasn't fully satisfied.
+			if sliceIndex >= len(slices) {
+				requestSatisfied = false
+				break allocationLoop
+			}
+
+			slice := slices[sliceIndex]
+			device := slice.Spec.Devices[deviceIndex]
+			deviceAllocation := resourceapi.DeviceRequestAllocationResult{
+				Request: request.Name,
+				Driver:  slice.Spec.Driver,
+				Pool:    slice.Spec.Pool.Name,
+				Device:  device.Name,
+			}
+
+			allocatedDevices = append(allocatedDevices, deviceAllocation)
+			deviceIndex++
+		}
+	}
+
+	allocation := &resourceapi.AllocationResult{
+		NodeSelector: selectorForNode(nodeName),
+		Devices:      resourceapi.DeviceAllocationResult{Results: allocatedDevices},
+	}
+
+	return drautils.TestClaimWithAllocation(claim, allocation), requestSatisfied
+}
+
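+// selectorForNode returns a NodeSelector matching only the node with the given name via the
+// metadata.name field.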
+func selectorForNode(node string) *apiv1.NodeSelector {
+	return &apiv1.NodeSelector{
+		NodeSelectorTerms: []apiv1.NodeSelectorTerm{
+			{
+				MatchFields: []apiv1.NodeSelectorRequirement{
+					{
+						Key:      "metadata.name",
+						Operator: apiv1.NodeSelectorOpIn,
+						Values:   []string{node},
+					},
+				},
+			},
+		},
+	}
+}
+
+// BenchmarkScheduleRevert measures the performance of scheduling pods that interact with the Dynamic Resource
+// Allocation (DRA) API onto nodes within a cluster snapshot, followed by snapshot manipulation (fork, commit, revert).
+//
+// The benchmark iterates over various configurations, varying:
+// - The number of nodes in the initial snapshot.
+// - The number of pods being scheduled, categorized by whether they use shared or pod-owned ResourceClaims.
+// - The number of snapshot operations (Fork, Commit, Revert) performed before/after scheduling.
+//
+// For each configuration and snapshot type, the benchmark performs the following steps:
+//  1. Initializes a cluster snapshot with a predefined set of nodes, ResourceSlices, DeviceClasses, and pre-allocated ResourceClaims (both shared and potentially pod-owned).
+//  2. Iterates through a subset of the nodes based on the configuration.
+//  3. For each node:
+//     a. Performs the configured number of snapshot Forks.
+//     b. Adds the node's NodeInfo (including its ResourceSlices) to the snapshot.
+//     c. Schedules a configured number of pods that reference a shared ResourceClaim onto the node.
+//     d. Schedules a configured number of pods that reference their own pre-allocated pod-owned ResourceClaims onto the node.
+//     e. Performs the configured number of snapshot Commits.
+//     f. Performs the configured number of snapshot Reverts.
+//
+// This benchmark helps evaluate the efficiency of:
+// - Scheduling pods with different types of DRA claims.
+// - Adding nodes with DRA resources to the snapshot.
+// - The overhead of snapshot Fork, Commit, and Revert operations, especially in scenarios involving DRA objects.
+func BenchmarkScheduleRevert(b *testing.B) {
+	featuretesting.SetFeatureGateDuringTest(b, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
+
+	const maxNodesCount = 100
+	const devicesPerSlice = 100
+	const maxPodsCount = 100
+	const deviceClassName = "defaultClass"
+	const driverName = "driver.foo.com"
+
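+	// Configuration names follow the pattern <nodes>x<pods>[x<owned-claim pods>]/<claim kind>/<snapshot ops>.
+	// For example, "100x32x50/MixedClaims/ForkRevert" schedules 32 shared-claim pods and 50 owned-claim pods
+	// onto each of 100 nodes, forking the snapshot before and reverting it after processing every node.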
+	configurations := map[string]struct {
+		nodesCount int
+
+		sharedClaimPods int
+		ownedClaimPods  int
+		forks           int
+		commits         int
+		reverts         int
+	}{
+		// SHARED CLAIMS
+		"100x32/SharedClaims/ForkRevert":           {sharedClaimPods: 32, nodesCount: 100, forks: 1, reverts: 1},
+		"100x32/SharedClaims/ForkCommit":           {sharedClaimPods: 32, nodesCount: 100, forks: 1, commits: 1},
+		"100x32/SharedClaims/ForkForkCommitRevert": {sharedClaimPods: 32, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x32/SharedClaims/Fork":                 {sharedClaimPods: 32, nodesCount: 100, forks: 1},
+		"100x32/SharedClaims/Fork5Revert5":         {sharedClaimPods: 32, nodesCount: 100, forks: 5, reverts: 5},
+		"100x1/SharedClaims/ForkRevert":            {sharedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
+		"100x1/SharedClaims/ForkCommit":            {sharedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
+		"100x1/SharedClaims/ForkForkCommitRevert":  {sharedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x1/SharedClaims/Fork":                  {sharedClaimPods: 1, nodesCount: 100, forks: 1},
+		"100x1/SharedClaims/Fork5Revert5":          {sharedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
+		"10x32/SharedClaims/ForkRevert":            {sharedClaimPods: 32, nodesCount: 10, forks: 1, reverts: 1},
+		"10x32/SharedClaims/ForkCommit":            {sharedClaimPods: 32, nodesCount: 10, forks: 1, commits: 1},
+		"10x32/SharedClaims/ForkForkCommitRevert":  {sharedClaimPods: 32, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x32/SharedClaims/Fork":                  {sharedClaimPods: 32, nodesCount: 10, forks: 1},
+		"10x32/SharedClaims/Fork5Revert5":          {sharedClaimPods: 32, nodesCount: 10, forks: 5, reverts: 5},
+		"10x1/SharedClaims/ForkRevert":             {sharedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
+		"10x1/SharedClaims/ForkCommit":             {sharedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
+		"10x1/SharedClaims/ForkForkCommitRevert":   {sharedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x1/SharedClaims/Fork":                   {sharedClaimPods: 1, nodesCount: 10, forks: 1},
+		"10x1/SharedClaims/Fork5Revert5":           {sharedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
+		// POD OWNED CLAIMS
+		"100x100/OwnedClaims/ForkRevert":           {ownedClaimPods: 100, nodesCount: 100, forks: 1, reverts: 1},
+		"100x100/OwnedClaims/ForkCommit":           {ownedClaimPods: 100, nodesCount: 100, forks: 1, commits: 1},
+		"100x100/OwnedClaims/ForkForkCommitRevert": {ownedClaimPods: 100, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x100/OwnedClaims/Fork":                 {ownedClaimPods: 100, nodesCount: 100, forks: 1},
+		"100x100/OwnedClaims/Fork5Revert5":         {ownedClaimPods: 100, nodesCount: 100, forks: 5, reverts: 5},
+		"100x1/OwnedClaims/ForkRevert":             {ownedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
+		"100x1/OwnedClaims/ForkCommit":             {ownedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
+		"100x1/OwnedClaims/ForkForkCommitRevert":   {ownedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x1/OwnedClaims/Fork":                   {ownedClaimPods: 1, nodesCount: 100, forks: 1},
+		"100x1/OwnedClaims/Fork5Revert5":           {ownedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
+		"10x100/OwnedClaims/ForkRevert":            {ownedClaimPods: 100, nodesCount: 10, forks: 1, reverts: 1},
+		"10x100/OwnedClaims/ForkCommit":            {ownedClaimPods: 100, nodesCount: 10, forks: 1, commits: 1},
+		"10x100/OwnedClaims/ForkForkCommitRevert":  {ownedClaimPods: 100, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x100/OwnedClaims/Fork":                  {ownedClaimPods: 100, nodesCount: 10, forks: 1},
+		"10x100/OwnedClaims/Fork5Revert5":          {ownedClaimPods: 100, nodesCount: 10, forks: 5, reverts: 5},
+		"10x1/OwnedClaims/ForkRevert":              {ownedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
+		"10x1/OwnedClaims/ForkCommit":              {ownedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
+		"10x1/OwnedClaims/ForkForkCommitRevert":    {ownedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x1/OwnedClaims/Fork":                    {ownedClaimPods: 1, nodesCount: 10, forks: 1},
+		"10x1/OwnedClaims/Fork5Revert5":            {ownedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
+		// MIXED CLAIMS
+		"100x32x50/MixedClaims/ForkRevert":           {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1, reverts: 1},
+		"100x32x50/MixedClaims/ForkCommit":           {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1, commits: 1},
+		"100x32x50/MixedClaims/ForkForkCommitRevert": {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x32x50/MixedClaims/Fork":                 {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1},
+		"100x32x50/MixedClaims/Fork5Revert5":         {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 5, reverts: 5},
+		"100x1x1/MixedClaims/ForkRevert":             {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
+		"100x1x1/MixedClaims/ForkCommit":             {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
+		"100x1x1/MixedClaims/ForkForkCommitRevert":   {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x1x1/MixedClaims/Fork":                   {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1},
+		"100x1x1/MixedClaims/Fork5Revert5":           {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
+		"10x32x50/MixedClaims/ForkRevert":            {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1, reverts: 1},
+		"10x32x50/MixedClaims/ForkCommit":            {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1, commits: 1},
+		"10x32x50/MixedClaims/ForkForkCommitRevert":  {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x32x50/MixedClaims/Fork":                  {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1},
+		"10x32x50/MixedClaims/Fork5Revert5":          {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 5, reverts: 5},
+		"10x1x1/MixedClaims/ForkRevert":              {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
+		"10x1x1/MixedClaims/ForkCommit":              {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
+		"10x1x1/MixedClaims/ForkForkCommitRevert":    {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x1x1/MixedClaims/Fork":                    {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1},
+		"10x1x1/MixedClaims/Fork5Revert5":            {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
+	}
+
+	devicesClasses := map[string]*resourceapi.DeviceClass{
+		deviceClassName: {ObjectMeta: metav1.ObjectMeta{Name: deviceClassName, UID: "defaultClassUid"}},
+	}
+
+	nodeInfos := make([]*framework.NodeInfo, maxNodesCount)
+	sharedClaims := make([]*resourceapi.ResourceClaim, maxNodesCount)
+	ownedClaims := make([][]*resourceapi.ResourceClaim, maxNodesCount)
+	owningPods := make([][]*apiv1.Pod, maxNodesCount)
+	for nodeIndex := 0; nodeIndex < maxNodesCount; nodeIndex++ {
+		nodeName := fmt.Sprintf("node-%d", nodeIndex)
+		node := BuildTestNode(nodeName, 10000, 10000)
+		nodeSlice := createTestResourceSlice(node.Name, devicesPerSlice, 1, driverName, resourceapi.BasicDevice{})
+		nodeInfo := framework.NewNodeInfo(node, []*resourceapi.ResourceSlice{nodeSlice})
+
+		sharedClaim := createTestResourceClaim(devicesPerSlice, 1, driverName, deviceClassName)
+		sharedClaim, satisfied := allocateResourceSlicesForClaim(sharedClaim, nodeName, nodeSlice)
+		if !satisfied {
+			b.Errorf("Error during setup, claim allocation cannot be satisfied")
+		}
+
+		claimsOnNode := make([]*resourceapi.ResourceClaim, maxPodsCount)
+		podsOnNode := make([]*apiv1.Pod, maxPodsCount)
+		for podIndex := 0; podIndex < maxPodsCount; podIndex++ {
+			podName := fmt.Sprintf("pod-%d-%d", nodeIndex, podIndex)
+			ownedClaim := createTestResourceClaim(1, 1, driverName, deviceClassName)
+			pod := BuildTestPod(
+				podName,
+				1,
+				1,
+				WithResourceClaim(ownedClaim.Name, ownedClaim.Name, ""),
+			)
+
+			ownedClaim = drautils.TestClaimWithPodOwnership(pod, ownedClaim)
+			ownedClaim, satisfied := allocateResourceSlicesForClaim(ownedClaim, nodeName, nodeSlice)
+			if !satisfied {
+				b.Errorf("Error during setup, claim allocation cannot be satisfied")
+			}
+
+			podsOnNode[podIndex] = pod
+			claimsOnNode[podIndex] = ownedClaim
+		}
+
+		nodeInfos[nodeIndex] = nodeInfo
+		sharedClaims[nodeIndex] = sharedClaim
+		ownedClaims[nodeIndex] = claimsOnNode
+		owningPods[nodeIndex] = podsOnNode
+	}
+
+	b.ResetTimer()
+	for snapshotName, snapshotFactory := range snapshots {
+		b.Run(snapshotName, func(b *testing.B) {
+			for cfgName, cfg := range configurations {
+				b.Run(cfgName, func(b *testing.B) {
+					for i := 0; i < b.N; i++ {
+						snapshot, err := snapshotFactory()
+						if err != nil {
+							b.Errorf("Failed to create a snapshot: %v", err)
+						}
+
+						draSnapshot := drasnapshot.NewSnapshot(
+							nil,
+							nil,
+							nil,
+							devicesClasses,
+						)
+
+						draSnapshot.AddClaims(sharedClaims)
+						for nodeIndex := 0; nodeIndex < cfg.nodesCount; nodeIndex++ {
+							draSnapshot.AddClaims(ownedClaims[nodeIndex])
+						}
+
+						err = snapshot.SetClusterState(nil, nil, draSnapshot)
+						if err != nil {
+							b.Errorf("Failed to set cluster state: %v", err)
+						}
+
+						for nodeIndex := 0; nodeIndex < cfg.nodesCount; nodeIndex++ {
+							nodeInfo := nodeInfos[nodeIndex]
+							for i := 0; i < cfg.forks; i++ {
+								snapshot.Fork()
+							}
+
+							err := snapshot.AddNodeInfo(nodeInfo)
+							if err != nil {
+								b.Errorf("Failed to add node info to snapshot: %v", err)
+							}
+
+							sharedClaim := sharedClaims[nodeIndex]
+							for podIndex := 0; podIndex < cfg.sharedClaimPods; podIndex++ {
+								pod := BuildTestPod(
+									fmt.Sprintf("pod-%d", podIndex),
+									1,
+									1,
+									WithResourceClaim(sharedClaim.Name, sharedClaim.Name, ""),
+								)
+
+								err := snapshot.SchedulePod(pod, nodeInfo.Node().Name)
+								if err != nil {
+									b.Errorf(
+										"Failed to schedule a pod %s to node %s: %v",
+										pod.Name,
+										nodeInfo.Node().Name,
+										err,
+									)
+								}
+							}
+
+							for podIndex := 0; podIndex < cfg.ownedClaimPods; podIndex++ {
+								owningPod := owningPods[nodeIndex][podIndex]
+								err := snapshot.SchedulePod(owningPod, nodeInfo.Node().Name)
+								if err != nil {
+									b.Errorf(
+										"Failed to schedule a pod %s to node %s: %v",
+										owningPod.Name,
+										nodeInfo.Node().Name,
+										err,
+									)
+								}
+							}
+
+							for i := 0; i < cfg.commits; i++ {
+								snapshot.Commit()
+							}
+
+							for i := 0; i < cfg.reverts; i++ {
+								snapshot.Revert()
+							}
+						}
+					}
+				})
+			}
+		})
+	}
+}
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
index ad9b78638788a29c1ab96cf68107336bf068e2d4..d284d217ba3c6ef74481c93f3d6bcb0f095ecf1b 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
@@ -71,7 +71,7 @@ func extractNodes(nodeInfos []*framework.NodeInfo) []*apiv1.Node {
 type snapshotState struct {
 	nodes       []*apiv1.Node
 	podsByNode  map[string][]*apiv1.Pod
-	draSnapshot drasnapshot.Snapshot
+	draSnapshot *drasnapshot.Snapshot
 }
 
 func compareStates(t *testing.T, a, b snapshotState) {
@@ -102,11 +102,11 @@ func compareStates(t *testing.T, a, b snapshotState) {
 		t.Errorf("ResourceClaims().List(): unexpected diff (-want +got): %s", diff)
 	}
 
-	aSlices, err := a.draSnapshot.ResourceSlices().List()
+	aSlices, err := a.draSnapshot.ResourceSlices().ListWithDeviceTaintRules()
 	if err != nil {
 		t.Errorf("ResourceSlices().List(): unexpected error: %v", err)
 	}
-	bSlices, err := b.draSnapshot.ResourceSlices().List()
+	bSlices, err := b.draSnapshot.ResourceSlices().ListWithDeviceTaintRules()
 	if err != nil {
 		t.Errorf("ResourceSlices().List(): unexpected error: %v", err)
 	}
@@ -148,7 +148,9 @@ func startSnapshot(t *testing.T, snapshotFactory func() (clustersnapshot.Cluster
 			pods = append(pods, pod)
 		}
 	}
-	err = snapshot.SetClusterState(state.nodes, pods, state.draSnapshot.Clone())
+
+	draSnapshot := drasnapshot.CloneTestSnapshot(state.draSnapshot)
+	err = snapshot.SetClusterState(state.nodes, pods, draSnapshot)
 	assert.NoError(t, err)
 	return snapshot
 }
@@ -476,704 +478,707 @@ func validTestCases(t *testing.T, snapshotName string) []modificationTestCase {
 				podsByNode: map[string][]*apiv1.Pod{largeNode.Name: {withNodeName(largePod, largeNode.Name)}},
 			},
 		},
-	}
-
-	// Only the Basic store is compatible with DRA for now.
-	if snapshotName == "basic" {
-		// Uncomment to get logs from the DRA plugin.
-		// var fs flag.FlagSet
-		// klog.InitFlags(&fs)
-		//if err := fs.Set("v", "10"); err != nil {
-		//	t.Fatalf("Error while setting higher klog verbosity: %v", err)
-		//}
-		featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
-
-		testCases = append(testCases, []modificationTestCase{
-			{
-				name: "add empty nodeInfo with LocalResourceSlices",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
-				},
-				// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
-				modifiedState: snapshotState{
-					nodes:       []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		{
+			name: "add empty nodeInfo with LocalResourceSlices",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, nil, nil, deviceClasses),
 			},
-			{
-				name: "add nodeInfo with LocalResourceSlices and NeededResourceClaims",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				// The pod in the added NodeInfo references the shared claim already in the DRA snapshot, and a new pod-owned allocated claim that
-				// needs to be added to the DRA snapshot.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The shared claim should just get a reservation for the pod added in the DRA snapshot.
-				// The pod-owned claim should get added to the DRA snapshot, with a reservation for the pod.
-				// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
 			},
-			{
-				name: "adding LocalResourceSlices for an already tracked Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
-				},
-				// LocalResourceSlices for the Node already exist in the DRA snapshot, so trying to add them again should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
+			modifiedState: snapshotState{
+				nodes:       []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding already tracked pod-owned ResourceClaims is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The pod-owned claim already exists in the DRA snapshot, so trying to add it again should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "add nodeInfo with LocalResourceSlices and NeededResourceClaims",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "adding unallocated pod-owned ResourceClaims is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						podOwnedClaim.DeepCopy(),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod-owned claim isn't allocated, so AddNodeInfo should fail.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(podOwnedClaim, podWithClaims),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The pod in the added NodeInfo references the shared claim already in the DRA snapshot, and a new pod-owned allocated claim that
+			// needs to be added to the DRA snapshot.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "adding pod-owned ResourceClaims allocated to the wrong Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod-owned claim is allocated to a different Node than the one being added, so AddNodeInfo should fail.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode), podWithClaims),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The shared claim should just get a reservation for the pod added in the DRA snapshot.
+			// The pod-owned claim should get added to the DRA snapshot, with a reservation for the pod.
+			// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding a pod referencing a shared claim already at max reservations is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding LocalResourceSlices for an already tracked Node is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding a pod referencing its own claim without adding the claim is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod references a pod-owned claim that isn't present in the PodInfo - this should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
 			},
-			{
-				name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims",
-				// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
-				// One claim is shared, one is pod-owned.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
-						nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.RemoveNodeInfo(node.Name)
-				},
-				// LocalResourceSlices for the removed Node should get removed from the DRA snapshot.
-				// The pod-owned claim referenced by a pod from the removed Node should get removed from the DRA snapshot.
-				// The shared claim referenced by a pod from the removed Node should stay in the DRA snapshot, but the pod's reservation should be removed.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
+			// LocalResourceSlices for the Node already exist in the DRA snapshot, so trying to add them again should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims, then add it back",
-				// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
-						nil, deviceClasses),
-				},
-				// Remove the NodeInfo and then add it back to the snapshot.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.RemoveNodeInfo(node.Name); err != nil {
-						return err
-					}
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The state should be identical to the initial one after the modifications.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding already tracked pod-owned ResourceClaims is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "removing LocalResourceSlices for a non-existing Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.RemoveNodeInfo("wrong-name")
-				},
-				// The removed Node isn't in the snapshot, so this should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "schedule pod with NeededResourceClaims to an existing nodeInfo",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references, but they aren't allocated yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The pod should get added to the Node.
-				// The claims referenced by the Pod should get allocated and reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The pod-owned claim already exists in the DRA snapshot, so trying to add it again should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "schedule pod with NeededResourceClaims (some of them shared and already allocated) to an existing nodeInfo",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated, the pod-owned one isn't yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the pod-owned claim in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The pod should get added to the Node.
-				// The pod-owned claim referenced by the Pod should get allocated. Both claims referenced by the Pod should get reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding unallocated pod-owned ResourceClaims is an error",
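+			// Start with an empty cluster, with only the allocated shared claim tracked in the DRA snapshot.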
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "scheduling pod with failing DRA predicates is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot doesn't track one of the claims
-				// referenced by the Pod we're trying to schedule.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// SchedulePod should fail at checking scheduling predicates, because the DRA plugin can't find one of the claims.
-				wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					podOwnedClaim.DeepCopy(),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "scheduling pod referencing a shared claim already at max reservations is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated and at max reservations.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
-				wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePod, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The added pod-owned claim isn't allocated, so AddNodeInfo should fail.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(podOwnedClaim, podWithClaims),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "schedule pod with NeededResourceClaims to any Node (only one Node has ResourceSlices)",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
-				// that the pod references, but they aren't allocated yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if diff := cmp.Diff(node.Name, foundNodeName); diff != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output (-want +got): %s", diff)
-					}
-					return err
-				},
-				// The pod should get added to the Node.
-				// The claims referenced by the Pod should get allocated and reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{otherNode, node, largeNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding pod-owned ResourceClaims allocated to the wrong Node is an error",
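+			// Start with an empty cluster, with only the allocated shared claim tracked in the DRA snapshot.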
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "scheduling pod on any Node with failing DRA predicates is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot doesn't track one of the claims
-				// referenced by the Pod we're trying to schedule.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if foundNodeName != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
-					}
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The added pod-owned claim is allocated to a different Node than the one being added, so AddNodeInfo should fail.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode), podWithClaims),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "adding a pod referencing a shared claim already at max reservations is an error",
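+			// Start with an empty cluster, with the shared claim already allocated and at max reservations in the DRA snapshot.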
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					}, nil, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "adding a pod referencing its own claim without adding the claim is an error",
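+			// Start with an empty cluster, with the allocated shared claim tracked in the DRA snapshot but not the pod-owned one.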
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The added pod references a pod-owned claim that isn't present in the PodInfo - this should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims",
+			// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
+			// One claim is shared, one is pod-owned.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
+					nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.RemoveNodeInfo(node.Name)
+			},
+			// LocalResourceSlices for the removed Node should get removed from the DRA snapshot.
+			// The pod-owned claim referenced by a pod from the removed Node should get removed from the DRA snapshot.
+			// The shared claim referenced by a pod from the removed Node should stay in the DRA snapshot, but the pod's reservation should be removed.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
+			},
+		},
+		{
+			name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims, then add it back",
+			// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
+					nil, deviceClasses),
+			},
+			// Remove the NodeInfo and then add it back to the snapshot.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.RemoveNodeInfo(node.Name); err != nil {
 					return err
-				},
-				// SchedulePodOnAnyNodeMatching should fail at checking scheduling predicates for every Node, because the DRA plugin can't find one of the claims.
-				wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+				}
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "scheduling pod referencing a shared claim already at max reservations on any Node is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated and at max reservations.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if foundNodeName != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
-					}
+			// The state should be identical to the initial one after the modifications.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "removing LocalResourceSlices for a non-existing Node is an error",
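+			// Start with only LocalResourceSlices for a Node tracked in the DRA snapshot.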
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.RemoveNodeInfo("wrong-name")
+			},
+			// The removed Node isn't in the snapshot, so this should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims to an existing nodeInfo",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references, but they aren't allocated yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The pod should get added to the Node.
+			// The claims referenced by the Pod should get allocated and reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims (some of them shared and already allocated) to an existing nodeInfo",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated, the pod-owned one isn't yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the pod-owned claim in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The pod should get added to the Node.
+			// The pod-owned claim referenced by the Pod should get allocated. Both claims referenced by the Pod should get reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod with failing DRA predicates is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot doesn't track one of the claims
+			// referenced by the Pod we're trying to schedule.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// SchedulePod should fail at checking scheduling predicates, because the DRA plugin can't find one of the claims.
+			wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod referencing a shared claim already at max reservations is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated and at max reservations.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
+			wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePod, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims to any Node (only one Node has ResourceSlices)",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
+			// that the pod references, but they aren't allocated yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if diff := cmp.Diff(node.Name, foundNodeName); diff != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output (-want +got): %s", diff)
+				}
+				return err
+			},
+			// The pod should get added to the Node.
+			// The claims referenced by the Pod should get allocated and reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{otherNode, node, largeNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod on any Node with failing DRA predicates is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot doesn't track one of the claims
+			// referenced by the Pod we're trying to schedule.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if foundNodeName != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
+				}
+				return err
+			},
+			// SchedulePodOnAnyNodeMatching should fail at checking scheduling predicates for every Node, because the DRA plugin can't find one of the claims.
+			wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod referencing a shared claim already at max reservations on any Node is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated and at max reservations.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if foundNodeName != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
+				}
+				return err
+			},
+			// SchedulePodOnAnyNodeMatching should fail at trying to add a reservation to the shared claim for every Node.
+			wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePodOnAnyNodeMatching, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
+			},
+			// The unscheduled pod should be removed from the Node.
+			// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed, and the claims should be deallocated.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims and schedule it back",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
 					return err
-				},
-				// SchedulePodOnAnyNodeMatching should fail at trying to add a reservation to the shared claim for every Node.
-				wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePodOnAnyNodeMatching, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+				}
+				return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
-				},
-				// The unscheduled pod should be removed from the Node.
-				// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed, and the claims should be deallocated.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims and schedule it back",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
-						return err
-					}
-					return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods)",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods)",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
-				},
-				// The unscheduled pod should be removed from the Node.
-				// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed.
-				// The pod-owned claim should get deallocated, but the shared one shouldn't.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods) and schedule it back",
-				// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
-						return err
-					}
-					return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The unscheduled pod should be removed from the Node.
+			// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed.
+			// The pod-owned claim should get deallocated, but the shared one shouldn't.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "get/list NodeInfo with DRA objects",
-				// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods. There are other Nodes
-				// and pods in the cluster.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node, otherNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					nodeInfoDiffOpts := []cmp.Option{
-						// We don't care about this field staying the same, and it differs because it's a global counter bumped on every AddPod.
-						cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
-						cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
-						cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
-						cmpopts.SortSlices(func(i1, i2 *framework.NodeInfo) bool { return i1.Node().Name < i2.Node().Name }),
-						IgnoreObjectOrder[*resourceapi.ResourceClaim](),
-						IgnoreObjectOrder[*resourceapi.ResourceSlice](),
-					}
-
-					// Verify that GetNodeInfo works as expected.
-					nodeInfo, err := snapshot.GetNodeInfo(node.Name)
-					if err != nil {
-						return err
-					}
-					wantNodeInfo := framework.NewNodeInfo(node, resourceSlices,
-						framework.NewPodInfo(withNodeName(pod, node.Name), nil),
-						framework.NewPodInfo(withNodeName(podWithClaims, node.Name), []*resourceapi.ResourceClaim{
-							drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						}),
-					)
-					if diff := cmp.Diff(wantNodeInfo, nodeInfo, nodeInfoDiffOpts...); diff != "" {
-						t.Errorf("GetNodeInfo(): unexpected output (-want +got): %s", diff)
-					}
-
-					// Verify that ListNodeInfo works as expected.
-					nodeInfos, err := snapshot.ListNodeInfos()
-					if err != nil {
-						return err
-					}
-					wantNodeInfos := []*framework.NodeInfo{wantNodeInfo, framework.NewNodeInfo(otherNode, nil)}
-					if diff := cmp.Diff(wantNodeInfos, nodeInfos, nodeInfoDiffOpts...); diff != "" {
-						t.Errorf("ListNodeInfos(): unexpected output (-want +got): %s", diff)
-					}
-
-					return nil
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node, otherNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods) and schedule it back",
+			// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
+					return err
+				}
+				return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
+			},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "get/list NodeInfo with DRA objects",
+			// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods. There are other Nodes
+			// and pods in the cluster.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node, otherNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-		}...)
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				nodeInfoDiffOpts := []cmp.Option{
+					// We don't care about this field staying the same, and it differs because it's a global counter bumped on every AddPod.
+					cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
+					cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
+					cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
+					cmpopts.SortSlices(func(i1, i2 *framework.NodeInfo) bool { return i1.Node().Name < i2.Node().Name }),
+					IgnoreObjectOrder[*resourceapi.ResourceClaim](),
+					IgnoreObjectOrder[*resourceapi.ResourceSlice](),
+				}
+
+				// Verify that GetNodeInfo works as expected.
+				nodeInfo, err := snapshot.GetNodeInfo(node.Name)
+				if err != nil {
+					return err
+				}
+				wantNodeInfo := framework.NewNodeInfo(node, resourceSlices,
+					framework.NewPodInfo(withNodeName(pod, node.Name), nil),
+					framework.NewPodInfo(withNodeName(podWithClaims, node.Name), []*resourceapi.ResourceClaim{
+						drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					}),
+				)
+				if diff := cmp.Diff(wantNodeInfo, nodeInfo, nodeInfoDiffOpts...); diff != "" {
+					t.Errorf("GetNodeInfo(): unexpected output (-want +got): %s", diff)
+				}
+
+				// Verify that ListNodeInfo works as expected.
+				nodeInfos, err := snapshot.ListNodeInfos()
+				if err != nil {
+					return err
+				}
+				wantNodeInfos := []*framework.NodeInfo{wantNodeInfo, framework.NewNodeInfo(otherNode, nil)}
+				if diff := cmp.Diff(wantNodeInfos, nodeInfos, nodeInfoDiffOpts...); diff != "" {
+					t.Errorf("ListNodeInfos(): unexpected output (-want +got): %s", diff)
+				}
+
+				return nil
+			},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node, otherNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+	}
+
+	for i := range testCases {
+		if testCases[i].modifiedState.draSnapshot == nil {
+			testCases[i].modifiedState.draSnapshot = drasnapshot.NewEmptySnapshot()
+		}
+
+		if testCases[i].state.draSnapshot == nil {
+			testCases[i].state.draSnapshot = drasnapshot.NewEmptySnapshot()
+		}
 	}
 
 	return testCases
 }
 
 func TestForking(t *testing.T) {
-	node := BuildTestNode("specialNode-2", 10, 100)
+	// Uncomment to get logs from the DRA plugin.
+	// var fs flag.FlagSet
+	// klog.InitFlags(&fs)
+	// if err := fs.Set("v", "10"); err != nil {
+	//	t.Fatalf("Error while setting higher klog verbosity: %v", err)
+	// }
+	featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
 
+	node := BuildTestNode("specialNode-2", 10, 100)
 	for snapshotName, snapshotFactory := range snapshots {
 		for _, tc := range validTestCases(t, snapshotName) {
 			t.Run(fmt.Sprintf("%s: %s base", snapshotName, tc.name), func(t *testing.T) {
@@ -1300,7 +1305,7 @@ func TestSetClusterState(t *testing.T) {
 	nodes := clustersnapshot.CreateTestNodes(nodeCount)
 	pods := clustersnapshot.CreateTestPods(podCount)
 	podsByNode := clustersnapshot.AssignTestPodsToNodes(pods, nodes)
-	state := snapshotState{nodes: nodes, podsByNode: podsByNode}
+	state := snapshotState{nodes: nodes, podsByNode: podsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}
 
 	extraNodes := clustersnapshot.CreateTestNodesWithPrefix("extra", extraNodeCount)
 
@@ -1323,9 +1328,9 @@ func TestSetClusterState(t *testing.T) {
 				snapshot := startSnapshot(t, snapshotFactory, state)
 				compareStates(t, state, getSnapshotState(t, snapshot))
 
-				assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
-				compareStates(t, snapshotState{}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 			})
 		t.Run(fmt.Sprintf("%s: clear base %d nodes %d pods and set a new state", name, nodeCount, podCount),
 			func(t *testing.T) {
@@ -1334,9 +1339,9 @@ func TestSetClusterState(t *testing.T) {
 
 				newNodes, newPods := clustersnapshot.CreateTestNodes(13), clustersnapshot.CreateTestPods(37)
 				newPodsByNode := clustersnapshot.AssignTestPodsToNodes(newPods, newNodes)
-				assert.NoError(t, snapshot.SetClusterState(newNodes, newPods, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(newNodes, newPods, nil))
 
-				compareStates(t, snapshotState{nodes: newNodes, podsByNode: newPodsByNode}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{nodes: newNodes, podsByNode: newPodsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 			})
 		t.Run(fmt.Sprintf("%s: clear fork %d nodes %d pods %d extra nodes %d extra pods", name, nodeCount, podCount, extraNodeCount, extraPodCount),
 			func(t *testing.T) {
@@ -1355,11 +1360,11 @@ func TestSetClusterState(t *testing.T) {
 					assert.NoError(t, err)
 				}
 
-				compareStates(t, snapshotState{nodes: allNodes, podsByNode: allPodsByNode}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{nodes: allNodes, podsByNode: allPodsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 
-				assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
-				compareStates(t, snapshotState{}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 
 				// SetClusterState() should break out of forked state.
 				snapshot.Fork()
@@ -1768,7 +1773,7 @@ func TestPVCClearAndFork(t *testing.T) {
 			volumeExists := snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1"))
 			assert.Equal(t, true, volumeExists)
 
-			assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+			assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 			volumeExists = snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1"))
 			assert.Equal(t, false, volumeExists)
 
@@ -1777,6 +1782,14 @@ func TestPVCClearAndFork(t *testing.T) {
 }
 
 func TestWithForkedSnapshot(t *testing.T) {
+	// Uncomment to get logs from the DRA plugin.
+	// var fs flag.FlagSet
+	// klog.InitFlags(&fs)
+	// if err := fs.Set("v", "10"); err != nil {
+	//	t.Fatalf("Error while setting higher klog verbosity: %v", err)
+	// }
+	featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
+
 	err := fmt.Errorf("some error")
 	for snapshotName, snapshotFactory := range snapshots {
 		for _, tc := range validTestCases(t, snapshotName) {
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/basic.go b/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
index 8c62b720685b67c3bbe3c685b940a7ec4f638372..61abc684b79e4f1fabf6b6d1b6e86d9e6462fd4b 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
@@ -29,13 +29,13 @@ import (
 // BasicSnapshotStore is simple, reference implementation of ClusterSnapshotStore.
 // It is inefficient. But hopefully bug-free and good for initial testing.
 type BasicSnapshotStore struct {
-	data []*internalBasicSnapshotData
+	data        []*internalBasicSnapshotData
+	draSnapshot *drasnapshot.Snapshot
 }
 
 type internalBasicSnapshotData struct {
 	nodeInfoMap        map[string]*schedulerframework.NodeInfo
 	pvcNamespacePodMap map[string]map[string]bool
-	draSnapshot        drasnapshot.Snapshot
 }
 
 func (data *internalBasicSnapshotData) listNodeInfos() []*schedulerframework.NodeInfo {
@@ -142,7 +142,6 @@ func (data *internalBasicSnapshotData) clone() *internalBasicSnapshotData {
 	return &internalBasicSnapshotData{
 		nodeInfoMap:        clonedNodeInfoMap,
 		pvcNamespacePodMap: clonedPvcNamespaceNodeMap,
-		draSnapshot:        data.draSnapshot.Clone(),
 	}
 }
 
@@ -208,8 +207,8 @@ func (snapshot *BasicSnapshotStore) getInternalData() *internalBasicSnapshotData
 }
 
 // DraSnapshot returns the DRA snapshot.
-func (snapshot *BasicSnapshotStore) DraSnapshot() drasnapshot.Snapshot {
-	return snapshot.getInternalData().draSnapshot
+func (snapshot *BasicSnapshotStore) DraSnapshot() *drasnapshot.Snapshot {
+	return snapshot.draSnapshot
 }
 
 // AddSchedulerNodeInfo adds a NodeInfo.
@@ -226,7 +225,7 @@ func (snapshot *BasicSnapshotStore) AddSchedulerNodeInfo(nodeInfo *schedulerfram
 }
 
 // SetClusterState sets the cluster state.
-func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error {
+func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error {
 	snapshot.clear()
 
 	knownNodes := make(map[string]bool)
@@ -243,7 +242,13 @@ func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, schedul
 			}
 		}
 	}
-	snapshot.getInternalData().draSnapshot = draSnapshot
+
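+	// Passing a nil DRA snapshot is allowed and treated as "no DRA state":
+	// the store falls back to an empty snapshot so later Fork/Revert/Commit
+	// calls never have to nil-check it.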
+	if draSnapshot == nil {
+		snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
+	} else {
+		snapshot.draSnapshot = draSnapshot
+	}
+
 	return nil
 }
 
@@ -271,6 +276,7 @@ func (snapshot *BasicSnapshotStore) IsPVCUsedByPods(key string) bool {
 func (snapshot *BasicSnapshotStore) Fork() {
 	forkData := snapshot.getInternalData().clone()
 	snapshot.data = append(snapshot.data, forkData)
+	snapshot.draSnapshot.Fork()
 }
 
 // Revert reverts snapshot state to moment of forking.
@@ -279,6 +285,7 @@ func (snapshot *BasicSnapshotStore) Revert() {
 		return
 	}
 	snapshot.data = snapshot.data[:len(snapshot.data)-1]
+	snapshot.draSnapshot.Revert()
 }
 
 // Commit commits changes done after forking.
@@ -288,6 +295,7 @@ func (snapshot *BasicSnapshotStore) Commit() error {
 		return nil
 	}
 	snapshot.data = append(snapshot.data[:len(snapshot.data)-2], snapshot.data[len(snapshot.data)-1])
+	snapshot.draSnapshot.Commit()
 	return nil
 }
 
@@ -295,6 +303,7 @@ func (snapshot *BasicSnapshotStore) Commit() error {
 func (snapshot *BasicSnapshotStore) clear() {
 	baseData := newInternalBasicSnapshotData()
 	snapshot.data = []*internalBasicSnapshotData{baseData}
+	snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
 }
 
 // implementation of SharedLister interface
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/delta.go b/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
index 67c1bc67fe3b524723aa779732516688a3cc9914..6d0d5c7e76182357b80d4489c5539e864890c0e3 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
@@ -41,12 +41,18 @@ import (
 //
 // Watch out for:
 //
-//	node deletions, pod additions & deletions - invalidates cache of current snapshot
-//		(when forked affects delta, but not base.)
-//	pod affinity - causes scheduler framework to list pods with non-empty selector,
-//		so basic caching doesn't help.
+// * Node deletions, pod additions & deletions - invalidates cache of current snapshot
+// (when forked affects delta, but not base.)
+//
+// * Pod affinity - causes scheduler framework to list pods with non-empty selector,
+// so basic caching doesn't help.
+//
+// * DRA objects are tracked in a separate snapshot. It doesn't share the exact
+// memory and time complexities of DeltaSnapshotStore, but it is optimized for
+// Cluster Autoscaler operations.
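+//
+// Fork, Revert and Commit on the store are propagated to the DRA snapshot, so
+// both views stay layered consistently.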
 type DeltaSnapshotStore struct {
 	data        *internalDeltaSnapshotData
+	draSnapshot *drasnapshot.Snapshot
 	parallelism int
 }
 
@@ -321,6 +327,7 @@ func (data *internalDeltaSnapshotData) commit() (*internalDeltaSnapshotData, err
 			return nil, err
 		}
 	}
+
 	return data.baseData, nil
 }
 
@@ -419,9 +426,8 @@ func NewDeltaSnapshotStore(parallelism int) *DeltaSnapshotStore {
 }
 
 // DraSnapshot returns the DRA snapshot.
-func (snapshot *DeltaSnapshotStore) DraSnapshot() drasnapshot.Snapshot {
-	// TODO(DRA): Return DRA snapshot.
-	return drasnapshot.Snapshot{}
+func (snapshot *DeltaSnapshotStore) DraSnapshot() *drasnapshot.Snapshot {
+	return snapshot.draSnapshot
 }
 
 // AddSchedulerNodeInfo adds a NodeInfo.
@@ -469,7 +475,7 @@ func (snapshot *DeltaSnapshotStore) setClusterStatePodsParallelized(nodeInfos []
 }
 
 // SetClusterState sets the cluster state.
-func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error {
+func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error {
 	snapshot.clear()
 
 	nodeNameToIdx := make(map[string]int, len(nodes))
@@ -494,7 +500,12 @@ func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, schedul
 	// Clear caches after adding pods.
 	snapshot.data.clearCaches()
 
-	// TODO(DRA): Save DRA snapshot.
+	if draSnapshot == nil {
+		snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
+	} else {
+		snapshot.draSnapshot = draSnapshot
+	}
+
 	return nil
 }
 
@@ -522,6 +533,7 @@ func (snapshot *DeltaSnapshotStore) IsPVCUsedByPods(key string) bool {
 // Time: O(1)
 func (snapshot *DeltaSnapshotStore) Fork() {
 	snapshot.data = snapshot.data.fork()
+	snapshot.draSnapshot.Fork()
 }
 
 // Revert reverts snapshot state to moment of forking.
@@ -530,6 +542,7 @@ func (snapshot *DeltaSnapshotStore) Revert() {
 	if snapshot.data.baseData != nil {
 		snapshot.data = snapshot.data.baseData
 	}
+	snapshot.draSnapshot.Revert()
 }
 
 // Commit commits changes done after forking.
@@ -540,6 +553,7 @@ func (snapshot *DeltaSnapshotStore) Commit() error {
 		return err
 	}
 	snapshot.data = newData
+	snapshot.draSnapshot.Commit()
 	return nil
 }
 
@@ -547,4 +561,5 @@ func (snapshot *DeltaSnapshotStore) Commit() error {
 // Time: O(1)
 func (snapshot *DeltaSnapshotStore) clear() {
 	snapshot.data = newInternalDeltaSnapshotData()
+	snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
 }
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
index 5f618befd180c3f33958b68be468f81d33faf38e..0d426b7e39b11eb3c332c2822640d9a97cdd7771 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
@@ -23,7 +23,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
@@ -49,7 +48,7 @@ func BenchmarkBuildNodeInfoList(b *testing.B) {
 		b.Run(fmt.Sprintf("fork add 1000 to %d", tc.nodeCount), func(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc.nodeCount + 1000)
 			deltaStore := NewDeltaSnapshotStore(16)
-			if err := deltaStore.SetClusterState(nodes[:tc.nodeCount], nil, drasnapshot.Snapshot{}); err != nil {
+			if err := deltaStore.SetClusterState(nodes[:tc.nodeCount], nil, nil); err != nil {
 				assert.NoError(b, err)
 			}
 			deltaStore.Fork()
@@ -71,7 +70,7 @@ func BenchmarkBuildNodeInfoList(b *testing.B) {
 		b.Run(fmt.Sprintf("base %d", tc.nodeCount), func(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc.nodeCount)
 			deltaStore := NewDeltaSnapshotStore(16)
-			if err := deltaStore.SetClusterState(nodes, nil, drasnapshot.Snapshot{}); err != nil {
+			if err := deltaStore.SetClusterState(nodes, nil, nil); err != nil {
 				assert.NoError(b, err)
 			}
 			b.ResetTimer()
diff --git a/cluster-autoscaler/simulator/clustersnapshot/test_utils.go b/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
index b98c9e117e1f034909b42e808443f34d572fb9e4..d95a3e6437c405c2786c0054070817b3193f28fe 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
@@ -25,7 +25,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	apiv1 "k8s.io/api/core/v1"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -39,7 +38,7 @@ func InitializeClusterSnapshotOrDie(
 	pods []*apiv1.Pod) {
 	var err error
 
-	assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+	assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
 	for _, node := range nodes {
 		err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node))
diff --git a/cluster-autoscaler/simulator/common/patch.go b/cluster-autoscaler/simulator/common/patch.go
new file mode 100644
index 0000000000000000000000000000000000000000..dfd3923264a63101c7f9ebdc8c7eda7e7ed6953c
--- /dev/null
+++ b/cluster-autoscaler/simulator/common/patch.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Patch represents a single layer of modifications (additions/updates)
+// and deletions for a set of key-value pairs. It's used as a building
+// block for the PatchSet.
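+//
+// Illustrative sketch (not part of this change):
+//
+//	p := NewPatch[string, int]()
+//	p.Set("a", 1)
+//	p.Delete("a")          // removes the modification and records a deletion
+//	_, found := p.Get("a") // found == false
+//	_ = p.IsDeleted("a")   // true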
+type Patch[K comparable, V any] struct {
+	modified map[K]V
+	deleted  map[K]bool
+}
+
+// Set marks a key-value pair as modified in the current patch.
+// If the key was previously marked as deleted, the deletion mark is removed.
+func (p *Patch[K, V]) Set(key K, value V) {
+	p.modified[key] = value
+	delete(p.deleted, key)
+}
+
+// Delete marks a key as deleted in the current patch.
+// If the key was previously marked as modified, the modification is removed.
+func (p *Patch[K, V]) Delete(key K) {
+	delete(p.modified, key)
+	p.deleted[key] = true
+}
+
+// Get retrieves the modified value for a key within this specific patch.
+// It returns the value and true if the key was found in this patch
+// or zero value and false otherwise.
+func (p *Patch[K, V]) Get(key K) (value V, found bool) {
+	value, found = p.modified[key]
+	return value, found
+}
+
+// IsDeleted checks if the key is marked as deleted within this specific patch.
+func (p *Patch[K, V]) IsDeleted(key K) bool {
+	return p.deleted[key]
+}
+
+// NewPatch creates a new, empty patch with no modifications or deletions.
+func NewPatch[K comparable, V any]() *Patch[K, V] {
+	return &Patch[K, V]{
+		modified: make(map[K]V),
+		deleted:  make(map[K]bool),
+	}
+}
+
+// NewPatchFromMap creates a new patch initialized with the data from the provided map.
+// The supplied entries are recorded as modifications in the patch; the map is used
+// directly rather than copied.
+func NewPatchFromMap[M ~map[K]V, K comparable, V any](source M) *Patch[K, V] {
+	if source == nil {
+		source = make(M)
+	}
+
+	return &Patch[K, V]{
+		modified: source,
+		deleted:  make(map[K]bool),
+	}
+}
+
+// mergePatchesInPlace merges patch b into patch a, modifying a in place.
+// When both patches contain a record for the same key, the record from b wins.
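+//
+// Illustrative sketch (not part of this change):
+//
+//	a := NewPatchFromMap(map[string]int{"x": 1, "y": 2})
+//	b := NewPatch[string, int]()
+//	b.Set("x", 10)
+//	b.Delete("y")
+//	_ = mergePatchesInPlace(a, b) // a now has modified{"x": 10} and deleted{"y"}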
+func mergePatchesInPlace[K comparable, V any](a *Patch[K, V], b *Patch[K, V]) *Patch[K, V] {
+	for key, value := range b.modified {
+		a.Set(key, value)
+	}
+
+	for key := range b.deleted {
+		a.Delete(key)
+	}
+
+	return a
+}
diff --git a/cluster-autoscaler/simulator/common/patch_test.go b/cluster-autoscaler/simulator/common/patch_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..30a6d778a5c8102423b77c2bfd9aeb88d7de8bd8
--- /dev/null
+++ b/cluster-autoscaler/simulator/common/patch_test.go
@@ -0,0 +1,324 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"maps"
+	"testing"
+)
+
+func TestPatchOperations(t *testing.T) {
+	p := NewPatch[string, int]()
+
+	// 1. Get and IsDeleted on non-existent key
+	if _, found := p.Get("a"); found {
+		t.Errorf("Get for 'a' should not find anything")
+	}
+	if p.IsDeleted("a") {
+		t.Errorf("IsDeleted for 'a' should be false")
+	}
+
+	// 2. Set 'a' key
+	p.Set("a", 1)
+	if val, found := p.Get("a"); !found || val != 1 {
+		t.Errorf("Get('a') = %v,%v, expected 1,true", val, found)
+	}
+	if p.IsDeleted("a") {
+		t.Errorf("IsDeleted('a') = true, should be false")
+	}
+
+	// 3. Overwrite 'a' key
+	p.Set("a", 2)
+	if val, found := p.Get("a"); !found || val != 2 {
+		t.Errorf("Get('a') = %v,%v, expected 2,true", val, found)
+	}
+	if p.IsDeleted("a") {
+		t.Errorf("IsDeleted('a') = true, should be false")
+	}
+
+	// 4. Delete 'a' key
+	p.Delete("a")
+	if val, found := p.Get("a"); found {
+		t.Errorf("Get('a') = %v,%v, should not find anything after delete", val, found)
+	}
+	if !p.IsDeleted("a") {
+		t.Errorf("IsDeleted('a') = false, should be true")
+	}
+
+	// 5. Set 'a' key again after deletion
+	p.Set("a", 3)
+	if val, found := p.Get("a"); !found || val != 3 {
+		t.Errorf("Get('a') = %v,%v, expected 3,true", val, found)
+	}
+	if p.IsDeleted("a") {
+		t.Errorf("IsDeleted('a') = true, should be false")
+	}
+
+	// 6. Delete a non-existent key 'c'
+	p.Delete("c")
+	if val, found := p.Get("c"); found {
+		t.Errorf("Get('c') = %v, %v, should not find anything", val, found)
+	}
+	if !p.IsDeleted("c") {
+		t.Errorf("IsDeleted('c') = false, should be true")
+	}
+}
+
+func TestCreatePatch(t *testing.T) {
+	tests := map[string]struct {
+		sourceMap    map[string]int
+		addKeys      map[string]int
+		deleteKeys   []string
+		wantModified map[string]int
+		wantDeleted  map[string]bool
+	}{
+		"SourceMapOnlyNoModifications": {
+			sourceMap:    map[string]int{"k1": 1, "k2": 2},
+			addKeys:      map[string]int{},
+			deleteKeys:   []string{},
+			wantModified: map[string]int{"k1": 1, "k2": 2},
+			wantDeleted:  map[string]bool{},
+		},
+		"NilSourceMapAddAndDelete": {
+			sourceMap:    nil,
+			addKeys:      map[string]int{"a": 1, "b": 2},
+			deleteKeys:   []string{"b"},
+			wantModified: map[string]int{"a": 1},
+			wantDeleted:  map[string]bool{"b": true},
+		},
+		"EmptySourceMapAddAndDelete": {
+			sourceMap:    map[string]int{},
+			addKeys:      map[string]int{"x": 10},
+			deleteKeys:   []string{"y"},
+			wantModified: map[string]int{"x": 10},
+			wantDeleted:  map[string]bool{"y": true},
+		},
+		"NonEmptySourceMapAddOverwriteDelete": {
+			sourceMap:    map[string]int{"orig1": 100, "orig2": 200},
+			addKeys:      map[string]int{"new1": 300, "orig1": 101},
+			deleteKeys:   []string{"orig2", "new1"},
+			wantModified: map[string]int{"orig1": 101},
+			wantDeleted:  map[string]bool{"orig2": true, "new1": true},
+		},
+		"DeleteKeyFromSourceMap": {
+			sourceMap:    map[string]int{"key_to_delete": 70, "key_to_keep": 80},
+			addKeys:      map[string]int{},
+			deleteKeys:   []string{"key_to_delete"},
+			wantModified: map[string]int{"key_to_keep": 80},
+			wantDeleted:  map[string]bool{"key_to_delete": true},
+		},
+		"AddOnlyNoSourceMap": {
+			sourceMap:    nil,
+			addKeys:      map[string]int{"add1": 10, "add2": 20},
+			deleteKeys:   []string{},
+			wantModified: map[string]int{"add1": 10, "add2": 20},
+			wantDeleted:  map[string]bool{},
+		},
+		"DeleteOnlyNoSourceMap": {
+			sourceMap:    nil,
+			addKeys:      map[string]int{},
+			deleteKeys:   []string{"del1", "del2"},
+			wantModified: map[string]int{},
+			wantDeleted:  map[string]bool{"del1": true, "del2": true},
+		},
+		"DeleteKeyNotPresentInSourceOrAdded": {
+			sourceMap:    map[string]int{"a": 1},
+			addKeys:      map[string]int{"b": 2},
+			deleteKeys:   []string{"c"},
+			wantModified: map[string]int{"a": 1, "b": 2},
+			wantDeleted:  map[string]bool{"c": true},
+		},
+		"AddKeyThenDeleteIt": {
+			sourceMap:    map[string]int{"base": 100},
+			addKeys:      map[string]int{"temp": 50},
+			deleteKeys:   []string{"temp"},
+			wantModified: map[string]int{"base": 100},
+			wantDeleted:  map[string]bool{"temp": true},
+		},
+		"AllOperations": {
+			sourceMap:    map[string]int{"s1": 1, "s2": 2, "s3": 3},
+			addKeys:      map[string]int{"a1": 10, "s2": 22, "a2": 20},
+			deleteKeys:   []string{"s1", "a1", "nonexistent"},
+			wantModified: map[string]int{"s2": 22, "s3": 3, "a2": 20},
+			wantDeleted:  map[string]bool{"s1": true, "a1": true, "nonexistent": true},
+		},
+		"NoOperationsEmptySource": {
+			sourceMap:    map[string]int{},
+			addKeys:      map[string]int{},
+			deleteKeys:   []string{},
+			wantModified: map[string]int{},
+			wantDeleted:  map[string]bool{},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			p := NewPatchFromMap(test.sourceMap)
+
+			for k, v := range test.addKeys {
+				p.Set(k, v)
+			}
+			for _, k := range test.deleteKeys {
+				p.Delete(k)
+			}
+
+			if !maps.Equal(p.modified, test.wantModified) {
+				t.Errorf("Modified map mismatch: got %v, want %v", p.modified, test.wantModified)
+			}
+
+			if !maps.Equal(p.deleted, test.wantDeleted) {
+				t.Errorf("Deleted map mismatch: got %v, want %v", p.deleted, test.wantDeleted)
+			}
+		})
+	}
+}
+
+func TestMergePatchesInPlace(t *testing.T) {
+	tests := map[string]struct {
+		modifiedPatchA     map[string]int
+		deletedPatchA      map[string]bool
+		modifiedPatchB     map[string]int
+		deletedPatchB      map[string]bool
+		wantModifiedMerged map[string]int
+		wantDeletedMerged  map[string]bool
+	}{
+		"PatchBOverwritesValueInPatchA": {
+			modifiedPatchA:     map[string]int{"a": 1, "b": 2},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{"b": 22, "c": 3},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"a": 1, "b": 22, "c": 3},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"PatchBDeletesValuePresentInPatchA": {
+			modifiedPatchA:     map[string]int{"a": 1, "b": 2},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{"a": true},
+			wantModifiedMerged: map[string]int{"b": 2},
+			wantDeletedMerged:  map[string]bool{"a": true},
+		},
+		"PatchBDeletesValueAlreadyDeletedInPatchA": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{"x": true},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{"x": true, "y": true},
+			wantModifiedMerged: map[string]int{},
+			wantDeletedMerged:  map[string]bool{"x": true, "y": true},
+		},
+		"PatchBAddsValuePreviouslyDeletedInPatchA": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{"x": true},
+			modifiedPatchB:     map[string]int{"x": 10},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"x": 10},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"MergeEmptyPatchBIntoNonEmptyPatchA": {
+			modifiedPatchA:     map[string]int{"a": 1},
+			deletedPatchA:      map[string]bool{"b": true},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"a": 1},
+			wantDeletedMerged:  map[string]bool{"b": true},
+		},
+		"MergeNonEmptyPatchBIntoEmptyPatchA": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{"a": 1},
+			deletedPatchB:      map[string]bool{"b": true},
+			wantModifiedMerged: map[string]int{"a": 1},
+			wantDeletedMerged:  map[string]bool{"b": true},
+		},
+		"MergeTwoEmptyPatches": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"NoOverlapBetweenPatchAAndPatchBModifications": {
+			modifiedPatchA:     map[string]int{"a1": 1, "a2": 2},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{"b1": 10, "b2": 20},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"a1": 1, "a2": 2, "b1": 10, "b2": 20},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"NoOverlapBetweenPatchAAndPatchBDeletions": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{"a1": true, "a2": true},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{"b1": true, "b2": true},
+			wantModifiedMerged: map[string]int{},
+			wantDeletedMerged:  map[string]bool{"a1": true, "a2": true, "b1": true, "b2": true},
+		},
+		"PatchBOnlyAddsNewKeysPatchAUnchanged": {
+			modifiedPatchA:     map[string]int{"orig": 5},
+			modifiedPatchB:     map[string]int{"new1": 100, "new2": 200},
+			deletedPatchA:      map[string]bool{},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"orig": 5, "new1": 100, "new2": 200},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"PatchBOnlyDeletesNewKeysPatchAUnchanged": {
+			modifiedPatchA:     map[string]int{"orig": 5},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{"del1": true, "del2": true},
+			wantModifiedMerged: map[string]int{"orig": 5},
+			wantDeletedMerged:  map[string]bool{"del1": true, "del2": true},
+		},
+		"AllInOne": {
+			modifiedPatchA:     map[string]int{"k1": 1, "k2": 2, "k3": 3},
+			deletedPatchA:      map[string]bool{"d1": true, "d2": true},
+			modifiedPatchB:     map[string]int{"k2": 22, "k4": 4, "d1": 11},
+			deletedPatchB:      map[string]bool{"k3": true, "d2": true, "d3": true},
+			wantModifiedMerged: map[string]int{"k1": 1, "k2": 22, "k4": 4, "d1": 11},
+			wantDeletedMerged:  map[string]bool{"k3": true, "d2": true, "d3": true},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			patchA := NewPatchFromMap(test.modifiedPatchA)
+			for k := range test.deletedPatchA {
+				patchA.Delete(k)
+			}
+
+			patchB := NewPatchFromMap(test.modifiedPatchB)
+			for k := range test.deletedPatchB {
+				patchB.Delete(k)
+			}
+
+			merged := mergePatchesInPlace(patchA, patchB)
+
+			if merged != patchA {
+				t.Errorf("mergePatchesInPlace did not modify patchA inplace, references are different")
+			}
+
+			if !maps.Equal(merged.modified, test.wantModifiedMerged) {
+				t.Errorf("Modified map mismatch: got %v, want %v", merged.modified, test.wantModifiedMerged)
+			}
+
+			if !maps.Equal(merged.deleted, test.wantDeletedMerged) {
+				t.Errorf("Deleted map mismatch: got %v, want %v", merged.deleted, test.wantDeletedMerged)
+			}
+		})
+	}
+}
diff --git a/cluster-autoscaler/simulator/common/patchset.go b/cluster-autoscaler/simulator/common/patchset.go
new file mode 100644
index 0000000000000000000000000000000000000000..c30ad06e93eeec60e5504c4b83fd27fcd2dfc569
--- /dev/null
+++ b/cluster-autoscaler/simulator/common/patchset.go
@@ -0,0 +1,265 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// PatchSet manages a stack of patches, allowing for fork/revert/commit operations.
+// It provides a view of the data as if all patches were applied sequentially.
+//
+// Time Complexities:
+//   - Fork(): O(1).
+//   - Commit(): O(P), where P is the number of modified/deleted entries
+//     in the current patch or no-op for PatchSet with a single patch.
+//   - Revert(): O(P), where P is the number of modified/deleted entries
+//     in the topmost patch or no-op for PatchSet with a single patch.
+//   - FindValue(key): O(1) for cached keys, O(N) otherwise, as every patch
+//     layer may need to be checked with constant-time map lookups.
+//   - AsMap(): O(N * P), as it needs to iterate through every layer's
+//     modifications and deletions to compute the latest state. If the cache is
+//     currently in sync with the PatchSet data, the complexity becomes
+//     O(C), where C is the actual number of key/value pairs in the flattened PatchSet.
+//   - SetCurrent(key, value): O(1).
+//   - DeleteCurrent(key): O(1).
+//   - InCurrentPatch(key): O(1).
+//
+// Variables used in complexity analysis:
+//   - N: The number of patch layers in the PatchSet.
+//   - P: The number of modified/deleted entries in a single patch layer.
+//
+// Caching:
+//
+// The PatchSet employs a lazy caching mechanism to speed up access to data. When a specific item is requested,
+// the cache is checked first. If the item isn't cached, its effective value is computed by traversing the layers
+// of patches from the most recent to the oldest, and the result is then stored in the cache. For operations that
+// require the entire dataset like AsMap, if a cache is fully up-to-date, the data is served directly from the cache.
+// Otherwise, the entire dataset is rebuilt by applying all patches, and this process fully populates the cache.
+// Direct modifications to the current patch update the specific item in the cache immediately. Reverting the latest
+// patch will clear affected items from the cache and mark the cache as potentially out-of-sync, as underlying values may
+// no longer represent the PatchSet as a whole. Committing and forking do not invalidate the cache, as the effective
+// values remain consistent from the perspective of read operations.
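+//
+// Illustrative sketch (not part of this change) of the fork/commit/revert flow:
+//
+//	ps := NewPatchSet(NewPatch[string, int]())
+//	ps.SetCurrent("a", 1) // recorded in the base layer
+//	ps.Fork()             // push a new empty layer
+//	ps.SetCurrent("a", 2)
+//	_ = ps.AsMap()        // map[a:2]
+//	ps.Revert()           // drop the top layer and its changes
+//	_ = ps.AsMap()        // map[a:1]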
+type PatchSet[K comparable, V any] struct {
+	// patches is a stack of individual modification layers. The base data is
+	// at index 0, and subsequent modifications are layered on top.
+	// PatchSet should always contain at least a single patch.
+	patches []*Patch[K, V]
+
+	// cache stores the computed effective value for keys that have been accessed.
+	// A nil pointer indicates the key is effectively deleted or not present.
+	cache map[K]*V
+
+	// cacheInSync indicates whether the cache map accurately reflects the
+	// current state derived from applying all patches in the 'patches' slice.
+	// When false, the cache may be stale and needs to be rebuilt or validated
+	// before being fully trusted for all keys.
+	cacheInSync bool
+}
+
+// NewPatchSet creates a new PatchSet, initializing it with the provided base patches.
+func NewPatchSet[K comparable, V any](patches ...*Patch[K, V]) *PatchSet[K, V] {
+	return &PatchSet[K, V]{
+		patches:     patches,
+		cache:       make(map[K]*V),
+		cacheInSync: false,
+	}
+}
+
+// Fork adds a new, empty patch layer to the top of the stack.
+// Subsequent modifications will be recorded in this new layer.
+func (p *PatchSet[K, V]) Fork() {
+	p.patches = append(p.patches, NewPatch[K, V]())
+}
+
+// Commit merges the topmost patch layer into the one below it.
+// If there's only one layer (or none), it's a no-op.
+func (p *PatchSet[K, V]) Commit() {
+	if len(p.patches) < 2 {
+		return
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	previousPatch := p.patches[len(p.patches)-2]
+	mergePatchesInPlace(previousPatch, currentPatch)
+	p.patches = p.patches[:len(p.patches)-1]
+}
+
+// Revert removes the topmost patch layer.
+// Any modifications or deletions recorded in that layer are discarded.
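+// Affected cache entries are dropped and the cache is marked as out of sync.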
+func (p *PatchSet[K, V]) Revert() {
+	if len(p.patches) <= 1 {
+		return
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	p.patches = p.patches[:len(p.patches)-1]
+
+	for key := range currentPatch.modified {
+		delete(p.cache, key)
+	}
+
+	for key := range currentPatch.deleted {
+		delete(p.cache, key)
+	}
+
+	p.cacheInSync = false
+}
+
+// FindValue searches for the effective value of a key by looking through the patches
+// from top to bottom. It returns the value and true if found, or the zero value and false
+// if the key is deleted or not found in any patch.
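+//
+// Illustrative sketch (not part of this change):
+//
+//	ps := NewPatchSet(NewPatchFromMap(map[string]int{"a": 1}))
+//	ps.Fork()
+//	ps.DeleteCurrent("a")
+//	_, found := ps.FindValue("a") // found == false: the deletion in the top layer shadows the base value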
+func (p *PatchSet[K, V]) FindValue(key K) (value V, found bool) {
+	var zero V
+
+	if cachedValue, cacheHit := p.cache[key]; cacheHit {
+		if cachedValue == nil {
+			return zero, false
+		}
+
+		return *cachedValue, true
+	}
+
+	value = zero
+	found = false
+	for i := len(p.patches) - 1; i >= 0; i-- {
+		patch := p.patches[i]
+		if patch.IsDeleted(key) {
+			break
+		}
+
+		foundValue, ok := patch.Get(key)
+		if ok {
+			value = foundValue
+			found = true
+			break
+		}
+	}
+
+	if found {
+		p.cache[key] = &value
+	} else {
+		p.cache[key] = nil
+	}
+
+	return value, found
+}
+
+// AsMap merges all patches into a single map representing the current effective state.
+// It iterates through all patches from bottom to top, applying modifications and deletions.
+// The cache is populated with the results during this process.
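+//
+// Illustrative sketch (not part of this change):
+//
+//	ps := NewPatchSet(NewPatchFromMap(map[string]int{"a": 1, "b": 2}))
+//	ps.Fork()
+//	ps.DeleteCurrent("b")
+//	ps.SetCurrent("c", 3)
+//	_ = ps.AsMap() // map[a:1 c:3]; the cache now reflects the flattened state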
+func (p *PatchSet[K, V]) AsMap() map[K]V {
+	if p.cacheInSync {
+		patchSetMap := make(map[K]V, len(p.cache))
+		for key, value := range p.cache {
+			if value != nil {
+				patchSetMap[key] = *value
+			}
+		}
+		return patchSetMap
+	}
+
+	keysCount := p.totalKeyCount()
+	patchSetMap := make(map[K]V, keysCount)
+
+	for _, patch := range p.patches {
+		for key, value := range patch.modified {
+			patchSetMap[key] = value
+		}
+
+		for key := range patch.deleted {
+			delete(patchSetMap, key)
+		}
+	}
+
+	for key, value := range patchSetMap {
+		p.cache[key] = &value
+	}
+	p.cacheInSync = true
+
+	return patchSetMap
+}
+
+// SetCurrent adds or updates a key-value pair in the topmost patch layer.
+func (p *PatchSet[K, V]) SetCurrent(key K, value V) {
+	if len(p.patches) == 0 {
+		p.Fork()
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	currentPatch.Set(key, value)
+	p.cache[key] = &value
+}
+
+// DeleteCurrent marks a key as deleted in the topmost patch layer.
+func (p *PatchSet[K, V]) DeleteCurrent(key K) {
+	if len(p.patches) == 0 {
+		p.Fork()
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	currentPatch.Delete(key)
+	p.cache[key] = nil
+}
+
+// InCurrentPatch checks if the key is available in the topmost patch layer.
+func (p *PatchSet[K, V]) InCurrentPatch(key K) bool {
+	if len(p.patches) == 0 {
+		return false
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	_, found := currentPatch.Get(key)
+	return found
+}
+
+// totalKeyCount returns an upper-bound estimate of the number of key-value
+// pairs across all patches. It does not account for deletions or for keys
+// modified in multiple layers, so the result may overestimate the real count.
+func (p *PatchSet[K, V]) totalKeyCount() int {
+	count := 0
+	for _, patch := range p.patches {
+		count += len(patch.modified)
+	}
+
+	return count
+}
+
+// ClonePatchSet creates a deep copy of a PatchSet object with the same patch layers
+// structure, while copying keys and values using cloneKey and cloneValue functions
+// provided.
+//
+// This function is intended for testing purposes only.
+func ClonePatchSet[K comparable, V any](ps *PatchSet[K, V], cloneKey func(K) K, cloneValue func(V) V) *PatchSet[K, V] {
+	if ps == nil {
+		return nil
+	}
+
+	cloned := NewPatchSet[K, V]()
+	for _, patch := range ps.patches {
+		clonedPatch := NewPatch[K, V]()
+		for key, value := range patch.modified {
+			clonedKey, clonedValue := cloneKey(key), cloneValue(value)
+			clonedPatch.Set(clonedKey, clonedValue)
+		}
+
+		for key := range patch.deleted {
+			clonedKey := cloneKey(key)
+			clonedPatch.Delete(clonedKey)
+		}
+
+		cloned.patches = append(cloned.patches, clonedPatch)
+	}
+
+	return cloned
+}
diff --git a/cluster-autoscaler/simulator/common/patchset_test.go b/cluster-autoscaler/simulator/common/patchset_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6571321bb08bac6c86eecc519463bfb55b35ccc
--- /dev/null
+++ b/cluster-autoscaler/simulator/common/patchset_test.go
@@ -0,0 +1,708 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"maps"
+	"testing"
+
+	"k8s.io/utils/ptr"
+)
+
+func TestPatchSetAsMap(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers []map[string]*int
+		wantMap     map[string]int
+	}{
+		"EmptyPatchSet": {
+			patchLayers: []map[string]*int{},
+			wantMap:     map[string]int{},
+		},
+		"SingleLayerNoDeletions": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(2)}},
+			wantMap:     map[string]int{"a": 1, "b": 2},
+		},
+		"SingleLayerOnlyDeletions": {
+			patchLayers: []map[string]*int{{"a": nil, "b": nil}},
+			wantMap:     map[string]int{},
+		},
+		"SingleLayerModificationsAndDeletions": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": nil, "c": ptr.To(3)}},
+			wantMap: map[string]int{"a": 1, "c": 3},
+		},
+		"MultipleLayersNoDeletionsOverwrite": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": ptr.To(2)},
+				{"b": ptr.To(22), "c": ptr.To(3)}},
+			wantMap: map[string]int{"a": 1, "b": 22, "c": 3},
+		},
+		"MultipleLayersWithDeletions": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": nil, "x": ptr.To(100)},
+				{"c": ptr.To(3), "x": ptr.To(101), "a": nil}},
+			wantMap: map[string]int{"c": 3, "x": 101},
+		},
+		"MultipleLayersDeletionInLowerLayer": {
+			patchLayers: []map[string]*int{
+				{"a": nil, "b": ptr.To(2)},
+				{"c": ptr.To(3)}},
+			wantMap: map[string]int{"b": 2, "c": 3},
+		},
+		"MultipleLayersAddAfterDelete": {
+			patchLayers: []map[string]*int{
+				{"a": nil},
+				{"a": ptr.To(11), "b": ptr.To(2)}},
+			wantMap: map[string]int{"a": 11, "b": 2},
+		},
+		"MultipleLayersDeletePreviouslyAdded": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": ptr.To(2)},
+				{"c": ptr.To(3), "b": nil}},
+			wantMap: map[string]int{"a": 1, "c": 3},
+		},
+		"AllInOne": {
+			patchLayers: []map[string]*int{
+				{"k1": ptr.To(1), "k2": ptr.To(2)},
+				{"k2": ptr.To(22), "k3": ptr.To(3), "k1": nil},
+				{"k1": ptr.To(111), "k3": ptr.To(33), "k4": ptr.To(4), "deleted": nil},
+			},
+			wantMap: map[string]int{"k2": 22, "k3": 33, "k4": 4, "k1": 111},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			patchset := buildTestPatchSet(t, test.patchLayers)
+			mergedMap := patchset.AsMap()
+			if !maps.Equal(mergedMap, test.wantMap) {
+				t.Errorf("AsMap() result mismatch: got %v, want %v", mergedMap, test.wantMap)
+			}
+		})
+	}
+}
+
+func TestPatchSetCommit(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers []map[string]*int
+		wantMap     map[string]int // Expected map after each commit
+	}{
+		"CommitEmptyPatchSet": {
+			patchLayers: []map[string]*int{},
+			wantMap:     map[string]int{},
+		},
+		"CommitSingleLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			wantMap:     map[string]int{"a": 1},
+		},
+		"CommitTwoLayersNoOverlap": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			wantMap:     map[string]int{"a": 1, "b": 2},
+		},
+		"CommitTwoLayersWithOverwrite": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": ptr.To(2)}},
+			wantMap:     map[string]int{"a": 2},
+		},
+		"CommitTwoLayersWithDeleteInTopLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(5)}, {"c": ptr.To(3), "a": nil}},
+			wantMap:     map[string]int{"b": 5, "c": 3},
+		},
+		"CommitTwoLayersWithDeleteInBottomLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": nil}, {"a": ptr.To(11), "c": ptr.To(3)}},
+			wantMap:     map[string]int{"a": 11, "c": 3},
+		},
+		"CommitThreeLayers": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": ptr.To(2), "z": ptr.To(100)},
+				{"b": ptr.To(22), "c": ptr.To(3), "a": nil},
+				{"c": ptr.To(33), "d": ptr.To(4), "a": ptr.To(111), "b": nil, "deleted": nil},
+			},
+			wantMap: map[string]int{"z": 100, "c": 33, "d": 4, "a": 111},
+		},
+		"CommitMultipleLayersDeleteAndAddBack": {
+			patchLayers: []map[string]*int{
+				{"x": ptr.To(10)},
+				{"x": nil},
+				{"x": ptr.To(30)},
+				{"y": ptr.To(40), "x": nil}},
+			wantMap: map[string]int{"y": 40},
+		},
+		"CommitEmptyLayersBetweenDataLayers": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {}, {"b": ptr.To(2)}, {}, {"c": ptr.To(3)}},
+			wantMap:     map[string]int{"a": 1, "b": 2, "c": 3},
+		},
+	}
+
+	for testName, tc := range tests {
+		t.Run(testName, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			initialNumPatches := len(ps.patches)
+
+			if currentMap := ps.AsMap(); !maps.Equal(currentMap, tc.wantMap) {
+				t.Errorf("AsMap() before any commits mismatch: got %v, want %v", currentMap, tc.wantMap)
+			}
+
+			for i := 0; i < initialNumPatches-1; i++ {
+				expectedPatchesAfterCommit := len(ps.patches) - 1
+				ps.Commit()
+				if len(ps.patches) != expectedPatchesAfterCommit {
+					t.Errorf("After commit #%d, expected %d patches, got %d", i+1, expectedPatchesAfterCommit, len(ps.patches))
+				}
+				if currentMap := ps.AsMap(); !maps.Equal(currentMap, tc.wantMap) {
+					t.Errorf("AsMap() after commit #%d mismatch: got %v, want %v", i+1, currentMap, tc.wantMap)
+				}
+			}
+
+			if initialNumPatches > 0 && len(ps.patches) != 1 {
+				t.Errorf("Expected 1 patch after all commits, got %d", len(ps.patches))
+			} else if initialNumPatches == 0 && len(ps.patches) != 0 {
+				t.Errorf("Expected 0 patches after all commits, got %d", len(ps.patches))
+			}
+		})
+	}
+}
+
+func TestPatchSetRevert(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers         []map[string]*int
+		wantInitialMap      map[string]int
+		wantMapsAfterRevert []map[string]int
+	}{
+		"RevertEmptyPatchSet": {
+			patchLayers:         []map[string]*int{},
+			wantInitialMap:      map[string]int{},
+			wantMapsAfterRevert: []map[string]int{{}},
+		},
+		"RevertSingleLayer": {
+			patchLayers:         []map[string]*int{{"a": ptr.To(1)}},
+			wantInitialMap:      map[string]int{"a": 1},
+			wantMapsAfterRevert: []map[string]int{{"a": 1}},
+		},
+		"RevertTwoLayersNoOverlap": {
+			patchLayers:         []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			wantInitialMap:      map[string]int{"a": 1, "b": 2},
+			wantMapsAfterRevert: []map[string]int{{"a": 1}, {"a": 1}},
+		},
+		"RevertTwoLayersWithOverwrite": {
+			patchLayers:         []map[string]*int{{"a": ptr.To(1)}, {"a": ptr.To(2)}},
+			wantInitialMap:      map[string]int{"a": 2},
+			wantMapsAfterRevert: []map[string]int{{"a": 1}, {"a": 1}},
+		},
+		"RevertTwoLayersWithDeleteInTopLayer": {
+			patchLayers:         []map[string]*int{{"a": ptr.To(1), "b": ptr.To(5)}, {"c": ptr.To(3), "a": nil}},
+			wantInitialMap:      map[string]int{"b": 5, "c": 3},
+			wantMapsAfterRevert: []map[string]int{{"a": 1, "b": 5}, {"a": 1, "b": 5}},
+		},
+		"RevertThreeLayers": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": ptr.To(2), "z": ptr.To(100)},
+				{"b": ptr.To(22), "c": ptr.To(3), "a": nil},
+				{"c": ptr.To(33), "d": ptr.To(4), "a": ptr.To(111), "b": nil, "deleted": nil},
+			},
+			wantInitialMap: map[string]int{"z": 100, "c": 33, "d": 4, "a": 111},
+			wantMapsAfterRevert: []map[string]int{
+				{"a": 1, "b": 2, "z": 100},
+				{"a": 1, "b": 2, "z": 100},
+				{"z": 100, "b": 22, "c": 3},
+			},
+		},
+		"RevertMultipleLayersDeleteAndAddBack": {
+			patchLayers:    []map[string]*int{{"x": ptr.To(10)}, {"x": nil}, {"x": ptr.To(30)}, {"y": ptr.To(40), "x": nil}},
+			wantInitialMap: map[string]int{"y": 40},
+			wantMapsAfterRevert: []map[string]int{
+				{"x": 10},
+				{"x": 10},
+				{},
+				{"x": 30},
+			},
+		},
+	}
+
+	for testName, tc := range tests {
+		t.Run(testName, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			patchesNumber := len(ps.patches)
+
+			if currentMap := ps.AsMap(); !maps.Equal(currentMap, tc.wantInitialMap) {
+				t.Errorf("AsMap() before any reverts mismatch: got %v, want %v", currentMap, tc.wantInitialMap)
+			}
+
+			for i := 0; i <= patchesNumber; i++ {
+				wantPatchesAfterRevert := len(ps.patches) - 1
+				ps.Revert()
+				if len(ps.patches) != wantPatchesAfterRevert && len(ps.patches) > 1 {
+					t.Errorf("After revert #%d, expected %d patches, got %d", i+1, wantPatchesAfterRevert, len(ps.patches))
+				}
+
+				currentMap := ps.AsMap()
+				wantMapIndex := patchesNumber - i - 1
+				if wantMapIndex < 0 {
+					wantMapIndex = 0
+				}
+				wantMapAfterRevert := tc.wantMapsAfterRevert[wantMapIndex]
+				if !maps.Equal(currentMap, wantMapAfterRevert) {
+					t.Errorf("AsMap() after revert #%d mismatch: got %v, want %v", i+1, currentMap, wantMapAfterRevert)
+				}
+			}
+
+			if patchesNumber >= 1 && len(ps.patches) != 1 {
+				t.Errorf("Expected 1 patch after all reverts, got %d", len(ps.patches))
+			} else if patchesNumber == 0 && len(ps.patches) != 0 {
+				t.Errorf("Expected 0 patches after all reverts, got %d", len(ps.patches))
+			}
+		})
+	}
+}
+
+func TestPatchSetForkRevert(t *testing.T) {
+	// 1. Initialize empty patchset
+	ps := NewPatchSet(NewPatch[string, int]())
+	if initialMap := ps.AsMap(); len(initialMap) != 0 {
+		t.Fatalf("Initial AsMap() got %v, want empty", initialMap)
+	}
+
+	// 2. Call Fork
+	ps.Fork()
+	if len(ps.patches) != 2 {
+		t.Fatalf("Expected 2 patches after Fork(), got %d", len(ps.patches))
+	}
+
+	// 3. Perform some mutation operations on the new layer
+	ps.SetCurrent("a", 1)
+	ps.SetCurrent("b", 2)
+	ps.DeleteCurrent("a")
+
+	// 4. Call Revert
+	ps.Revert()
+	if len(ps.patches) != 1 {
+		t.Fatalf("Expected 1 patch after Revert(), got %d", len(ps.patches))
+	}
+
+	// 5. Compare state to the empty map
+	if finalMap := ps.AsMap(); len(finalMap) != 0 {
+		t.Errorf("AsMap() got %v, want empty", finalMap)
+	}
+}
+
+func TestPatchSetForkCommit(t *testing.T) {
+	// 1. Initialize empty patchset
+	ps := NewPatchSet(NewPatch[string, int]())
+	if initialMap := ps.AsMap(); len(initialMap) != 0 {
+		t.Fatalf("Initial AsMap() got %v, want empty", initialMap)
+	}
+
+	// 2. Call Fork two times
+	ps.Fork()
+	ps.Fork()
+	if len(ps.patches) != 3 {
+		t.Fatalf("Expected 3 patches after 2xFork(), got %d", len(ps.patches))
+	}
+
+	// 3. Perform some mutation operations on the current layer
+	ps.SetCurrent("a", 1)
+	ps.SetCurrent("b", 2)
+	ps.DeleteCurrent("a")
+
+	// 4. Call Commit to merge the changes into the layer below
+	ps.Commit()
+	if len(ps.patches) != 2 {
+		t.Fatalf("Expected 2 patches after Commit(), got %d", len(ps.patches))
+	}
+
+	// 5. Call Revert - this discards the layer that received the committed changes
+	ps.Revert()
+	if len(ps.patches) != 1 {
+		t.Fatalf("Expected 1 patch after Revert(), got %d", len(ps.patches))
+	}
+
+	// 6. Only the untouched base layer remains, so the state should be empty again
+	if finalMap := ps.AsMap(); len(finalMap) != 0 {
+		t.Errorf("AsMap() got %v, want empty", finalMap)
+	}
+}
+
+func TestPatchSetFindValue(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers []map[string]*int
+		searchKey   string
+		wantValue   int
+		wantFound   bool
+	}{
+		"EmptyPatchSet_KeyNotFound": {
+			patchLayers: []map[string]*int{},
+			searchKey:   "a",
+			wantFound:   false,
+		},
+		"SingleLayer_KeyFound": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantValue:   1,
+			wantFound:   true,
+		},
+		"SingleLayer_KeyNotFound": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			searchKey:   "x",
+			wantFound:   false,
+		},
+		"SingleLayer_KeyDeleted": {
+			patchLayers: []map[string]*int{{"a": nil, "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantFound:   false,
+		},
+		"MultiLayer_KeyInTopLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": ptr.To(10), "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantValue:   10,
+			wantFound:   true,
+		},
+		"MultiLayer_KeyInBottomLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(2)}, {"b": ptr.To(22)}},
+			searchKey:   "a",
+			wantValue:   1,
+			wantFound:   true,
+		},
+		"MultiLayer_KeyOverwrittenThenDeleted": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": ptr.To(10)}, {"a": nil, "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantFound:   false,
+		},
+		"MultiLayer_KeyDeletedThenAddedBack": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": nil}, {"a": ptr.To(100), "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantValue:   100,
+			wantFound:   true,
+		},
+		"MultiLayer_KeyNotFoundInAnyLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			searchKey:   "x",
+			wantFound:   false,
+		},
+		"MultiLayer_KeyDeletedInMiddleLayer_NotFound": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": nil, "b": ptr.To(2)}, {"c": ptr.To(3)}},
+			searchKey:   "a",
+			wantFound:   false,
+		},
+		"MultiLayer_KeyPresentInBase_DeletedInOverlay_ReAddedInTop": {
+			patchLayers: []map[string]*int{
+				{"k1": ptr.To(1)},
+				{"k1": nil},
+				{"k1": ptr.To(111)},
+			},
+			searchKey: "k1",
+			wantValue: 111,
+			wantFound: true,
+		},
+	}
+
+	for testName, tc := range tests {
+		t.Run(testName, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			val, found := ps.FindValue(tc.searchKey)
+
+			if found != tc.wantFound || (found && val != tc.wantValue) {
+				t.Errorf("FindValue(%q) got val=%v, found=%v; want val=%v, found=%v", tc.searchKey, val, found, tc.wantValue, tc.wantFound)
+			}
+		})
+	}
+}
+
+func TestPatchSetOperations(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers    []map[string]*int
+		mutatePatchSet func(ps *PatchSet[string, int])
+		searchKey      string
+		wantInCurrent  bool
+	}{
+		"SetCurrent_NewKey_InitiallyEmptyPatchSet": {
+			patchLayers: []map[string]*int{},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.SetCurrent("a", 1)
+			},
+			searchKey:     "a",
+			wantInCurrent: true,
+		},
+		"SetCurrent_NewKey_OnExistingEmptyLayer": {
+			patchLayers: []map[string]*int{{}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.SetCurrent("a", 1)
+			},
+			searchKey:     "a",
+			wantInCurrent: true,
+		},
+		"SetCurrent_OverwriteKey_InCurrentPatch": {
+			patchLayers: []map[string]*int{{"a": ptr.To(10)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.SetCurrent("a", 1)
+			},
+			searchKey:     "a",
+			wantInCurrent: true,
+		},
+		"SetCurrent_OverwriteKey_InLowerPatch_AfterFork": {
+			patchLayers: []map[string]*int{{"a": ptr.To(10)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.Fork()
+				ps.SetCurrent("a", 1)
+			},
+			searchKey:     "a",
+			wantInCurrent: true,
+		},
+		"DeleteCurrent_ExistingKey_InCurrentPatch": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.DeleteCurrent("a")
+			},
+			searchKey:     "a",
+			wantInCurrent: false,
+		},
+		"DeleteCurrent_ExistingKey_InLowerPatch_AfterFork": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.Fork()
+				ps.DeleteCurrent("a")
+			},
+			searchKey:     "a",
+			wantInCurrent: false,
+		},
+		"DeleteCurrent_NonExistentKey_OnExistingEmptyLayer": {
+			patchLayers: []map[string]*int{{}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.DeleteCurrent("x")
+			},
+			searchKey:     "x",
+			wantInCurrent: false,
+		},
+		"DeleteCurrent_NonExistentKey_InitiallyEmptyPatchSet": {
+			patchLayers: []map[string]*int{}, // Starts with no layers
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.DeleteCurrent("x")
+			},
+			searchKey:     "x",
+			wantInCurrent: false,
+		},
+		"InCurrentPatch_KeySetInCurrent": {
+			patchLayers:    []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {},
+			searchKey:      "a",
+			wantInCurrent:  true,
+		},
+		"InCurrentPatch_KeySetInLower_NotInCurrent_AfterFork": {
+			patchLayers:    []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) { ps.Fork() },
+			searchKey:      "a",
+			wantInCurrent:  false,
+		},
+		"InCurrentPatch_KeyDeletedInCurrent": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.DeleteCurrent("a")
+			},
+			searchKey:     "a",
+			wantInCurrent: false, // InCurrentPatch won't find the deleted key.
+		},
+		"InCurrentPatch_NonExistentKey_InitiallyEmptyPatchSet": {
+			patchLayers:    []map[string]*int{},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {},
+			searchKey:      "a",
+			wantInCurrent:  false,
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			tc.mutatePatchSet(ps)
+
+			inCurrent := ps.InCurrentPatch(tc.searchKey)
+			if inCurrent != tc.wantInCurrent {
+				t.Errorf("InCurrentPatch(%q): got %v, want %v", tc.searchKey, inCurrent, tc.wantInCurrent)
+			}
+		})
+	}
+}
+
+func TestPatchSetCache(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers     []map[string]*int
+		mutatePatchSet  func(ps *PatchSet[string, int])
+		wantCache       map[string]*int
+		wantCacheInSync bool
+	}{
+		"Initial_EmptyPatchSet": {
+			patchLayers:     []map[string]*int{},
+			mutatePatchSet:  func(ps *PatchSet[string, int]) {},
+			wantCache:       map[string]*int{},
+			wantCacheInSync: false,
+		},
+		"Initial_WithData_NoCacheAccess": {
+			patchLayers:     []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet:  func(ps *PatchSet[string, int]) {},
+			wantCache:       map[string]*int{},
+			wantCacheInSync: false,
+		},
+		"FindValue_PopulatesCacheForKey": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1)},
+			wantCacheInSync: false,
+		},
+		"FindValue_DeletedKey_PopulatesCacheWithNil": {
+			patchLayers: []map[string]*int{{"a": nil, "b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+			},
+			wantCache:       map[string]*int{"a": nil},
+			wantCacheInSync: false,
+		},
+		"AsMap_PopulatesAndSyncsCache": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": nil, "c": ptr.To(3)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.AsMap()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1), "c": ptr.To(3)}, // Cache does not necessarily track deletions like 'b' key
+			wantCacheInSync: true,
+		},
+		"SetCurrent_UpdatesCache_NewKey": {
+			patchLayers: []map[string]*int{{}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.SetCurrent("x", 10)
+			},
+			wantCache:       map[string]*int{"x": ptr.To(10)},
+			wantCacheInSync: false,
+		},
+		"SetCurrent_UpdatesCache_OverwriteKey": {
+			patchLayers: []map[string]*int{{"x": ptr.To(5)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("x")
+				ps.SetCurrent("x", 10)
+			},
+			wantCache:       map[string]*int{"x": ptr.To(10)},
+			wantCacheInSync: false,
+		},
+		"DeleteCurrent_UpdatesCache": {
+			patchLayers: []map[string]*int{{"x": ptr.To(5)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("x")
+				ps.DeleteCurrent("x")
+			},
+			wantCache:       map[string]*int{"x": nil},
+			wantCacheInSync: false,
+		},
+		"Revert_ClearsAffectedCacheEntries_And_SetsCacheNotInSync": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2), "a": ptr.To(11)}}, // Layer 0: a=1; Layer 1: b=2, a=11
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+				ps.FindValue("b")
+				ps.Revert()
+			},
+			wantCache:       map[string]*int{},
+			wantCacheInSync: false,
+		},
+		"Revert_OnSyncedCache_SetsCacheNotInSync": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.AsMap()
+				ps.Revert()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1)},
+			wantCacheInSync: false,
+		},
+		"Commit_DoesNotInvalidateCache_IfValuesConsistent": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+				ps.FindValue("b")
+				ps.Commit()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1), "b": ptr.To(2)},
+			wantCacheInSync: false,
+		},
+		"Commit_OnSyncedCache_KeepsCacheInSync": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.AsMap()
+				ps.Commit()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1), "b": ptr.To(2)},
+			wantCacheInSync: true,
+		},
+		"Fork_DoesNotInvalidateCache": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+				ps.Fork()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1)},
+			wantCacheInSync: false,
+		},
+		"Fork_OnSyncedCache_KeepsCacheInSync": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.AsMap()
+				ps.Fork()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1)},
+			wantCacheInSync: true,
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			tc.mutatePatchSet(ps)
+
+			if !maps.EqualFunc(ps.cache, tc.wantCache, func(a, b *int) bool {
+				if a == nil && b == nil {
+					return true
+				}
+				if a == nil || b == nil {
+					return false
+				}
+				return *a == *b
+			}) {
+				t.Errorf("Cache content mismatch: got %v, want %v", ps.cache, tc.wantCache)
+			}
+
+			if ps.cacheInSync != tc.wantCacheInSync {
+				t.Errorf("cacheInSync status mismatch: got %v, want %v", ps.cacheInSync, tc.wantCacheInSync)
+			}
+		})
+	}
+}
+
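+// buildTestPatchSet constructs a PatchSet from the given layers, applied bottom to top.
+// Within each layer map, a non-nil pointer records a Set for that key and a nil pointer
+// records a Delete, mirroring how the test cases above encode per-layer contents.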
+func buildTestPatchSet[K comparable, V any](t *testing.T, patchLayers []map[K]*V) *PatchSet[K, V] {
+	t.Helper()
+
+	patchesNumber := len(patchLayers)
+	patches := make([]*Patch[K, V], patchesNumber)
+	for i := 0; i < patchesNumber; i++ {
+		layerMap := patchLayers[i]
+		currentPatch := NewPatch[K, V]()
+		for k, vPtr := range layerMap {
+			if vPtr != nil {
+				currentPatch.Set(k, *vPtr)
+			} else {
+				currentPatch.Delete(k)
+			}
+		}
+		patches[i] = currentPatch
+	}
+
+	return NewPatchSet(patches...)
+}
diff --git a/cluster-autoscaler/simulator/dynamicresources/provider/provider.go b/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
index 7c6cddac31fd31e62200fa57453f6a511c180db5..02e02562b93bbca44af1ff2d1099552a095fffb2 100644
--- a/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
+++ b/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
@@ -48,10 +48,10 @@ func NewProvider(claims allObjectsLister[*resourceapi.ResourceClaim], slices all
 }
 
 // Snapshot returns a snapshot of all DRA resources at a ~single point in time.
-func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
+func (p *Provider) Snapshot() (*drasnapshot.Snapshot, error) {
 	claims, err := p.resourceClaims.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	claimMap := make(map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim)
 	for _, claim := range claims {
@@ -60,7 +60,7 @@ func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
 
 	slices, err := p.resourceSlices.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	slicesMap := make(map[string][]*resourceapi.ResourceSlice)
 	var nonNodeLocalSlices []*resourceapi.ResourceSlice
@@ -74,7 +74,7 @@ func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
 
 	classes, err := p.deviceClasses.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	classMap := make(map[string]*resourceapi.DeviceClass)
 	for _, class := range classes {
diff --git a/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go b/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
index d8ff91cab1b1a79ea24fe11d5616bfc067eead3a..1e6e608004b29dae708e9726d21ed867a9047471 100644
--- a/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
@@ -61,7 +61,7 @@ func TestProviderSnapshot(t *testing.T) {
 		triggerSlicesError  bool
 		classes             []*resourceapi.DeviceClass
 		triggerClassesError bool
-		wantSnapshot        drasnapshot.Snapshot
+		wantSnapshot        *drasnapshot.Snapshot
 		wantErr             error
 	}{
 		{
@@ -133,7 +133,7 @@ func TestProviderSnapshot(t *testing.T) {
 			if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" {
 				t.Fatalf("Provider.Snapshot(): unexpected error (-want +got): %s", diff)
 			}
-			if diff := cmp.Diff(tc.wantSnapshot, snapshot, cmp.AllowUnexported(drasnapshot.Snapshot{}), cmpopts.EquateEmpty()); diff != "" {
+			if diff := cmp.Diff(tc.wantSnapshot, snapshot, drasnapshot.SnapshotFlattenedComparer()); diff != "" {
 				t.Fatalf("Provider.Snapshot(): snapshot differs from expected (-want +got): %s", diff)
 			}
 		})
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
index f9e2a3f2b5ba7bdd676cf6bb4ab78e8e28f6da5b..cc9007e213310086613e4ea33f4e09aadf413b04 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
@@ -18,13 +18,16 @@ package snapshot
 
 import (
 	"fmt"
+	"maps"
 
 	apiv1 "k8s.io/api/core/v1"
 	resourceapi "k8s.io/api/resource/v1beta1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/common"
 	drautils "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	resourceclaim "k8s.io/dynamic-resource-allocation/resourceclaim"
+	"k8s.io/klog/v2"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
@@ -43,54 +46,67 @@ func GetClaimId(claim *resourceapi.ResourceClaim) ResourceClaimId {
 // obtained via the Provider. Then, it can be modified using the exposed methods, to simulate scheduling actions
 // in the cluster.
 type Snapshot struct {
-	resourceClaimsById         map[ResourceClaimId]*resourceapi.ResourceClaim
-	resourceSlicesByNodeName   map[string][]*resourceapi.ResourceSlice
-	nonNodeLocalResourceSlices []*resourceapi.ResourceSlice
-	deviceClasses              map[string]*resourceapi.DeviceClass
+	resourceClaims *common.PatchSet[ResourceClaimId, *resourceapi.ResourceClaim]
+	resourceSlices *common.PatchSet[string, []*resourceapi.ResourceSlice]
+	deviceClasses  *common.PatchSet[string, *resourceapi.DeviceClass]
 }
 
+// nonNodeLocalResourceSlicesIdentifier is a special key used in the resourceSlices PatchSet
+// to store ResourceSlices that do not apply to specific nodes. Its value is one that a
+// Kubernetes node name can never take, so it cannot collide with a real node's key.
+const nonNodeLocalResourceSlicesIdentifier = "--NON-LOCAL--"
+
 // NewSnapshot returns a Snapshot created from the provided data.
-func NewSnapshot(claims map[ResourceClaimId]*resourceapi.ResourceClaim, nodeLocalSlices map[string][]*resourceapi.ResourceSlice, globalSlices []*resourceapi.ResourceSlice, deviceClasses map[string]*resourceapi.DeviceClass) Snapshot {
-	if claims == nil {
-		claims = map[ResourceClaimId]*resourceapi.ResourceClaim{}
-	}
-	if nodeLocalSlices == nil {
-		nodeLocalSlices = map[string][]*resourceapi.ResourceSlice{}
-	}
-	if deviceClasses == nil {
-		deviceClasses = map[string]*resourceapi.DeviceClass{}
+func NewSnapshot(claims map[ResourceClaimId]*resourceapi.ResourceClaim, nodeLocalSlices map[string][]*resourceapi.ResourceSlice, nonNodeLocalSlices []*resourceapi.ResourceSlice, deviceClasses map[string]*resourceapi.DeviceClass) *Snapshot {
+	slices := make(map[string][]*resourceapi.ResourceSlice, len(nodeLocalSlices)+1)
+	maps.Copy(slices, nodeLocalSlices)
+	slices[nonNodeLocalResourceSlicesIdentifier] = nonNodeLocalSlices
+
+	claimsPatch := common.NewPatchFromMap(claims)
+	slicesPatch := common.NewPatchFromMap(slices)
+	devicesPatch := common.NewPatchFromMap(deviceClasses)
+	return &Snapshot{
+		resourceClaims: common.NewPatchSet(claimsPatch),
+		resourceSlices: common.NewPatchSet(slicesPatch),
+		deviceClasses:  common.NewPatchSet(devicesPatch),
 	}
-	return Snapshot{
-		resourceClaimsById:         claims,
-		resourceSlicesByNodeName:   nodeLocalSlices,
-		nonNodeLocalResourceSlices: globalSlices,
-		deviceClasses:              deviceClasses,
+}
+
+// NewEmptySnapshot returns a Snapshot that tracks no DRA objects.
+func NewEmptySnapshot() *Snapshot {
+	claimsPatch := common.NewPatch[ResourceClaimId, *resourceapi.ResourceClaim]()
+	slicesPatch := common.NewPatch[string, []*resourceapi.ResourceSlice]()
+	devicesPatch := common.NewPatch[string, *resourceapi.DeviceClass]()
+	return &Snapshot{
+		resourceClaims: common.NewPatchSet(claimsPatch),
+		resourceSlices: common.NewPatchSet(slicesPatch),
+		deviceClasses:  common.NewPatchSet(devicesPatch),
 	}
 }
 
 // ResourceClaims exposes the Snapshot as schedulerframework.ResourceClaimTracker, in order to interact with
 // the scheduler framework.
-func (s Snapshot) ResourceClaims() schedulerframework.ResourceClaimTracker {
-	return snapshotClaimTracker(s)
+func (s *Snapshot) ResourceClaims() schedulerframework.ResourceClaimTracker {
+	return snapshotClaimTracker{snapshot: s}
 }
 
 // ResourceSlices exposes the Snapshot as schedulerframework.ResourceSliceLister, in order to interact with
 // the scheduler framework.
-func (s Snapshot) ResourceSlices() schedulerframework.ResourceSliceLister {
-	return snapshotSliceLister(s)
+func (s *Snapshot) ResourceSlices() schedulerframework.ResourceSliceLister {
+	return snapshotSliceLister{snapshot: s}
 }
 
 // DeviceClasses exposes the Snapshot as schedulerframework.DeviceClassLister, in order to interact with
 // the scheduler framework.
-func (s Snapshot) DeviceClasses() schedulerframework.DeviceClassLister {
-	return snapshotClassLister(s)
+func (s *Snapshot) DeviceClasses() schedulerframework.DeviceClassLister {
+	return snapshotClassLister{snapshot: s}
 }
 
 // WrapSchedulerNodeInfo wraps the provided *schedulerframework.NodeInfo into an internal *framework.NodeInfo, adding
 // dra information. Node-local ResourceSlices are added to the NodeInfo, and all ResourceClaims referenced by each Pod
 // are added to each PodInfo. Returns an error if any of the Pods is missing a ResourceClaim.
-func (s Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) (*framework.NodeInfo, error) {
-	podExtraInfos := map[types.UID]framework.PodExtraInfo{}
+func (s *Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) (*framework.NodeInfo, error) {
+	podExtraInfos := make(map[types.UID]framework.PodExtraInfo, len(schedNodeInfo.Pods))
 	for _, pod := range schedNodeInfo.Pods {
 		podClaims, err := s.PodClaims(pod.Pod)
 		if err != nil {
@@ -104,161 +120,251 @@ func (s Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeIn
 	return framework.WrapSchedulerNodeInfo(schedNodeInfo, nodeSlices, podExtraInfos), nil
 }
 
-// Clone returns a copy of this Snapshot that can be independently modified without affecting this Snapshot.
-// The only mutable objects in the Snapshot are ResourceClaims, so they are deep-copied. The rest is only a
-// shallow copy.
-func (s Snapshot) Clone() Snapshot {
-	result := Snapshot{
-		resourceClaimsById:       map[ResourceClaimId]*resourceapi.ResourceClaim{},
-		resourceSlicesByNodeName: map[string][]*resourceapi.ResourceSlice{},
-		deviceClasses:            map[string]*resourceapi.DeviceClass{},
-	}
-	// The claims are mutable, they have to be deep-copied.
-	for id, claim := range s.resourceClaimsById {
-		result.resourceClaimsById[id] = claim.DeepCopy()
-	}
-	// The rest of the objects aren't mutable, so a shallow copy should be enough.
-	for nodeName, slices := range s.resourceSlicesByNodeName {
-		for _, slice := range slices {
-			result.resourceSlicesByNodeName[nodeName] = append(result.resourceSlicesByNodeName[nodeName], slice)
-		}
-	}
-	for _, slice := range s.nonNodeLocalResourceSlices {
-		result.nonNodeLocalResourceSlices = append(result.nonNodeLocalResourceSlices, slice)
-	}
-	for className, class := range s.deviceClasses {
-		result.deviceClasses[className] = class
-	}
-	return result
-}
-
 // AddClaims adds additional ResourceClaims to the Snapshot. It can be used e.g. if we need to duplicate a Pod that
 // owns ResourceClaims. Returns an error if any of the claims is already tracked in the snapshot.
-func (s Snapshot) AddClaims(newClaims []*resourceapi.ResourceClaim) error {
+func (s *Snapshot) AddClaims(newClaims []*resourceapi.ResourceClaim) error {
 	for _, claim := range newClaims {
-		if _, found := s.resourceClaimsById[GetClaimId(claim)]; found {
+		if _, found := s.resourceClaims.FindValue(GetClaimId(claim)); found {
 			return fmt.Errorf("claim %s/%s already tracked in the snapshot", claim.Namespace, claim.Name)
 		}
 	}
+
 	for _, claim := range newClaims {
-		s.resourceClaimsById[GetClaimId(claim)] = claim
+		s.resourceClaims.SetCurrent(GetClaimId(claim), claim)
 	}
+
 	return nil
 }
 
 // PodClaims returns ResourceClaims objects for all claims referenced by the Pod. If any of the referenced claims
 // isn't tracked in the Snapshot, an error is returned.
-func (s Snapshot) PodClaims(pod *apiv1.Pod) ([]*resourceapi.ResourceClaim, error) {
-	var result []*resourceapi.ResourceClaim
-
-	for _, claimRef := range pod.Spec.ResourceClaims {
-		claim, err := s.claimForPod(pod, claimRef)
-		if err != nil {
-			return nil, fmt.Errorf("error while obtaining ResourceClaim %s for pod %s/%s: %v", claimRef.Name, pod.Namespace, pod.Name, err)
-		}
-		result = append(result, claim)
-	}
-
-	return result, nil
+func (s *Snapshot) PodClaims(pod *apiv1.Pod) ([]*resourceapi.ResourceClaim, error) {
+	return s.findPodClaims(pod, false)
 }
 
 // RemovePodOwnedClaims iterates over all the claims referenced by the Pod, and removes the ones owned by the Pod from the Snapshot.
 // Claims referenced by the Pod but not owned by it are not removed, but the Pod's reservation is removed from them.
 // This method removes all relevant claims that are in the snapshot, and doesn't error out if any of the claims are missing.
-func (s Snapshot) RemovePodOwnedClaims(pod *apiv1.Pod) {
-	for _, podClaimRef := range pod.Spec.ResourceClaims {
-		claimName := claimRefToName(pod, podClaimRef)
-		if claimName == "" {
-			// This most likely means that the Claim hasn't actually been created. Nothing to remove/modify, so continue to the next claim.
-			continue
-		}
-		claimId := ResourceClaimId{Name: claimName, Namespace: pod.Namespace}
-		claim, found := s.resourceClaimsById[claimId]
-		if !found {
-			// The claim isn't tracked in the snapshot for some reason. Nothing to remove/modify, so continue to the next claim.
-			continue
-		}
+func (s *Snapshot) RemovePodOwnedClaims(pod *apiv1.Pod) {
+	claims, err := s.findPodClaims(pod, true)
+	if err != nil {
+		klog.Errorf("Snapshot.RemovePodOwnedClaims ignored an error: %s", err)
+	}
+
+	for _, claim := range claims {
+		claimId := GetClaimId(claim)
 		if err := resourceclaim.IsForPod(pod, claim); err == nil {
-			delete(s.resourceClaimsById, claimId)
-		} else {
-			drautils.ClearPodReservationInPlace(claim, pod)
+			s.resourceClaims.DeleteCurrent(claimId)
+			continue
 		}
+
+		claim := s.ensureClaimWritable(claim)
+		drautils.ClearPodReservationInPlace(claim, pod)
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
 }
 
 // ReservePodClaims adds a reservation for the provided Pod to all the claims it references. If any of the referenced
 // claims isn't tracked in the Snapshot, or if any of the claims are already at maximum reservation count, an error is
 // returned.
-func (s Snapshot) ReservePodClaims(pod *apiv1.Pod) error {
-	claims, err := s.PodClaims(pod)
+func (s *Snapshot) ReservePodClaims(pod *apiv1.Pod) error {
+	claims, err := s.findPodClaims(pod, false)
 	if err != nil {
 		return err
 	}
+
 	for _, claim := range claims {
 		if drautils.ClaimFullyReserved(claim) && !resourceclaim.IsReservedForPod(pod, claim) {
 			return fmt.Errorf("claim %s/%s already has max number of reservations set, can't add more", claim.Namespace, claim.Name)
 		}
 	}
+
 	for _, claim := range claims {
+		claimId := GetClaimId(claim)
+		claim := s.ensureClaimWritable(claim)
 		drautils.AddPodReservationInPlace(claim, pod)
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
+
 	return nil
 }
 
 // UnreservePodClaims removes reservations for the provided Pod from all the claims it references. If any of the referenced
 // claims isn't tracked in the Snapshot, an error is returned. If a claim is owned by the pod, or if the claim has no more reservations,
 // its allocation is cleared.
-func (s Snapshot) UnreservePodClaims(pod *apiv1.Pod) error {
-	claims, err := s.PodClaims(pod)
+func (s *Snapshot) UnreservePodClaims(pod *apiv1.Pod) error {
+	claims, err := s.findPodClaims(pod, false)
 	if err != nil {
 		return err
 	}
+
 	for _, claim := range claims {
-		podOwnedClaim := resourceclaim.IsForPod(pod, claim) == nil
+		claimId := GetClaimId(claim)
+		claim := s.ensureClaimWritable(claim)
 		drautils.ClearPodReservationInPlace(claim, pod)
-		if podOwnedClaim || !drautils.ClaimInUse(claim) {
+		if err := resourceclaim.IsForPod(pod, claim); err == nil || !drautils.ClaimInUse(claim) {
 			drautils.DeallocateClaimInPlace(claim)
 		}
+
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
 	return nil
 }
 
 // NodeResourceSlices returns all node-local ResourceSlices for the given Node.
-func (s Snapshot) NodeResourceSlices(nodeName string) ([]*resourceapi.ResourceSlice, bool) {
-	slices, found := s.resourceSlicesByNodeName[nodeName]
+func (s *Snapshot) NodeResourceSlices(nodeName string) ([]*resourceapi.ResourceSlice, bool) {
+	slices, found := s.resourceSlices.FindValue(nodeName)
 	return slices, found
 }
 
 // AddNodeResourceSlices adds additional node-local ResourceSlices to the Snapshot. This should be used whenever a Node with
 // node-local ResourceSlices is duplicated in the cluster snapshot.
-func (s Snapshot) AddNodeResourceSlices(nodeName string, slices []*resourceapi.ResourceSlice) error {
-	if _, alreadyInSnapshot := s.resourceSlicesByNodeName[nodeName]; alreadyInSnapshot {
+func (s *Snapshot) AddNodeResourceSlices(nodeName string, slices []*resourceapi.ResourceSlice) error {
+	if _, alreadyInSnapshot := s.NodeResourceSlices(nodeName); alreadyInSnapshot {
 		return fmt.Errorf("node %q ResourceSlices already present", nodeName)
 	}
-	s.resourceSlicesByNodeName[nodeName] = slices
+
+	s.resourceSlices.SetCurrent(nodeName, slices)
 	return nil
 }
 
-// RemoveNodeResourceSlices removes all node-local ResourceSlices for the Node with the given nodeName.
+// RemoveNodeResourceSlices removes all node-local ResourceSlices for the Node with the given node name.
 // It's a no-op if there aren't any slices to remove.
-func (s Snapshot) RemoveNodeResourceSlices(nodeName string) {
-	delete(s.resourceSlicesByNodeName, nodeName)
+func (s *Snapshot) RemoveNodeResourceSlices(nodeName string) {
+	s.resourceSlices.DeleteCurrent(nodeName)
+}
+
+// Commit persists the changes made in the topmost layer by merging them into the layer below it.
+func (s *Snapshot) Commit() {
+	s.deviceClasses.Commit()
+	s.resourceClaims.Commit()
+	s.resourceSlices.Commit()
+}
+
+// Revert removes the topmost patch layer for all resource types, discarding
+// any modifications or deletions recorded there.
+func (s *Snapshot) Revert() {
+	s.deviceClasses.Revert()
+	s.resourceClaims.Revert()
+	s.resourceSlices.Revert()
+}
+
+// Fork adds a new, empty patch layer to the top of the stack for all
+// resource types. Subsequent modifications will be recorded in this
+// new layer until Commit() or Revert() is invoked.
+func (s *Snapshot) Fork() {
+	s.deviceClasses.Fork()
+	s.resourceClaims.Fork()
+	s.resourceSlices.Fork()
+}
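+
+// A minimal usage sketch (illustrative only; newClaims is a placeholder slice of
+// ResourceClaims): a caller opens a layer, applies simulated changes, and then
+// either keeps or discards them.
+//
+//	snapshot.Fork()
+//	if err := snapshot.AddClaims(newClaims); err != nil {
+//		snapshot.Revert() // drop everything recorded since Fork()
+//	} else {
+//		snapshot.Commit() // merge the layer into the one below
+//	}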
+
+// listDeviceClasses retrieves all effective DeviceClasses from the snapshot.
+func (s *Snapshot) listDeviceClasses() []*resourceapi.DeviceClass {
+	deviceClasses := s.deviceClasses.AsMap()
+	deviceClassesList := make([]*resourceapi.DeviceClass, 0, len(deviceClasses))
+	for _, class := range deviceClasses {
+		deviceClassesList = append(deviceClassesList, class)
+	}
+
+	return deviceClassesList
+}
+
+// listResourceClaims retrieves all effective ResourceClaims from the snapshot.
+func (s *Snapshot) listResourceClaims() []*resourceapi.ResourceClaim {
+	claims := s.resourceClaims.AsMap()
+	claimsList := make([]*resourceapi.ResourceClaim, 0, len(claims))
+	for _, claim := range claims {
+		claimsList = append(claimsList, claim)
+	}
+
+	return claimsList
+}
+
+// configureResourceClaim updates or adds a ResourceClaim in the current patch layer.
+// This is typically used internally when a claim's state (like allocation) changes.
+func (s *Snapshot) configureResourceClaim(claim *resourceapi.ResourceClaim) {
+	claimId := ResourceClaimId{Name: claim.Name, Namespace: claim.Namespace}
+	s.resourceClaims.SetCurrent(claimId, claim)
+}
+
+// getResourceClaim retrieves a specific ResourceClaim by its ID from the snapshot.
+func (s *Snapshot) getResourceClaim(claimId ResourceClaimId) (*resourceapi.ResourceClaim, bool) {
+	return s.resourceClaims.FindValue(claimId)
 }
 
-func (s Snapshot) claimForPod(pod *apiv1.Pod, claimRef apiv1.PodResourceClaim) (*resourceapi.ResourceClaim, error) {
-	claimName := claimRefToName(pod, claimRef)
-	if claimName == "" {
-		return nil, fmt.Errorf("couldn't determine ResourceClaim name")
+// listResourceSlices retrieves all effective ResourceSlices from the snapshot.
+func (s *Snapshot) listResourceSlices() []*resourceapi.ResourceSlice {
+	resourceSlices := s.resourceSlices.AsMap()
+	resourceSlicesList := make([]*resourceapi.ResourceSlice, 0, len(resourceSlices))
+	for _, nodeSlices := range resourceSlices {
+		resourceSlicesList = append(resourceSlicesList, nodeSlices...)
 	}
 
-	claim, found := s.resourceClaimsById[ResourceClaimId{Name: claimName, Namespace: pod.Namespace}]
-	if !found {
-		return nil, fmt.Errorf("couldn't find ResourceClaim %q", claimName)
+	return resourceSlicesList
+}
+
+// getDeviceClass retrieves a specific DeviceClass by its name from the snapshot.
+func (s *Snapshot) getDeviceClass(className string) (*resourceapi.DeviceClass, bool) {
+	return s.deviceClasses.FindValue(className)
+}
+
+// findPodClaims retrieves all ResourceClaim objects referenced by a given pod.
+// If ignoreNotTracked is true, it skips claims not found in the snapshot; otherwise, it returns an error.
+func (s *Snapshot) findPodClaims(pod *apiv1.Pod, ignoreNotTracked bool) ([]*resourceapi.ResourceClaim, error) {
+	// Append found claims rather than assigning by index so that skipped
+	// (untracked) claims don't leave nil entries when ignoreNotTracked is true.
+	result := make([]*resourceapi.ResourceClaim, 0, len(pod.Spec.ResourceClaims))
+	for _, claimRef := range pod.Spec.ResourceClaims {
+		claimName := claimRefToName(pod, claimRef)
+		if claimName == "" {
+			if !ignoreNotTracked {
+				return nil, fmt.Errorf(
+					"error while obtaining ResourceClaim %s for pod %s/%s: couldn't determine ResourceClaim name",
+					claimRef.Name,
+					pod.Namespace,
+					pod.Name,
+				)
+			}
+
+			continue
+		}
+
+		claimId := ResourceClaimId{Name: claimName, Namespace: pod.Namespace}
+		claim, found := s.resourceClaims.FindValue(claimId)
+		if !found {
+			if !ignoreNotTracked {
+				return nil, fmt.Errorf(
+					"error while obtaining ResourceClaim %s for pod %s/%s: couldn't find ResourceClaim in the snapshot",
+					claimRef.Name,
+					pod.Namespace,
+					pod.Name,
+				)
+			}
+
+			continue
+		}
+
+		result = append(result, claim)
+	}
+
+	return result, nil
+}
+
+// ensureClaimWritable returns a ResourceClaim suitable for in-place modifications.
+// If the requested claim is already stored in the current patch, the same object is
+// returned; otherwise a deep copy is created. This gives state-changing claim
+// operations copy-on-write semantics when the claim isn't yet tracked in the
+// current layer of the patch set.
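+//
+// A sketch of the typical caller pattern (illustrative; it mirrors ReservePodClaims above):
+//
+//	claimId := GetClaimId(claim)
+//	claim = s.ensureClaimWritable(claim)
+//	drautils.AddPodReservationInPlace(claim, pod)
+//	s.resourceClaims.SetCurrent(claimId, claim)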
+func (s *Snapshot) ensureClaimWritable(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
+	claimId := GetClaimId(claim)
+	if s.resourceClaims.InCurrentPatch(claimId) {
+		return claim
 	}
 
-	return claim, nil
+	return claim.DeepCopy()
 }
 
+// claimRefToName determines the actual name of a ResourceClaim based on a PodResourceClaim reference.
+// It first checks if the name is directly specified in the reference. If not, it looks up the name
+// in the pod's status. Returns an empty string if the name cannot be determined.
 func claimRefToName(pod *apiv1.Pod, claimRef apiv1.PodResourceClaim) string {
 	if claimRef.ResourceClaimName != nil {
 		return *claimRef.ResourceClaimName
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
index c1dad60a12cae89e88651069488863244e2e2271..a277303243e414448b3980eb8d4a36015c89898a 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
@@ -25,51 +25,51 @@ import (
 	"k8s.io/dynamic-resource-allocation/structured"
 )
 
-type snapshotClaimTracker Snapshot
+type snapshotClaimTracker struct {
+	snapshot *Snapshot
+}
 
-func (s snapshotClaimTracker) List() ([]*resourceapi.ResourceClaim, error) {
-	var result []*resourceapi.ResourceClaim
-	for _, claim := range s.resourceClaimsById {
-		result = append(result, claim)
-	}
-	return result, nil
+func (ct snapshotClaimTracker) List() ([]*resourceapi.ResourceClaim, error) {
+	return ct.snapshot.listResourceClaims(), nil
 }
 
-func (s snapshotClaimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) {
-	claim, found := s.resourceClaimsById[ResourceClaimId{Name: claimName, Namespace: namespace}]
+func (ct snapshotClaimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) {
+	claimId := ResourceClaimId{Name: claimName, Namespace: namespace}
+	claim, found := ct.snapshot.getResourceClaim(claimId)
 	if !found {
 		return nil, fmt.Errorf("claim %s/%s not found", namespace, claimName)
 	}
 	return claim, nil
 }
 
-func (s snapshotClaimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) {
+func (ct snapshotClaimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) {
 	result := sets.New[structured.DeviceID]()
-	for _, claim := range s.resourceClaimsById {
+	for _, claim := range ct.snapshot.listResourceClaims() {
 		result = result.Union(claimAllocatedDevices(claim))
 	}
 	return result, nil
 }
 
-func (s snapshotClaimTracker) SignalClaimPendingAllocation(claimUid types.UID, allocatedClaim *resourceapi.ResourceClaim) error {
+func (ct snapshotClaimTracker) SignalClaimPendingAllocation(claimUid types.UID, allocatedClaim *resourceapi.ResourceClaim) error {
 	// The DRA scheduler plugin calls this at the end of the scheduling phase, in Reserve. Then, the allocation is persisted via an API
 	// call during the binding phase.
 	//
 	// In Cluster Autoscaler only the scheduling phase is run, so SignalClaimPendingAllocation() is used to obtain the allocation
 	// and persist it in-memory in the snapshot.
-	ref := ResourceClaimId{Name: allocatedClaim.Name, Namespace: allocatedClaim.Namespace}
-	claim, found := s.resourceClaimsById[ref]
+	claimId := ResourceClaimId{Name: allocatedClaim.Name, Namespace: allocatedClaim.Namespace}
+	claim, found := ct.snapshot.getResourceClaim(claimId)
 	if !found {
 		return fmt.Errorf("claim %s/%s not found", allocatedClaim.Namespace, allocatedClaim.Name)
 	}
 	if claim.UID != claimUid {
 		return fmt.Errorf("claim %s/%s: snapshot has UID %q, allocation came for UID %q - shouldn't happenn", allocatedClaim.Namespace, allocatedClaim.Name, claim.UID, claimUid)
 	}
-	s.resourceClaimsById[ref] = allocatedClaim
+
+	ct.snapshot.configureResourceClaim(allocatedClaim)
 	return nil
 }
 
-func (s snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool {
+func (ct snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool {
 	// The DRA scheduler plugin calls this at the beginning of Filter, and fails the filter if true is returned to handle race conditions.
 	//
 	// In the scheduler implementation, ClaimHasPendingAllocation() starts answering true after SignalClaimPendingAllocation()
@@ -81,19 +81,19 @@ func (s snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool
 	return false
 }
 
-func (s snapshotClaimTracker) RemoveClaimPendingAllocation(claimUid types.UID) (deleted bool) {
+func (ct snapshotClaimTracker) RemoveClaimPendingAllocation(claimUid types.UID) (deleted bool) {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.RemoveClaimPendingAllocation() was called - this should never happen")
 }
 
-func (s snapshotClaimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error {
+func (ct snapshotClaimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.AssumeClaimAfterAPICall() was called - this should never happen")
 }
 
-func (s snapshotClaimTracker) AssumedClaimRestore(namespace, claimName string) {
+func (ct snapshotClaimTracker) AssumedClaimRestore(namespace, claimName string) {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.AssumedClaimRestore() was called - this should never happen")
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
index 273d7c24aa48300871900d007c5faadbbae69dfd..b3d50e0badfd1d83d2adf3784cc068600d76b5f1 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
@@ -170,7 +170,7 @@ func TestSnapshotClaimTrackerListAllAllocatedDevices(t *testing.T) {
 				GetClaimId(allocatedClaim2): allocatedClaim2,
 				GetClaimId(claim3):          claim3,
 			},
-			wantDevices: sets.New[structured.DeviceID](
+			wantDevices: sets.New(
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-1"),
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-2"),
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-3"),
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
index 7beaaba22763b67c671157fba458b71bf2efef66..39affd2847c2270eadaa553b0febf7c8f0696936 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
@@ -22,18 +22,16 @@ import (
 	resourceapi "k8s.io/api/resource/v1beta1"
 )
 
-type snapshotClassLister Snapshot
+type snapshotClassLister struct {
+	snapshot *Snapshot
+}
 
 func (s snapshotClassLister) List() ([]*resourceapi.DeviceClass, error) {
-	var result []*resourceapi.DeviceClass
-	for _, class := range s.deviceClasses {
-		result = append(result, class)
-	}
-	return result, nil
+	return s.snapshot.listDeviceClasses(), nil
 }
 
 func (s snapshotClassLister) Get(className string) (*resourceapi.DeviceClass, error) {
-	class, found := s.deviceClasses[className]
+	class, found := s.snapshot.getDeviceClass(className)
 	if !found {
 		return nil, fmt.Errorf("DeviceClass %q not found", className)
 	}
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
index de14ded286e422a6e8423d7c2d953597517a6596..0cf5be3bf4915122d542e40876741f6ec874fe5c 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
@@ -20,15 +20,11 @@ import (
 	resourceapi "k8s.io/api/resource/v1beta1"
 )
 
-type snapshotSliceLister Snapshot
+type snapshotSliceLister struct {
+	snapshot *Snapshot
+}
 
-func (s snapshotSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
-	var result []*resourceapi.ResourceSlice
-	for _, slices := range s.resourceSlicesByNodeName {
-		for _, slice := range slices {
-			result = append(result, slice)
-		}
-	}
-	result = append(result, s.nonNodeLocalResourceSlices...)
-	return result, nil
+// TODO(DRA): Actually handle the taint rules.
+func (sl snapshotSliceLister) ListWithDeviceTaintRules() ([]*resourceapi.ResourceSlice, error) {
+	return sl.snapshot.listResourceSlices(), nil
 }
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister_test.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister_test.go
index b5deb34d38bf3a8ef5cae968ffec1d0c2f5ebce9..8712aea795236c35300b2cc83c5fe0b0c189014e 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister_test.go
@@ -76,7 +76,7 @@ func TestSnapshotSliceListerList(t *testing.T) {
 		t.Run(tc.testName, func(t *testing.T) {
 			snapshot := NewSnapshot(nil, tc.localSlices, tc.globalSlices, nil)
 			var resourceSliceLister schedulerframework.ResourceSliceLister = snapshot.ResourceSlices()
-			slices, err := resourceSliceLister.List()
+			slices, err := resourceSliceLister.ListWithDeviceTaintRules()
 			if err != nil {
 				t.Fatalf("snapshotSliceLister.List(): got unexpected error: %v", err)
 			}
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
index 386765313f7a4e4a12eb2f3a62f91b35ebcd54be..51d9dcd903fcda0ebe17eb351af212a6391bf30f 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
@@ -68,6 +68,9 @@ var (
 	pod2OwnClaim1 = drautils.TestClaimWithPodOwnership(pod2,
 		&resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "pod2-ownClaim1-abc", UID: "pod2-ownClaim1-abc", Namespace: "default"}},
 	)
+
+	deviceClass1 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class1", UID: "class1-uid"}}
+	deviceClass2 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class2", UID: "class2-uid"}}
 )
 
 func TestSnapshotResourceClaims(t *testing.T) {
@@ -79,7 +82,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 
 		claims map[ResourceClaimId]*resourceapi.ResourceClaim
 
-		claimsModFun        func(snapshot Snapshot) error
+		claimsModFun        func(snapshot *Snapshot) error
 		wantClaimsModFunErr error
 
 		pod              *apiv1.Pod
@@ -141,7 +144,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim2.DeepCopy(), sharedClaim1.DeepCopy()})
 			},
 			wantClaimsModFunErr: cmpopts.AnyError,
@@ -153,7 +156,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim2.DeepCopy(), pod2OwnClaim1.DeepCopy()}); err != nil {
 					return err
 				}
@@ -171,7 +174,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				snapshot.RemovePodOwnedClaims(pod1)
 				return nil
 			},
@@ -189,7 +192,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				snapshot.RemovePodOwnedClaims(pod1)
 				return nil
 			},
@@ -211,7 +214,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing, so this should be an error.
 				return snapshot.ReservePodClaims(pod1)
 			},
@@ -234,7 +237,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing in claims above, so this should be an error.
 				return snapshot.ReservePodClaims(pod1)
 			},
@@ -258,7 +261,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.ReservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -286,7 +289,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing in claims above, so this should be an error.
 				return snapshot.UnreservePodClaims(pod1)
 			},
@@ -309,7 +312,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.UnreservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -338,7 +341,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1), nil),
 				GetClaimId(pod1OwnClaim2): drautils.TestClaimWithAllocation(pod1OwnClaim2.DeepCopy(), nil),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.UnreservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -404,7 +407,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 	for _, tc := range []struct {
 		testName string
 
-		slicesModFun        func(snapshot Snapshot) error
+		slicesModFun        func(snapshot *Snapshot) error
 		wantSlicesModFunErr error
 
 		nodeName            string
@@ -426,7 +429,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "AddNodeResourceSlices(): adding slices for a Node that already has slices tracked is an error",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddNodeResourceSlices("node1", []*resourceapi.ResourceSlice{node1Slice1})
 			},
 			wantSlicesModFunErr: cmpopts.AnyError,
@@ -434,7 +437,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "AddNodeResourceSlices(): adding slices for a new Node works correctly",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{extraNode3Slice1, extraNode3Slice2})
 			},
 			nodeName:            "node3",
@@ -444,7 +447,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "RemoveNodeResourceSlices(): removing slices for a non-existing Node is a no-op",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				snapshot.RemoveNodeResourceSlices("node3")
 				return nil
 			},
@@ -452,7 +455,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "RemoveNodeResourceSlices(): removing slices for an existing Node works correctly",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				snapshot.RemoveNodeResourceSlices("node2")
 				return nil
 			},
@@ -480,7 +483,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 			}
 
 			if tc.wantAllSlices != nil {
-				allSlices, err := snapshot.ResourceSlices().List()
+				allSlices, err := snapshot.ResourceSlices().ListWithDeviceTaintRules()
 				if err != nil {
 					t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
 				}
@@ -574,6 +577,7 @@ func TestSnapshotWrapSchedulerNodeInfo(t *testing.T) {
 			cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
 				cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
 				test.IgnoreObjectOrder[*resourceapi.ResourceClaim](), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()}
+
 			if diff := cmp.Diff(tc.wantNodeInfo, nodeInfo, cmpOpts...); diff != "" {
 				t.Errorf("Snapshot.WrapSchedulerNodeInfo(): unexpected output (-want +got): %s", diff)
 			}
@@ -581,121 +585,6 @@ func TestSnapshotWrapSchedulerNodeInfo(t *testing.T) {
 	}
 }
 
-func TestSnapshotClone(t *testing.T) {
-	for _, tc := range []struct {
-		testName           string
-		snapshot           Snapshot
-		cloneModFun        func(snapshot Snapshot) error
-		wantModifiedClaims []*resourceapi.ResourceClaim
-		wantModifiedSlices []*resourceapi.ResourceSlice
-	}{
-		{
-			testName: "empty snapshot",
-			snapshot: Snapshot{},
-			cloneModFun: func(snapshot Snapshot) error {
-				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{pod1OwnClaim1.DeepCopy(), pod1OwnClaim2.DeepCopy()}); err != nil {
-					return err
-				}
-				return snapshot.AddNodeResourceSlices("node1", []*resourceapi.ResourceSlice{node1Slice1, node1Slice2})
-			},
-			wantModifiedClaims: []*resourceapi.ResourceClaim{pod1OwnClaim1, pod1OwnClaim2},
-			wantModifiedSlices: []*resourceapi.ResourceSlice{node1Slice1, node1Slice2},
-		},
-		{
-			testName: "non-empty snapshot",
-			snapshot: NewSnapshot(
-				map[ResourceClaimId]*resourceapi.ResourceClaim{
-					GetClaimId(sharedClaim1):  drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
-					GetClaimId(sharedClaim2):  sharedClaim2.DeepCopy(),
-					GetClaimId(sharedClaim3):  drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
-					GetClaimId(pod2OwnClaim1): drautils.TestClaimWithPodOwnership(pod2, drautils.TestClaimWithPodReservations(pod2OwnClaim1, pod2)),
-				},
-				map[string][]*resourceapi.ResourceSlice{
-					"node1": {node1Slice1, node1Slice2},
-					"node2": {node2Slice1, node2Slice2},
-				},
-				[]*resourceapi.ResourceSlice{globalSlice1, globalSlice2}, nil),
-			cloneModFun: func(snapshot Snapshot) error {
-				if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{node3Slice1, node3Slice2}); err != nil {
-					return err
-				}
-				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{pod1OwnClaim1.DeepCopy(), pod1OwnClaim2.DeepCopy()}); err != nil {
-					return err
-				}
-				if err := snapshot.ReservePodClaims(pod1); err != nil {
-					return err
-				}
-				snapshot.RemovePodOwnedClaims(pod2)
-				snapshot.RemoveNodeResourceSlices("node1")
-				return nil
-			},
-			wantModifiedSlices: []*resourceapi.ResourceSlice{node2Slice1, node2Slice2, node3Slice1, node3Slice2, globalSlice1, globalSlice2},
-			wantModifiedClaims: []*resourceapi.ResourceClaim{
-				drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
-				drautils.TestClaimWithPodReservations(pod1OwnClaim2, pod1),
-				drautils.TestClaimWithPodReservations(sharedClaim1, pod1),
-				drautils.TestClaimWithPodReservations(sharedClaim2, pod1),
-				sharedClaim3,
-			},
-		},
-	} {
-		t.Run(tc.testName, func(t *testing.T) {
-			// Grab the initial state of the snapshot to verify that it doesn't change when the clone is modified.
-			initialClaims, err := tc.snapshot.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			initialSlices, err := tc.snapshot.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-
-			// Clone and verify that the clone is identical to the original.
-			snapshotClone := tc.snapshot.Clone()
-			if diff := cmp.Diff(tc.snapshot, snapshotClone, cmpopts.EquateEmpty(), cmp.AllowUnexported(Snapshot{}, framework.NodeInfo{}, schedulerframework.NodeInfo{})); diff != "" {
-				t.Fatalf("Snapshot.Clone(): snapshot not identical after cloning (-want +got): %s", diff)
-			}
-
-			// Modify the clone.
-			if err := tc.cloneModFun(snapshotClone); err != nil {
-				t.Fatalf("Snapshot: unexpected error during snapshot modification: %v", err)
-			}
-
-			// Verify that the clone changed as expected.
-			modifiedClaims, err := snapshotClone.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			modifiedSlices, err := snapshotClone.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-			if diff := cmp.Diff(tc.wantModifiedClaims, modifiedClaims, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
-				t.Errorf("Snapshot: unexpected ResourceClaim state after modifications (-want +got): %s", diff)
-			}
-			if diff := cmp.Diff(tc.wantModifiedSlices, modifiedSlices, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
-				t.Errorf("Snapshot: unexpected ResourceSlice state after modifications (-want +got): %s", diff)
-			}
-
-			// Verify that the original hasn't changed during clone modifications.
-			initialClaimsAfterCloneMod, err := tc.snapshot.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			initialSlicesAfterCloneMod, err := tc.snapshot.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-			if diff := cmp.Diff(initialClaims, initialClaimsAfterCloneMod, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
-				t.Errorf("Snapshot: ResourceClaim state changed in original snapshot during modifications on Clone (-want +got): %s", diff)
-			}
-			if diff := cmp.Diff(initialSlices, initialSlicesAfterCloneMod, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
-				t.Errorf("Snapshot: ResourceSlice state changed in original snapshot during modifications on Clone (-want +got): %s", diff)
-			}
-		})
-	}
-}
-
 func testPods(count int) []*apiv1.Pod {
 	var result []*apiv1.Pod
 	for i := range count {
@@ -703,3 +592,179 @@ func testPods(count int) []*apiv1.Pod {
 	}
 	return result
 }
+
+func TestSnapshotForkCommitRevert(t *testing.T) {
+	initialClaims := map[ResourceClaimId]*resourceapi.ResourceClaim{
+		GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
+		GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
+		GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
+	}
+	initialDeviceClasses := map[string]*resourceapi.DeviceClass{deviceClass1.Name: deviceClass1.DeepCopy(), deviceClass2.Name: deviceClass2.DeepCopy()}
+	initialLocalSlices := map[string][]*resourceapi.ResourceSlice{node1Slice1.Spec.NodeName: {node1Slice1.DeepCopy()}}
+	initialGlobalSlices := []*resourceapi.ResourceSlice{globalSlice1.DeepCopy(), globalSlice2.DeepCopy()}
+	initialState := NewSnapshot(initialClaims, initialLocalSlices, initialGlobalSlices, initialDeviceClasses)
+
+	addedClaim := sharedClaim2.DeepCopy()
+	addedNodeSlice := node2Slice1.DeepCopy()
+	podToReserve := pod1.DeepCopy()
+
+	modifiedClaims := map[ResourceClaimId]*resourceapi.ResourceClaim{
+		GetClaimId(sharedClaim1):  drautils.TestClaimWithPodReservations(sharedClaim1, podToReserve),
+		GetClaimId(sharedClaim2):  drautils.TestClaimWithPodReservations(addedClaim, podToReserve),
+		GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, podToReserve),
+		GetClaimId(pod1OwnClaim2): drautils.TestClaimWithPodReservations(pod1OwnClaim2, podToReserve),
+	}
+	modifiedLocalSlices := map[string][]*resourceapi.ResourceSlice{addedNodeSlice.Spec.NodeName: {addedNodeSlice.DeepCopy()}}
+	// Expected state after modifications are applied
+	modifiedState := NewSnapshot(
+		modifiedClaims,
+		modifiedLocalSlices,
+		initialGlobalSlices,
+		initialDeviceClasses,
+	)
+
+	applyModifications := func(t *testing.T, s *Snapshot) {
+		t.Helper()
+
+		addedSlices := []*resourceapi.ResourceSlice{addedNodeSlice.DeepCopy()}
+		if err := s.AddNodeResourceSlices(addedNodeSlice.Spec.NodeName, addedSlices); err != nil {
+			t.Fatalf("failed to add %s resource slices: %v", addedNodeSlice.Spec.NodeName, err)
+		}
+		if err := s.AddClaims([]*resourceapi.ResourceClaim{addedClaim}); err != nil {
+			t.Fatalf("failed to add %s claim: %v", addedClaim.Name, err)
+		}
+		if err := s.ReservePodClaims(podToReserve); err != nil {
+			t.Fatalf("failed to reserve claim %s for pod %s: %v", sharedClaim1.Name, podToReserve.Name, err)
+		}
+
+		s.RemoveNodeResourceSlices(node1Slice1.Spec.NodeName)
+	}
+
+	compareSnapshots := func(t *testing.T, want, got *Snapshot, msg string) {
+		t.Helper()
+		if diff := cmp.Diff(want, got, SnapshotFlattenedComparer(), cmpopts.EquateEmpty()); diff != "" {
+			t.Errorf("%s: Snapshot state mismatch (-want +got):\n%s", msg, diff)
+		}
+	}
+
+	t.Run("Fork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		compareSnapshots(t, modifiedState, snapshot, "After Fork and Modify")
+	})
+
+	t.Run("ForkRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Revert()
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Revert")
+	})
+
+	t.Run("ForkCommit", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit()
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Commit")
+	})
+
+	t.Run("ForkForkRevertRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Fork()
+
+		// Apply further modifications in second fork (add claim3, slice3)
+		furtherModifiedClaim3 := sharedClaim3.DeepCopy()
+		furtherModifiedSlice3 := node3Slice1.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{furtherModifiedClaim3}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+		if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{furtherModifiedSlice3}); err != nil {
+			t.Fatalf("AddNodeResourceSlices failed in second fork: %v", err)
+		}
+
+		snapshot.Revert() // Revert second fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert")
+
+		snapshot.Revert() // Revert first fork
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Fork, Modify, Revert, Revert")
+	})
+
+	t.Run("ForkForkCommitRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit() // Commit second fork into first fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Fork, Modify, Commit")
+
+		snapshot.Revert() // Revert first fork (which now contains committed changes)
+		compareSnapshots(t, initialState, snapshot, "After Fork, Fork, Modify, Commit, Revert")
+	})
+
+	t.Run("ForkForkRevertCommit", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Fork()
+		// Apply further modifications in second fork (add claim3, slice3)
+		furtherModifiedClaim3 := sharedClaim3.DeepCopy()
+		furtherModifiedSlice3 := node3Slice1.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{furtherModifiedClaim3}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+		if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{furtherModifiedSlice3}); err != nil {
+			t.Fatalf("AddNodeResourceSlices failed in second fork: %v", err)
+		}
+
+		snapshot.Revert() // Revert second fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert")
+
+		snapshot.Commit() // Commit first fork (with original modifications)
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert, Commit")
+	})
+
+	t.Run("CommitNoFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Commit() // Should be a no-op
+		compareSnapshots(t, initialState, snapshot, "After Commit with no Fork")
+	})
+
+	t.Run("RevertNoFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Revert() // Should be a no-op
+		compareSnapshots(t, initialState, snapshot, "After Revert with no Fork")
+	})
+
+	t.Run("ForkCommitRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit()
+		// Now try to revert the committed changes (should be no-op as only base layer exists)
+		snapshot.Revert()
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Commit, Revert")
+	})
+
+	t.Run("ForkRevertFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Revert()
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Revert")
+
+		snapshot.Fork() // Fork again from the reverted (initial) state
+		differentClaim := sharedClaim3.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{differentClaim}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+
+		expectedState := CloneTestSnapshot(initialState)
+		// Apply the same change to the expected state.
+		if err := expectedState.AddClaims([]*resourceapi.ResourceClaim{differentClaim.DeepCopy()}); err != nil {
+			t.Fatalf("AddClaims failed on expected state: %v", err)
+		}
+
+		compareSnapshots(t, expectedState, snapshot, "After Fork, Modify, Revert, Fork, Modify")
+	})
+}
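The test above pins down the layered semantics of the DRA Snapshot: Fork() pushes a fresh patch layer on top of the current state, Revert() drops the topmost layer, and Commit() folds it into the layer below; both are no-ops when only the base layer exists. A minimal sketch of the resulting transactional call pattern, using only methods exercised by the test (the function and variable names are illustrative, not part of this patch):

	// Hedged sketch: stage claim additions in a fork and keep them only if they
	// all apply cleanly. Assumes the exported Snapshot API shown in the tests above.
	func applyClaimsAtomically(s *Snapshot, claims []*resourceapi.ResourceClaim) error {
		s.Fork() // open a scratch layer on top of the current state
		if err := s.AddClaims(claims); err != nil {
			s.Revert() // drop the scratch layer, restoring the pre-Fork state
			return err
		}
		s.Commit() // merge the scratch layer into the layer below
		return nil
	}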
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a8f2cc32bfca80258856d4c5dbef15f96883939
--- /dev/null
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package snapshot
+
+import (
+	"github.com/google/go-cmp/cmp"
+	resourceapi "k8s.io/api/resource/v1beta1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/common"
+)
+
+// CloneTestSnapshot creates a deep copy of the provided Snapshot.
+// This function is intended for testing purposes only.
+func CloneTestSnapshot(snapshot *Snapshot) *Snapshot {
+	cloneString := func(s string) string { return s }
+	cloneResourceClaimId := func(rc ResourceClaimId) ResourceClaimId { return rc }
+	cloneDeviceClass := func(dc *resourceapi.DeviceClass) *resourceapi.DeviceClass { return dc.DeepCopy() }
+	cloneResourceClaim := func(rc *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { return rc.DeepCopy() }
+	cloneResourceSlices := func(rcs []*resourceapi.ResourceSlice) []*resourceapi.ResourceSlice {
+		clone := make([]*resourceapi.ResourceSlice, len(rcs))
+		for i := range rcs {
+			clone[i] = rcs[i].DeepCopy()
+		}
+		return clone
+	}
+
+	deviceClasses := common.ClonePatchSet(snapshot.deviceClasses, cloneString, cloneDeviceClass)
+	resourceSlices := common.ClonePatchSet(snapshot.resourceSlices, cloneString, cloneResourceSlices)
+	resourceClaims := common.ClonePatchSet(snapshot.resourceClaims, cloneResourceClaimId, cloneResourceClaim)
+
+	return &Snapshot{
+		deviceClasses:  deviceClasses,
+		resourceSlices: resourceSlices,
+		resourceClaims: resourceClaims,
+	}
+}
+
+// SnapshotFlattenedComparer returns a cmp.Option with a custom comparer for two
+// *Snapshot objects. Rather than comparing the underlying patch sets directly, it
+// compares the flattened (AsMap) view of the device classes, slices, and claims.
+// This function is intended for testing purposes only.
+func SnapshotFlattenedComparer() cmp.Option {
+	return cmp.Comparer(func(a, b *Snapshot) bool {
+		if a == nil || b == nil {
+			return a == b
+		}
+
+		devicesEqual := cmp.Equal(a.deviceClasses.AsMap(), b.deviceClasses.AsMap())
+		slicesEqual := cmp.Equal(a.resourceSlices.AsMap(), b.resourceSlices.AsMap())
+		claimsEqual := cmp.Equal(a.resourceClaims.AsMap(), b.resourceClaims.AsMap())
+
+		return devicesEqual && slicesEqual && claimsEqual
+	})
+}
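Taken together, the two helpers let a test mutate a throwaway copy of a Snapshot and then assert that the original is untouched, comparing flattened contents rather than patch-set internals. A hedged sketch of that pattern; the test name and the claims/slices/deviceClasses fixtures are illustrative and assumed to exist in this package:

	func TestCloneIsolation(t *testing.T) {
		original := NewSnapshot(claims, localSlices, globalSlices, deviceClasses)
		baseline := CloneTestSnapshot(original)

		mutated := CloneTestSnapshot(original)
		mutated.RemoveNodeResourceSlices("node-1") // mutate only the clone

		if diff := cmp.Diff(baseline, original, SnapshotFlattenedComparer(), cmpopts.EquateEmpty()); diff != "" {
			t.Errorf("original snapshot changed after mutating a clone (-want +got):\n%s", diff)
		}
	}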
diff --git a/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go b/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
index 09cffb6d88f2b69887dc63ab78531169682dce4b..68d029b5b160b59f41be0d9bfc3bfac1979f8c91 100644
--- a/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
+++ b/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
@@ -23,7 +23,7 @@ import (
 	resourceapi "k8s.io/api/resource/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/component-helpers/scheduling/corev1"
-	resourceclaim "k8s.io/dynamic-resource-allocation/resourceclaim"
+	"k8s.io/dynamic-resource-allocation/resourceclaim"
 )
 
 // ClaimAllocated returns whether the provided claim is allocated.
diff --git a/cluster-autoscaler/simulator/framework/delegating_shared_lister.go b/cluster-autoscaler/simulator/framework/delegating_shared_lister.go
index 9b1ab0eb17c2af33c56c24e21df27c58a51322fc..1a4c8c09e43627cb51f24f08ce4119aa6b062229 100644
--- a/cluster-autoscaler/simulator/framework/delegating_shared_lister.go
+++ b/cluster-autoscaler/simulator/framework/delegating_shared_lister.go
@@ -145,7 +145,7 @@ func (u unsetResourceClaimTracker) AssumedClaimRestore(namespace, claimName stri
 	klog.Errorf("lister not set in delegate")
 }
 
-func (u unsetResourceSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
+func (u unsetResourceSliceLister) ListWithDeviceTaintRules() ([]*resourceapi.ResourceSlice, error) {
 	return nil, fmt.Errorf("lister not set in delegate")
 }
 
diff --git a/cluster-autoscaler/utils/test/test_utils.go b/cluster-autoscaler/utils/test/test_utils.go
index 026a4e41266e74fe6877747897a6fca1121b9861..984924715317465bce2d98cd7fcf587269db2ec7 100644
--- a/cluster-autoscaler/utils/test/test_utils.go
+++ b/cluster-autoscaler/utils/test/test_utils.go
@@ -160,8 +160,8 @@ func WithHostPort(hostport int32) func(*apiv1.Pod) {
 	}
 }
 
-// WithMaxSkew sets a namespace to the pod.
-func WithMaxSkew(maxSkew int32, topologySpreadingKey string) func(*apiv1.Pod) {
+// WithMaxSkew adds a topology spread constraint with the given maxSkew, topology key and minDomains to the pod.
+func WithMaxSkew(maxSkew int32, topologySpreadingKey string, minDomains int32) func(*apiv1.Pod) {
 	return func(pod *apiv1.Pod) {
 		if maxSkew > 0 {
 			pod.Spec.TopologySpreadConstraints = []apiv1.TopologySpreadConstraint{
@@ -174,6 +174,7 @@ func WithMaxSkew(maxSkew int32, topologySpreadingKey string) func(*apiv1.Pod) {
 							"app": "estimatee",
 						},
 					},
+					MinDomains: &minDomains,
 				},
 			}
 		}
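With the added parameter, call sites of WithMaxSkew now have to pass minDomains explicitly. A hedged usage sketch, assuming a pod-builder helper in this package (named BuildTestPod here for illustration) that accepts these option funcs:

	pod := BuildTestPod("estimatee", 100, 200,
		WithMaxSkew(1, "topology.kubernetes.io/zone", 2)) // maxSkew=1, minDomains=2

As a reminder, the Kubernetes API only honours a non-nil MinDomains when the constraint's WhenUnsatisfiable is DoNotSchedule, and a nil value behaves as if MinDomains were 1.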
diff --git a/cluster-autoscaler/version/version.go b/cluster-autoscaler/version/version.go
index 069e16bee61d38372c611dadc6c182d05b3c9ace..3bf124c96459eb9673e44b23a265952f9eabe194 100644
--- a/cluster-autoscaler/version/version.go
+++ b/cluster-autoscaler/version/version.go
@@ -17,4 +17,4 @@ limitations under the License.
 package version
 
 // ClusterAutoscalerVersion contains version of CA.
-const ClusterAutoscalerVersion = "1.33.0-beta.0"
+const ClusterAutoscalerVersion = "1.34.0-alpha.0"
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/Dockerfile b/vertical-pod-autoscaler/pkg/admission-controller/Dockerfile
index e9687cd9c86b245bd899fce4892868276ee62527..c2b41153ce214a8fb48434a3e991f92296c31913 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/Dockerfile
+++ b/vertical-pod-autoscaler/pkg/admission-controller/Dockerfile
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=$BUILDPLATFORM golang:1.24.3 AS builder
+FROM --platform=$BUILDPLATFORM golang:1.24.4 AS builder
 
 WORKDIR /workspace
 
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go
index e68ddc79dbc575b565dcdff2f9c03a44d6618406..5cb9cbd96c5068e71685c57e513324a1f9e013c3 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go
@@ -118,7 +118,7 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa
 		klog.V(2).InfoS("Can't calculate recommendations, one of VPA or Pod is nil", "vpa", vpa, "pod", pod)
 		return nil, nil, nil
 	}
-	klog.V(2).InfoS("Updating requirements for pod", "pod", pod.Name)
+	klog.V(2).InfoS("Updating requirements for pod", "pod", klog.KObj(pod))
 
 	var annotations vpa_api_util.ContainerToAnnotationsMap
 	recommendedPodResources := &vpa_types.RecommendedPodResources{}
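The logging change above swaps the bare pod name for klog.KObj, which wraps the object's namespace and name in a klog.ObjectRef; with the default text format the key then renders as "namespace/name" rather than just the name, disambiguating same-named pods across namespaces. A before/after sketch with illustrative values:

	klog.V(2).InfoS("Updating requirements for pod", "pod", pod.Name)       // pod="my-pod"
	klog.V(2).InfoS("Updating requirements for pod", "pod", klog.KObj(pod)) // pod="my-namespace/my-pod"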
diff --git a/vertical-pod-autoscaler/pkg/recommender/Dockerfile b/vertical-pod-autoscaler/pkg/recommender/Dockerfile
index 7727bc5c0d8b03d3ad6dd60789ba510cdaa31a2c..5cf22afd0adc745fcd0626af6b2b798ba25bc3be 100644
--- a/vertical-pod-autoscaler/pkg/recommender/Dockerfile
+++ b/vertical-pod-autoscaler/pkg/recommender/Dockerfile
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=$BUILDPLATFORM golang:1.24.3 AS builder
+FROM --platform=$BUILDPLATFORM golang:1.24.4 AS builder
 
 WORKDIR /workspace
 
diff --git a/vertical-pod-autoscaler/pkg/updater/Dockerfile b/vertical-pod-autoscaler/pkg/updater/Dockerfile
index dcb90f427a9470ac26e785bb293e9b7d0966b42e..03ad71320ec0152bd66747dfb98e5b6bcbf47f20 100644
--- a/vertical-pod-autoscaler/pkg/updater/Dockerfile
+++ b/vertical-pod-autoscaler/pkg/updater/Dockerfile
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=$BUILDPLATFORM golang:1.24.3 AS builder
+FROM --platform=$BUILDPLATFORM golang:1.24.4 AS builder
 
 WORKDIR /workspace