diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
index 1ac006c6d06564f42f57dc2c59bef6cdf685204f..242caeb1a6b40b2a4d675e63dcffaaf0ab7653bf 100644
--- a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
+++ b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
@@ -26,7 +26,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -111,7 +110,7 @@ func TestFilterOutExpendable(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			processor := NewFilterOutExpendablePodListProcessor()
 			snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-			err := snapshot.SetClusterState(tc.nodes, nil, drasnapshot.Snapshot{})
+			err := snapshot.SetClusterState(tc.nodes, nil, nil)
 			assert.NoError(t, err)
 
 			pods, err := processor.Process(&context.AutoscalingContext{
diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
index 61303ed3a8d154abb3b78538dc60df6857abfc2e..5eb7db7b64e51ba791f723fb3db8d6c77dc43d91 100644
--- a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
+++ b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
@@ -27,7 +27,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/store"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -281,7 +280,7 @@ func BenchmarkFilterOutSchedulable(b *testing.B) {
 				}
 
 				clusterSnapshot := snapshotFactory()
-				if err := clusterSnapshot.SetClusterState(nodes, scheduledPods, drasnapshot.Snapshot{}); err != nil {
+				if err := clusterSnapshot.SetClusterState(nodes, scheduledPods, nil); err != nil {
 					assert.NoError(b, err)
 				}
 
diff --git a/cluster-autoscaler/core/scaledown/actuation/actuator.go b/cluster-autoscaler/core/scaledown/actuation/actuator.go
index 2542b383667505d1c5634bae73935786b3f36fdc..55ef2e5a8fa66a1367965ac3aa1b6b286604659a 100644
--- a/cluster-autoscaler/core/scaledown/actuation/actuator.go
+++ b/cluster-autoscaler/core/scaledown/actuation/actuator.go
@@ -406,7 +406,7 @@ func (a *Actuator) createSnapshot(nodes []*apiv1.Node) (clustersnapshot.ClusterS
 	scheduledPods := kube_util.ScheduledPods(pods)
 	nonExpendableScheduledPods := utils.FilterOutExpendablePods(scheduledPods, a.ctx.ExpendablePodsPriorityCutoff)
 
-	var draSnapshot drasnapshot.Snapshot
+	var draSnapshot *drasnapshot.Snapshot
 	if a.ctx.DynamicResourceAllocationEnabled && a.ctx.DraProvider != nil {
 		draSnapshot, err = a.ctx.DraProvider.Snapshot()
 		if err != nil {
diff --git a/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go b/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
index de00c8bb1180a4bf10640fd728b424da469d5b0b..41544693574b9496788e2dbdd15b3fca2a101a68 100644
--- a/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
+++ b/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
@@ -42,7 +42,7 @@ type testCase struct {
 	desc                        string
 	nodes                       []*apiv1.Node
 	pods                        []*apiv1.Pod
-	draSnapshot                 drasnapshot.Snapshot
+	draSnapshot                 *drasnapshot.Snapshot
 	draEnabled                  bool
 	wantUnneeded                []string
 	wantUnremovable             []*simulator.UnremovableNode
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
index 1ba94b3d6b7e0eb3bf171026cc22239b3abc9d9b..70be8d036a16f9616b2147ae30b7ba32aa8d7296 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
@@ -46,7 +46,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
 	processorstest "k8s.io/autoscaler/cluster-autoscaler/processors/test"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -1044,7 +1043,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
 	// build orchestrator
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods), drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods), nil)
 	assert.NoError(t, err)
 	nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
 		Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
@@ -1154,7 +1153,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
 	}
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1197,7 +1196,7 @@ func TestBinpackingLimiter(t *testing.T) {
 
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
 		Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
@@ -1257,7 +1256,7 @@ func TestScaleUpNoHelp(t *testing.T) {
 	}
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1412,7 +1411,7 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
 			listers := kube_util.NewListerRegistry(nil, nil, kube_util.NewTestPodLister(nil), nil, nil, nil, nil, nil, nil)
 			ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{BalanceSimilarNodeGroups: tc.balancingEnabled}, &fake.Clientset{}, listers, provider, nil, nil)
 			assert.NoError(t, err)
-			err = ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 			assert.NoError(t, err)
 			nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 			clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1496,7 +1495,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 			}
 			context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 			assert.NoError(t, err)
-			err = context.ClusterSnapshot.SetClusterState(nodes, podList, drasnapshot.Snapshot{})
+			err = context.ClusterSnapshot.SetClusterState(nodes, podList, nil)
 			assert.NoError(t, err)
 			nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 			clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1672,7 +1671,7 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
 	assert.NoError(t, err)
 
 	nodes := []*apiv1.Node{n1, n2}
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 	processors := processorstest.NewTestProcessors(&context)
diff --git a/cluster-autoscaler/core/scaleup/resource/manager_test.go b/cluster-autoscaler/core/scaleup/resource/manager_test.go
index 280942601ef3822a2db7790f33894575bd595578..7d4573afed790dda206a1a912ae2900e0ee42676 100644
--- a/cluster-autoscaler/core/scaleup/resource/manager_test.go
+++ b/cluster-autoscaler/core/scaleup/resource/manager_test.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/core/test"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
 	processorstest "k8s.io/autoscaler/cluster-autoscaler/processors/test"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
 	utils_test "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -72,7 +71,7 @@ func TestDeltaForNode(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -115,7 +114,7 @@ func TestResourcesLeft(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		_, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -168,7 +167,7 @@ func TestApplyLimits(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -235,7 +234,7 @@ func TestResourceManagerWithGpuResource(t *testing.T) {
 	assert.NoError(t, err)
 
 	nodes := []*corev1.Node{n1}
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go
index 95285b828923de14703ab66410a2c918f83aa9d4..f0109cf366500091f3a4593c6152b8c9c22abb8c 100644
--- a/cluster-autoscaler/core/static_autoscaler.go
+++ b/cluster-autoscaler/core/static_autoscaler.go
@@ -335,7 +335,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 	}
 	nonExpendableScheduledPods := core_utils.FilterOutExpendablePods(originalScheduledPods, a.ExpendablePodsPriorityCutoff)
 
-	var draSnapshot drasnapshot.Snapshot
+	var draSnapshot *drasnapshot.Snapshot
 	if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.AutoscalingContext.DraProvider != nil {
 		draSnapshot, err = a.AutoscalingContext.DraProvider.Snapshot()
 		if err != nil {
diff --git a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
index c28933637663f63c610a965fa0a2f382664473e8..9a28ea1ae3093c6d68ecaa1d4ca9bbfea13a4adc 100644
--- a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
+++ b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
@@ -28,7 +28,6 @@ import (
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
@@ -86,7 +85,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
 
 	nodes := []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1, ready7, readyToBeDeleted6}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	ctx := context.AutoscalingContext{
@@ -173,7 +172,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 
 	nodes := []*apiv1.Node{unready4, unready3, ready2, ready1}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	// Fill cache
@@ -264,7 +263,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
 
 	nodes := []*apiv1.Node{ready1}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	ctx := context.AutoscalingContext{
diff --git a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
index fb6b98efec260397a40d0f4a8ccc256e151b9423..d184637a9026a2da6a9f7cd8d509f68f3697ebe2 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
@@ -77,7 +77,7 @@ type ClusterSnapshotStore interface {
 
 	// SetClusterState resets the snapshot to an unforked state and replaces the contents of the snapshot
 	// with the provided data. scheduledPods are correlated to their Nodes based on spec.NodeName.
-	SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error
+	SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error
 
 	// ForceAddPod adds the given Pod to the Node with the given nodeName inside the snapshot without checking scheduler predicates.
 	ForceAddPod(pod *apiv1.Pod, nodeName string) error
@@ -93,7 +93,7 @@ type ClusterSnapshotStore interface {
 	RemoveSchedulerNodeInfo(nodeName string) error
 
 	// DraSnapshot returns an interface that allows accessing and modifying the DRA objects in the snapshot.
-	DraSnapshot() drasnapshot.Snapshot
+	DraSnapshot() *drasnapshot.Snapshot
 
 	// Fork creates a fork of snapshot state. All modifications can later be reverted to moment of forking via Revert().
 	// Use WithForkedSnapshot() helper function instead if possible.
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
index 8c4f359327563a84094116f4c7d1ab5978516cc4..d347e4e7d2107a4f2aa94c7f7bf4dd21d0d989ae 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
@@ -56,6 +56,7 @@ func (s *PredicateSnapshot) GetNodeInfo(nodeName string) (*framework.NodeInfo, e
 	if err != nil {
 		return nil, err
 	}
+
 	if s.draEnabled {
 		return s.ClusterSnapshotStore.DraSnapshot().WrapSchedulerNodeInfo(schedNodeInfo)
 	}
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
index 1e4c5fbf885c9c695bd81c57c50b94ef9cdb2938..b384bcecc02f7637b7a9647aa3ccf05538ab8c52 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
@@ -23,7 +23,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -40,7 +39,7 @@ func BenchmarkAddNodeInfo(b *testing.B) {
 			b.Run(fmt.Sprintf("%s: AddNodeInfo() %d", snapshotName, tc), func(b *testing.B) {
 				for i := 0; i < b.N; i++ {
 					b.StopTimer()
-					assert.NoError(b, clusterSnapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+					assert.NoError(b, clusterSnapshot.SetClusterState(nil, nil, nil))
 					b.StartTimer()
 					for _, node := range nodes {
 						err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node))
@@ -62,7 +61,7 @@ func BenchmarkListNodeInfos(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc)
 			clusterSnapshot, err := snapshotFactory()
 			assert.NoError(b, err)
-			err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 			if err != nil {
 				assert.NoError(b, err)
 			}
@@ -92,14 +91,14 @@ func BenchmarkAddPods(b *testing.B) {
 			clustersnapshot.AssignTestPodsToNodes(pods, nodes)
 			clusterSnapshot, err := snapshotFactory()
 			assert.NoError(b, err)
-			err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 			assert.NoError(b, err)
 			b.ResetTimer()
 			b.Run(fmt.Sprintf("%s: ForceAddPod() 30*%d", snapshotName, tc), func(b *testing.B) {
 				for i := 0; i < b.N; i++ {
 					b.StopTimer()
 
-					err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+					err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 					if err != nil {
 						assert.NoError(b, err)
 					}
@@ -128,7 +127,7 @@ func BenchmarkForkAddRevert(b *testing.B) {
 				clustersnapshot.AssignTestPodsToNodes(pods, nodes)
 				clusterSnapshot, err := snapshotFactory()
 				assert.NoError(b, err)
-				err = clusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+				err = clusterSnapshot.SetClusterState(nodes, pods, nil)
 				assert.NoError(b, err)
 				tmpNode1 := BuildTestNode("tmp-1", 2000, 2000000)
 				tmpNode2 := BuildTestNode("tmp-2", 2000, 2000000)
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
index ad9b78638788a29c1ab96cf68107336bf068e2d4..3d98e3ac608a012a843cf24bd7c66c0e07066909 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
@@ -71,7 +71,7 @@ func extractNodes(nodeInfos []*framework.NodeInfo) []*apiv1.Node {
 type snapshotState struct {
 	nodes       []*apiv1.Node
 	podsByNode  map[string][]*apiv1.Pod
-	draSnapshot drasnapshot.Snapshot
+	draSnapshot *drasnapshot.Snapshot
 }
 
 func compareStates(t *testing.T, a, b snapshotState) {
@@ -148,7 +148,9 @@ func startSnapshot(t *testing.T, snapshotFactory func() (clustersnapshot.Cluster
 			pods = append(pods, pod)
 		}
 	}
-	err = snapshot.SetClusterState(state.nodes, pods, state.draSnapshot.Clone())
+
+	draSnapshot := drasnapshot.CloneTestSnapshot(state.draSnapshot)
+	err = snapshot.SetClusterState(state.nodes, pods, draSnapshot)
 	assert.NoError(t, err)
 	return snapshot
 }
@@ -476,704 +478,707 @@ func validTestCases(t *testing.T, snapshotName string) []modificationTestCase {
 				podsByNode: map[string][]*apiv1.Pod{largeNode.Name: {withNodeName(largePod, largeNode.Name)}},
 			},
 		},
-	}
-
-	// Only the Basic store is compatible with DRA for now.
-	if snapshotName == "basic" {
-		// Uncomment to get logs from the DRA plugin.
-		// var fs flag.FlagSet
-		// klog.InitFlags(&fs)
-		//if err := fs.Set("v", "10"); err != nil {
-		//	t.Fatalf("Error while setting higher klog verbosity: %v", err)
-		//}
-		featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
-
-		testCases = append(testCases, []modificationTestCase{
-			{
-				name: "add empty nodeInfo with LocalResourceSlices",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
-				},
-				// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
-				modifiedState: snapshotState{
-					nodes:       []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		{
+			name: "add empty nodeInfo with LocalResourceSlices",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, nil, nil, deviceClasses),
 			},
-			{
-				name: "add nodeInfo with LocalResourceSlices and NeededResourceClaims",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				// The pod in the added NodeInfo references the shared claim already in the DRA snapshot, and a new pod-owned allocated claim that
-				// needs to be added to the DRA snapshot.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The shared claim should just get a reservation for the pod added in the DRA snapshot.
-				// The pod-owned claim should get added to the DRA snapshot, with a reservation for the pod.
-				// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
 			},
-			{
-				name: "adding LocalResourceSlices for an already tracked Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
-				},
-				// LocalResourceSlices for the Node already exist in the DRA snapshot, so trying to add them again should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
+			modifiedState: snapshotState{
+				nodes:       []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding already tracked pod-owned ResourceClaims is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The pod-owned claim already exists in the DRA snapshot, so trying to add it again should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "add nodeInfo with LocalResourceSlices and NeededResourceClaims",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "adding unallocated pod-owned ResourceClaims is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						podOwnedClaim.DeepCopy(),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod-owned claim isn't allocated, so AddNodeInfo should fail.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(podOwnedClaim, podWithClaims),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The pod in the added NodeInfo references the shared claim already in the DRA snapshot, and a new pod-owned allocated claim that
+			// needs to be added to the DRA snapshot.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "adding pod-owned ResourceClaims allocated to the wrong Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod-owned claim is allocated to a different Node than the one being added, so AddNodeInfo should fail.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode), podWithClaims),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The shared claim should just get a reservation for the pod added in the DRA snapshot.
+			// The pod-owned claim should get added to the DRA snapshot, with a reservation for the pod.
+			// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding a pod referencing a shared claim already at max reservations is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding LocalResourceSlices for an already tracked Node is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding a pod referencing its own claim without adding the claim is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod references a pod-owned claim that isn't present in the PodInfo - this should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
 			},
-			{
-				name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims",
-				// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
-				// One claim is shared, one is pod-owned.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
-						nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.RemoveNodeInfo(node.Name)
-				},
-				// LocalResourceSlices for the removed Node should get removed from the DRA snapshot.
-				// The pod-owned claim referenced by a pod from the removed Node should get removed from the DRA snapshot.
-				// The shared claim referenced by a pod from the removed Node should stay in the DRA snapshot, but the pod's reservation should be removed.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
+			// LocalResourceSlices for the Node already exist in the DRA snapshot, so trying to add them again should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims, then add it back",
-				// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
-						nil, deviceClasses),
-				},
-				// Remove the NodeInfo and then add it back to the snapshot.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.RemoveNodeInfo(node.Name); err != nil {
-						return err
-					}
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The state should be identical to the initial one after the modifications.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding already tracked pod-owned ResourceClaims is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "removing LocalResourceSlices for a non-existing Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.RemoveNodeInfo("wrong-name")
-				},
-				// The removed Node isn't in the snapshot, so this should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "schedule pod with NeededResourceClaims to an existing nodeInfo",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references, but they aren't allocated yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The pod should get added to the Node.
-				// The claims referenced by the Pod should get allocated and reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The pod-owned claim already exists in the DRA snapshot, so trying to add it again should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "schedule pod with NeededResourceClaims (some of them shared and already allocated) to an existing nodeInfo",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated, the pod-owned one isn't yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the pod-owned claim in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The pod should get added to the Node.
-				// The pod-owned claim referenced by the Pod should get allocated. Both claims referenced by the Pod should get reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding unallocated pod-owned ResourceClaims is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "scheduling pod with failing DRA predicates is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot doesn't track one of the claims
-				// referenced by the Pod we're trying to schedule.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// SchedulePod should fail at checking scheduling predicates, because the DRA plugin can't find one of the claims.
-				wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					podOwnedClaim.DeepCopy(),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "scheduling pod referencing a shared claim already at max reservations is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated and at max reservations.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
-				wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePod, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The added pod-owned claim isn't allocated, so AddNodeInfo should fail.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(podOwnedClaim, podWithClaims),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "schedule pod with NeededResourceClaims to any Node (only one Node has ResourceSlices)",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
-				// that the pod references, but they aren't allocated yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if diff := cmp.Diff(node.Name, foundNodeName); diff != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output (-want +got): %s", diff)
-					}
-					return err
-				},
-				// The pod should get added to the Node.
-				// The claims referenced by the Pod should get allocated and reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{otherNode, node, largeNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding pod-owned ResourceClaims allocated to the wrong Node is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "scheduling pod on any Node with failing DRA predicates is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot doesn't track one of the claims
-				// referenced by the Pod we're trying to schedule.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if foundNodeName != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
-					}
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The added pod-owned claim is allocated to a different Node than the one being added, so AddNodeInfo should fail.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode), podWithClaims),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "adding a pod referencing a shared claim already at max reservations is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					}, nil, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "adding a pod referencing its own claim without adding the claim is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The added pod references a pod-owned claim that isn't present in the PodInfo - this should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims",
+			// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
+			// One claim is shared, one is pod-owned.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
+					nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.RemoveNodeInfo(node.Name)
+			},
+			// LocalResourceSlices for the removed Node should get removed from the DRA snapshot.
+			// The pod-owned claim referenced by a pod from the removed Node should get removed from the DRA snapshot.
+			// The shared claim referenced by a pod from the removed Node should stay in the DRA snapshot, but the pod's reservation should be removed.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
+			},
+		},
+		{
+			name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims, then add it back",
+			// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
+					nil, deviceClasses),
+			},
+			// Remove the NodeInfo and then add it back to the snapshot.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.RemoveNodeInfo(node.Name); err != nil {
 					return err
-				},
-				// SchedulePodOnAnyNodeMatching should fail at checking scheduling predicates for every Node, because the DRA plugin can't find one of the claims.
-				wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+				}
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "scheduling pod referencing a shared claim already at max reservations on any Node is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated and at max reservations.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if foundNodeName != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
-					}
+			// The state should be identical to the initial one after the modifications.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "removing LocalResourceSlices for a non-existing Node is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.RemoveNodeInfo("wrong-name")
+			},
+			// The removed Node isn't in the snapshot, so this should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims to an existing nodeInfo",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references, but they aren't allocated yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The pod should get added to the Node.
+			// The claims referenced by the Pod should get allocated and reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims (some of them shared and already allocated) to an existing nodeInfo",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated, the pod-owned one isn't yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the pod-owned claim in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The pod should get added to the Node.
+			// The pod-owned claim referenced by the Pod should get allocated. Both claims referenced by the Pod should get reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod with failing DRA predicates is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot doesn't track one of the claims
+			// referenced by the Pod we're trying to schedule.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// SchedulePod should fail at checking scheduling predicates, because the DRA plugin can't find one of the claims.
+			wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod referencing a shared claim already at max reservations is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated and at max reservations.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
+			wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePod, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims to any Node (only one Node has ResourceSlices)",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
+			// that the pod references, but they aren't allocated yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if diff := cmp.Diff(node.Name, foundNodeName); diff != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output (-want +got): %s", diff)
+				}
+				return err
+			},
+			// The pod should get added to the Node.
+			// The claims referenced by the Pod should get allocated and reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{otherNode, node, largeNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod on any Node with failing DRA predicates is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot doesn't track one of the claims
+			// referenced by the Pod we're trying to schedule.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if foundNodeName != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
+				}
+				return err
+			},
+			// SchedulePodOnAnyNodeMatching should fail at checking scheduling predicates for every Node, because the DRA plugin can't find one of the claims.
+			wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod referencing a shared claim already at max reservations on any Node is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated and at max reservations.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if foundNodeName != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
+				}
+				return err
+			},
+			// SchedulePodOnAnyNodeMatching should fail at trying to add a reservation to the shared claim for every Node.
+			wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePodOnAnyNodeMatching, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
+			},
+			// The unscheduled pod should be removed from the Node.
+			// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed, and the claims should be deallocated.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims and schedule it back",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
 					return err
-				},
-				// SchedulePodOnAnyNodeMatching should fail at trying to add a reservation to the shared claim for every Node.
-				wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePodOnAnyNodeMatching, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+				}
+				return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
-				},
-				// The unscheduled pod should be removed from the Node.
-				// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed, and the claims should be deallocated.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims and schedule it back",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
-						return err
-					}
-					return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods)",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods)",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
-				},
-				// The unscheduled pod should be removed from the Node.
-				// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed.
-				// The pod-owned claim should get deallocated, but the shared one shouldn't.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods) and schedule it back",
-				// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
-						return err
-					}
-					return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The unscheduled pod should be removed from the Node.
+			// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed.
+			// The pod-owned claim should get deallocated, but the shared one shouldn't.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "get/list NodeInfo with DRA objects",
-				// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods. There are other Nodes
-				// and pods in the cluster.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node, otherNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					nodeInfoDiffOpts := []cmp.Option{
-						// We don't care about this field staying the same, and it differs because it's a global counter bumped on every AddPod.
-						cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
-						cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
-						cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
-						cmpopts.SortSlices(func(i1, i2 *framework.NodeInfo) bool { return i1.Node().Name < i2.Node().Name }),
-						IgnoreObjectOrder[*resourceapi.ResourceClaim](),
-						IgnoreObjectOrder[*resourceapi.ResourceSlice](),
-					}
-
-					// Verify that GetNodeInfo works as expected.
-					nodeInfo, err := snapshot.GetNodeInfo(node.Name)
-					if err != nil {
-						return err
-					}
-					wantNodeInfo := framework.NewNodeInfo(node, resourceSlices,
-						framework.NewPodInfo(withNodeName(pod, node.Name), nil),
-						framework.NewPodInfo(withNodeName(podWithClaims, node.Name), []*resourceapi.ResourceClaim{
-							drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						}),
-					)
-					if diff := cmp.Diff(wantNodeInfo, nodeInfo, nodeInfoDiffOpts...); diff != "" {
-						t.Errorf("GetNodeInfo(): unexpected output (-want +got): %s", diff)
-					}
-
-					// Verify that ListNodeInfo works as expected.
-					nodeInfos, err := snapshot.ListNodeInfos()
-					if err != nil {
-						return err
-					}
-					wantNodeInfos := []*framework.NodeInfo{wantNodeInfo, framework.NewNodeInfo(otherNode, nil)}
-					if diff := cmp.Diff(wantNodeInfos, nodeInfos, nodeInfoDiffOpts...); diff != "" {
-						t.Errorf("ListNodeInfos(): unexpected output (-want +got): %s", diff)
-					}
-
-					return nil
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node, otherNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods) and schedule it back",
+			// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
+					return err
+				}
+				return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
+			},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "get/list NodeInfo with DRA objects",
+			// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods. There are other Nodes
+			// and pods in the cluster.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node, otherNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-		}...)
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				nodeInfoDiffOpts := []cmp.Option{
+					// We don't care about this field staying the same, and it differs because it's a global counter bumped on every AddPod.
+					cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
+					cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
+					cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
+					cmpopts.SortSlices(func(i1, i2 *framework.NodeInfo) bool { return i1.Node().Name < i2.Node().Name }),
+					IgnoreObjectOrder[*resourceapi.ResourceClaim](),
+					IgnoreObjectOrder[*resourceapi.ResourceSlice](),
+				}
+
+				// Verify that GetNodeInfo works as expected.
+				nodeInfo, err := snapshot.GetNodeInfo(node.Name)
+				if err != nil {
+					return err
+				}
+				wantNodeInfo := framework.NewNodeInfo(node, resourceSlices,
+					framework.NewPodInfo(withNodeName(pod, node.Name), nil),
+					framework.NewPodInfo(withNodeName(podWithClaims, node.Name), []*resourceapi.ResourceClaim{
+						drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					}),
+				)
+				if diff := cmp.Diff(wantNodeInfo, nodeInfo, nodeInfoDiffOpts...); diff != "" {
+					t.Errorf("GetNodeInfo(): unexpected output (-want +got): %s", diff)
+				}
+
+				// Verify that ListNodeInfos works as expected.
+				nodeInfos, err := snapshot.ListNodeInfos()
+				if err != nil {
+					return err
+				}
+				wantNodeInfos := []*framework.NodeInfo{wantNodeInfo, framework.NewNodeInfo(otherNode, nil)}
+				if diff := cmp.Diff(wantNodeInfos, nodeInfos, nodeInfoDiffOpts...); diff != "" {
+					t.Errorf("ListNodeInfos(): unexpected output (-want +got): %s", diff)
+				}
+
+				return nil
+			},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node, otherNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+	}
+
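+	// Test cases that don't care about DRA leave the DRA snapshots unset; default them to
+	// empty snapshots so the rest of the test doesn't have to handle nil.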
+	for i := range testCases {
+		if testCases[i].modifiedState.draSnapshot == nil {
+			testCases[i].modifiedState.draSnapshot = drasnapshot.NewEmptySnapshot()
+		}
+
+		if testCases[i].state.draSnapshot == nil {
+			testCases[i].state.draSnapshot = drasnapshot.NewEmptySnapshot()
+		}
 	}
 
 	return testCases
 }
 
 func TestForking(t *testing.T) {
-	node := BuildTestNode("specialNode-2", 10, 100)
+	// Uncomment to get logs from the DRA plugin.
+	// var fs flag.FlagSet
+	// klog.InitFlags(&fs)
+	// if err := fs.Set("v", "10"); err != nil {
+	// 	t.Fatalf("Error while setting higher klog verbosity: %v", err)
+	// }
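+	// Enable the DRA feature gate so that the DRA-specific test cases exercise the DRA scheduler plugin.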
+	featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
 
+	node := BuildTestNode("specialNode-2", 10, 100)
 	for snapshotName, snapshotFactory := range snapshots {
 		for _, tc := range validTestCases(t, snapshotName) {
 			t.Run(fmt.Sprintf("%s: %s base", snapshotName, tc.name), func(t *testing.T) {
@@ -1300,7 +1305,7 @@ func TestSetClusterState(t *testing.T) {
 	nodes := clustersnapshot.CreateTestNodes(nodeCount)
 	pods := clustersnapshot.CreateTestPods(podCount)
 	podsByNode := clustersnapshot.AssignTestPodsToNodes(pods, nodes)
-	state := snapshotState{nodes: nodes, podsByNode: podsByNode}
+	state := snapshotState{nodes: nodes, podsByNode: podsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}
 
 	extraNodes := clustersnapshot.CreateTestNodesWithPrefix("extra", extraNodeCount)
 
@@ -1323,9 +1328,9 @@ func TestSetClusterState(t *testing.T) {
 				snapshot := startSnapshot(t, snapshotFactory, state)
 				compareStates(t, state, getSnapshotState(t, snapshot))
 
-				assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
-				compareStates(t, snapshotState{}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 			})
 		t.Run(fmt.Sprintf("%s: clear base %d nodes %d pods and set a new state", name, nodeCount, podCount),
 			func(t *testing.T) {
@@ -1334,9 +1339,9 @@ func TestSetClusterState(t *testing.T) {
 
 				newNodes, newPods := clustersnapshot.CreateTestNodes(13), clustersnapshot.CreateTestPods(37)
 				newPodsByNode := clustersnapshot.AssignTestPodsToNodes(newPods, newNodes)
-				assert.NoError(t, snapshot.SetClusterState(newNodes, newPods, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(newNodes, newPods, nil))
 
-				compareStates(t, snapshotState{nodes: newNodes, podsByNode: newPodsByNode}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{nodes: newNodes, podsByNode: newPodsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 			})
 		t.Run(fmt.Sprintf("%s: clear fork %d nodes %d pods %d extra nodes %d extra pods", name, nodeCount, podCount, extraNodeCount, extraPodCount),
 			func(t *testing.T) {
@@ -1355,11 +1360,11 @@ func TestSetClusterState(t *testing.T) {
 					assert.NoError(t, err)
 				}
 
-				compareStates(t, snapshotState{nodes: allNodes, podsByNode: allPodsByNode}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{nodes: allNodes, podsByNode: allPodsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 
-				assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
-				compareStates(t, snapshotState{}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 
 				// SetClusterState() should break out of forked state.
 				snapshot.Fork()
@@ -1768,7 +1773,7 @@ func TestPVCClearAndFork(t *testing.T) {
 			volumeExists := snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1"))
 			assert.Equal(t, true, volumeExists)
 
-			assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+			assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 			volumeExists = snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1"))
 			assert.Equal(t, false, volumeExists)
 
@@ -1777,6 +1782,14 @@ func TestPVCClearAndFork(t *testing.T) {
 }
 
 func TestWithForkedSnapshot(t *testing.T) {
+	// Uncomment to get logs from the DRA plugin.
+	// var fs flag.FlagSet
+	// klog.InitFlags(&fs)
+	// if err := fs.Set("v", "10"); err != nil {
+	// 	t.Fatalf("Error while setting higher klog verbosity: %v", err)
+	// }
+	featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
+
 	err := fmt.Errorf("some error")
 	for snapshotName, snapshotFactory := range snapshots {
 		for _, tc := range validTestCases(t, snapshotName) {
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/basic.go b/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
index 8c62b720685b67c3bbe3c685b940a7ec4f638372..61abc684b79e4f1fabf6b6d1b6e86d9e6462fd4b 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
@@ -29,13 +29,13 @@ import (
 // BasicSnapshotStore is simple, reference implementation of ClusterSnapshotStore.
 // It is inefficient. But hopefully bug-free and good for initial testing.
 type BasicSnapshotStore struct {
-	data []*internalBasicSnapshotData
+	data        []*internalBasicSnapshotData
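+	// draSnapshot lives outside the forkable data stack; Fork/Revert/Commit below keep it in sync explicitly.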
+	draSnapshot *drasnapshot.Snapshot
 }
 
 type internalBasicSnapshotData struct {
 	nodeInfoMap        map[string]*schedulerframework.NodeInfo
 	pvcNamespacePodMap map[string]map[string]bool
-	draSnapshot        drasnapshot.Snapshot
 }
 
 func (data *internalBasicSnapshotData) listNodeInfos() []*schedulerframework.NodeInfo {
@@ -142,7 +142,6 @@ func (data *internalBasicSnapshotData) clone() *internalBasicSnapshotData {
 	return &internalBasicSnapshotData{
 		nodeInfoMap:        clonedNodeInfoMap,
 		pvcNamespacePodMap: clonedPvcNamespaceNodeMap,
-		draSnapshot:        data.draSnapshot.Clone(),
 	}
 }
 
@@ -208,8 +207,8 @@ func (snapshot *BasicSnapshotStore) getInternalData() *internalBasicSnapshotData
 }
 
 // DraSnapshot returns the DRA snapshot.
-func (snapshot *BasicSnapshotStore) DraSnapshot() drasnapshot.Snapshot {
-	return snapshot.getInternalData().draSnapshot
+func (snapshot *BasicSnapshotStore) DraSnapshot() *drasnapshot.Snapshot {
+	return snapshot.draSnapshot
 }
 
 // AddSchedulerNodeInfo adds a NodeInfo.
@@ -226,7 +225,7 @@ func (snapshot *BasicSnapshotStore) AddSchedulerNodeInfo(nodeInfo *schedulerfram
 }
 
 // SetClusterState sets the cluster state.
-func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error {
+func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error {
 	snapshot.clear()
 
 	knownNodes := make(map[string]bool)
@@ -243,7 +242,13 @@ func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, schedul
 			}
 		}
 	}
-	snapshot.getInternalData().draSnapshot = draSnapshot
+
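+	// Callers may pass a nil DRA snapshot (e.g. when DRA is disabled); fall back to an empty
+	// snapshot so DraSnapshot() never returns nil.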
+	if draSnapshot == nil {
+		snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
+	} else {
+		snapshot.draSnapshot = draSnapshot
+	}
+
 	return nil
 }
 
@@ -271,6 +276,7 @@ func (snapshot *BasicSnapshotStore) IsPVCUsedByPods(key string) bool {
 func (snapshot *BasicSnapshotStore) Fork() {
 	forkData := snapshot.getInternalData().clone()
 	snapshot.data = append(snapshot.data, forkData)
+	snapshot.draSnapshot.Fork()
 }
 
 // Revert reverts snapshot state to moment of forking.
@@ -279,6 +285,7 @@ func (snapshot *BasicSnapshotStore) Revert() {
 		return
 	}
 	snapshot.data = snapshot.data[:len(snapshot.data)-1]
+	snapshot.draSnapshot.Revert()
 }
 
 // Commit commits changes done after forking.
@@ -288,6 +295,7 @@ func (snapshot *BasicSnapshotStore) Commit() error {
 		return nil
 	}
 	snapshot.data = append(snapshot.data[:len(snapshot.data)-2], snapshot.data[len(snapshot.data)-1])
+	snapshot.draSnapshot.Commit()
 	return nil
 }
 
@@ -295,6 +303,7 @@ func (snapshot *BasicSnapshotStore) Commit() error {
 func (snapshot *BasicSnapshotStore) clear() {
 	baseData := newInternalBasicSnapshotData()
 	snapshot.data = []*internalBasicSnapshotData{baseData}
+	snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
 }
 
 // implementation of SharedLister interface
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/delta.go b/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
index 75af617e73af797e36ee84476a853ada4e2c272b..6d0d5c7e76182357b80d4489c5539e864890c0e3 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
@@ -41,12 +41,18 @@ import (
 //
 // Watch out for:
 //
-//	node deletions, pod additions & deletions - invalidates cache of current snapshot
-//		(when forked affects delta, but not base.)
-//	pod affinity - causes scheduler framework to list pods with non-empty selector,
-//		so basic caching doesn't help.
+// * Node deletions, pod additions & deletions - invalidates cache of current snapshot
+// (when forked affects delta, but not base.)
+//
+// * Pod affinity - causes scheduler framework to list pods with non-empty selector,
+// so basic caching doesn't help.
+//
+// * DRA objects are tracked in a separate snapshot; while it doesn't share the exact
+// memory and time complexities of DeltaSnapshotStore, it is optimized for
+// Cluster Autoscaler operations.
 type DeltaSnapshotStore struct {
 	data        *internalDeltaSnapshotData
+	draSnapshot *drasnapshot.Snapshot
 	parallelism int
 }
 
@@ -64,8 +70,6 @@ type internalDeltaSnapshotData struct {
 	havePodsWithAffinity             []*schedulerframework.NodeInfo
 	havePodsWithRequiredAntiAffinity []*schedulerframework.NodeInfo
 	pvcNamespaceMap                  map[string]int
-
-	draSnapshot drasnapshot.Snapshot
 }
 
 func newInternalDeltaSnapshotData() *internalDeltaSnapshotData {
@@ -296,7 +300,6 @@ func (data *internalDeltaSnapshotData) isPVCUsedByPods(key string) bool {
 
 func (data *internalDeltaSnapshotData) fork() *internalDeltaSnapshotData {
 	forkedData := newInternalDeltaSnapshotData()
-	forkedData.draSnapshot = data.draSnapshot.Clone()
 	forkedData.baseData = data
 	return forkedData
 }
@@ -325,7 +328,6 @@ func (data *internalDeltaSnapshotData) commit() (*internalDeltaSnapshotData, err
 		}
 	}
 
-	data.baseData.draSnapshot = data.draSnapshot
 	return data.baseData, nil
 }
 
@@ -424,8 +426,8 @@ func NewDeltaSnapshotStore(parallelism int) *DeltaSnapshotStore {
 }
 
 // DraSnapshot returns the DRA snapshot.
-func (snapshot *DeltaSnapshotStore) DraSnapshot() drasnapshot.Snapshot {
-	return snapshot.data.draSnapshot
+func (snapshot *DeltaSnapshotStore) DraSnapshot() *drasnapshot.Snapshot {
+	return snapshot.draSnapshot
 }
 
 // AddSchedulerNodeInfo adds a NodeInfo.
@@ -473,7 +475,7 @@ func (snapshot *DeltaSnapshotStore) setClusterStatePodsParallelized(nodeInfos []
 }
 
 // SetClusterState sets the cluster state.
-func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error {
+func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error {
 	snapshot.clear()
 
 	nodeNameToIdx := make(map[string]int, len(nodes))
@@ -498,7 +500,12 @@ func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, schedul
 	// Clear caches after adding pods.
 	snapshot.data.clearCaches()
 
-	// TODO(DRA): Save DRA snapshot.
+	if draSnapshot == nil {
+		snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
+	} else {
+		snapshot.draSnapshot = draSnapshot
+	}
+
 	return nil
 }
 
@@ -526,6 +533,7 @@ func (snapshot *DeltaSnapshotStore) IsPVCUsedByPods(key string) bool {
 // Time: O(1)
 func (snapshot *DeltaSnapshotStore) Fork() {
 	snapshot.data = snapshot.data.fork()
+	snapshot.draSnapshot.Fork()
 }
 
 // Revert reverts snapshot state to moment of forking.
@@ -534,6 +542,7 @@ func (snapshot *DeltaSnapshotStore) Revert() {
 	if snapshot.data.baseData != nil {
 		snapshot.data = snapshot.data.baseData
 	}
+	snapshot.draSnapshot.Revert()
 }
 
 // Commit commits changes done after forking.
@@ -544,6 +553,7 @@ func (snapshot *DeltaSnapshotStore) Commit() error {
 		return err
 	}
 	snapshot.data = newData
+	snapshot.draSnapshot.Commit()
 	return nil
 }
 
@@ -551,4 +561,5 @@ func (snapshot *DeltaSnapshotStore) Commit() error {
 // Time: O(1)
 func (snapshot *DeltaSnapshotStore) clear() {
 	snapshot.data = newInternalDeltaSnapshotData()
+	snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
 }
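
To make the new store/DRA lifecycle concrete, here is a hedged, caller-side sketch (package-internal, since it uses the store constructor directly). The helper name simulateWithDRA, its arguments, and the "template-node" name are illustrative assumptions, not part of this patch; only NewDeltaSnapshotStore, SetClusterState, Fork, Revert and DraSnapshot come from the code above, and the apiv1/resourceapi aliases are assumed to match the package imports.

// Hypothetical helper; argument names and the node name are assumptions.
func simulateWithDRA(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, templateSlices []*resourceapi.ResourceSlice) error {
	deltaStore := NewDeltaSnapshotStore(16)
	// A nil DRA snapshot is now accepted and normalized to an empty one.
	if err := deltaStore.SetClusterState(nodes, scheduledPods, nil); err != nil {
		return err
	}

	deltaStore.Fork() // opens a new layer in the node data and in the DRA snapshot
	if err := deltaStore.DraSnapshot().AddNodeResourceSlices("template-node", templateSlices); err != nil {
		return err
	}

	deltaStore.Revert() // discards the simulated node data and the DRA slice addition together
	return nil
}
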
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
index 5f618befd180c3f33958b68be468f81d33faf38e..0d426b7e39b11eb3c332c2822640d9a97cdd7771 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
@@ -23,7 +23,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
@@ -49,7 +48,7 @@ func BenchmarkBuildNodeInfoList(b *testing.B) {
 		b.Run(fmt.Sprintf("fork add 1000 to %d", tc.nodeCount), func(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc.nodeCount + 1000)
 			deltaStore := NewDeltaSnapshotStore(16)
-			if err := deltaStore.SetClusterState(nodes[:tc.nodeCount], nil, drasnapshot.Snapshot{}); err != nil {
+			if err := deltaStore.SetClusterState(nodes[:tc.nodeCount], nil, nil); err != nil {
 				assert.NoError(b, err)
 			}
 			deltaStore.Fork()
@@ -71,7 +70,7 @@ func BenchmarkBuildNodeInfoList(b *testing.B) {
 		b.Run(fmt.Sprintf("base %d", tc.nodeCount), func(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc.nodeCount)
 			deltaStore := NewDeltaSnapshotStore(16)
-			if err := deltaStore.SetClusterState(nodes, nil, drasnapshot.Snapshot{}); err != nil {
+			if err := deltaStore.SetClusterState(nodes, nil, nil); err != nil {
 				assert.NoError(b, err)
 			}
 			b.ResetTimer()
diff --git a/cluster-autoscaler/simulator/clustersnapshot/test_utils.go b/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
index b98c9e117e1f034909b42e808443f34d572fb9e4..d95a3e6437c405c2786c0054070817b3193f28fe 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
@@ -25,7 +25,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	apiv1 "k8s.io/api/core/v1"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -39,7 +38,7 @@ func InitializeClusterSnapshotOrDie(
 	pods []*apiv1.Pod) {
 	var err error
 
-	assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+	assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
 	for _, node := range nodes {
 		err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node))
diff --git a/cluster-autoscaler/simulator/dynamicresources/provider/provider.go b/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
index 7c6cddac31fd31e62200fa57453f6a511c180db5..02e02562b93bbca44af1ff2d1099552a095fffb2 100644
--- a/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
+++ b/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
@@ -48,10 +48,10 @@ func NewProvider(claims allObjectsLister[*resourceapi.ResourceClaim], slices all
 }
 
 // Snapshot returns a snapshot of all DRA resources at a ~single point in time.
-func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
+func (p *Provider) Snapshot() (*drasnapshot.Snapshot, error) {
 	claims, err := p.resourceClaims.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	claimMap := make(map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim)
 	for _, claim := range claims {
@@ -60,7 +60,7 @@ func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
 
 	slices, err := p.resourceSlices.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	slicesMap := make(map[string][]*resourceapi.ResourceSlice)
 	var nonNodeLocalSlices []*resourceapi.ResourceSlice
@@ -74,7 +74,7 @@ func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
 
 	classes, err := p.deviceClasses.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	classMap := make(map[string]*resourceapi.DeviceClass)
 	for _, class := range classes {
diff --git a/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go b/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
index d8ff91cab1b1a79ea24fe11d5616bfc067eead3a..1e6e608004b29dae708e9726d21ed867a9047471 100644
--- a/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
@@ -61,7 +61,7 @@ func TestProviderSnapshot(t *testing.T) {
 		triggerSlicesError  bool
 		classes             []*resourceapi.DeviceClass
 		triggerClassesError bool
-		wantSnapshot        drasnapshot.Snapshot
+		wantSnapshot        *drasnapshot.Snapshot
 		wantErr             error
 	}{
 		{
@@ -133,7 +133,7 @@ func TestProviderSnapshot(t *testing.T) {
 			if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" {
 				t.Fatalf("Provider.Snapshot(): unexpected error (-want +got): %s", diff)
 			}
-			if diff := cmp.Diff(tc.wantSnapshot, snapshot, cmp.AllowUnexported(drasnapshot.Snapshot{}), cmpopts.EquateEmpty()); diff != "" {
+			if diff := cmp.Diff(tc.wantSnapshot, snapshot, drasnapshot.SnapshotFlattenedComparer()); diff != "" {
 				t.Fatalf("Provider.Snapshot(): snapshot differs from expected (-want +got): %s", diff)
 			}
 		})
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/patchset.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/patchset.go
new file mode 100644
index 0000000000000000000000000000000000000000..255ad3892cd9ede0a8dc39a32c5fce8a048f338e
--- /dev/null
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/patchset.go
@@ -0,0 +1,306 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package snapshot
+
+// patch represents a single layer of modifications (additions/updates)
+// and deletions for a set of key-value pairs. It's used as a building
+// block for the patchSet.
+type patch[K comparable, V any] struct {
+	Modified map[K]V
+	Deleted  map[K]bool
+}
+
+// Set marks a key-value pair as modified in the current patch.
+// If the key was previously marked as deleted, the deletion mark is removed.
+func (p *patch[K, V]) Set(key K, value V) {
+	p.Modified[key] = value
+	delete(p.Deleted, key)
+}
+
+// Delete marks a key as deleted in the current patch.
+// If the key was previously marked as modified, the modification is removed.
+func (p *patch[K, V]) Delete(key K) {
+	delete(p.Modified, key)
+	p.Deleted[key] = true
+}
+
+// Get retrieves the modified value for a key within this specific patch.
+// It returns the value and true if the key was found in this patch
+// or zero value and false otherwise.
+func (p *patch[K, V]) Get(key K) (value V, found bool) {
+	value, found = p.Modified[key]
+	return value, found
+}
+
+// IsDeleted checks if the key is marked as deleted within this specific patch.
+func (p *patch[K, V]) IsDeleted(key K) bool {
+	return p.Deleted[key]
+}
+
+// newPatch creates a new, empty patch with no modifications or deletions.
+func newPatch[K comparable, V any]() *patch[K, V] {
+	return &patch[K, V]{
+		Modified: make(map[K]V),
+		Deleted:  make(map[K]bool),
+	}
+}
+
+// newPatchFromMap creates a new patch initialized with the data from the provided map.
+// The supplied data is recorded as modified in the patch.
+func newPatchFromMap[M ~map[K]V, K comparable, V any](source M) *patch[K, V] {
+	if source == nil {
+		source = make(M)
+	}
+
+	return &patch[K, V]{
+		Modified: source,
+		Deleted:  make(map[K]bool),
+	}
+}
+
+// mergePatchesInPlace merges patch b into patch a in place, giving priority to
+// records from b whenever both patches contain the same key.
+func mergePatchesInPlace[K comparable, V any](a *patch[K, V], b *patch[K, V]) *patch[K, V] {
+	for key, value := range b.Modified {
+		a.Set(key, value)
+	}
+
+	for key := range b.Deleted {
+		a.Delete(key)
+	}
+
+	return a
+}
+
+// patchSet manages a stack of patches, allowing for fork/revert/commit operations.
+// It provides a view of the data as if all patches were applied sequentially.
+//
+// Time Complexities:
+//   - Fork(): O(1).
+//   - Commit(): O(P), where P is the number of modified/deleted entries
+//     in the current patch or no-op for patchSet with a single patch.
+//   - Revert(): O(P), where P is the number of modified/deleted entries
+//     in the topmost patch or no-op for patchSet with a single patch.
+//   - FindValue(key): O(1) for cached keys, O(N * P) otherwise.
+//   - AsMap(): O(N * P), as it needs to iterate through all the layers'
+//     modifications and deletions to get the latest state. Best case:
+//     if the cache is currently in sync with the patchSet data, the complexity
+//     becomes O(C), where C is the actual number of key/value pairs in the flattened patchSet.
+//   - SetCurrent(key, value): O(1).
+//   - DeleteCurrent(key): O(1).
+//   - InCurrentPatch(key): O(1).
+//
+// Variables used in complexity analysis:
+//   - N: The number of patch layers in the patchSet.
+//   - P: The number of modified/deleted entries in a single patch layer.
+//
+// Caching:
+//
+// The patchSet employs a lazy caching mechanism to speed up access to data. When a specific item is requested,
+// the cache is checked first. If the item isn't cached, its effective value is computed by traversing the layers
+// of patches from the most recent to the oldest, and the result is then stored in the cache. For operations that
+// require the entire dataset like AsMap, if a cache is fully up-to-date, the data is served directly from the cache.
+// Otherwise, the entire dataset is rebuilt by applying all patches, and this process fully populates the cache.
+// Direct modifications to the current patch update the specific item in the cache immediately. Reverting the latest
+// patch will clear affected items from the cache and mark the cache as potentially out-of-sync, as underlying values may
+// no longer represent the patchSet as a whole. Committing and forking does not invalidate the cache, as the effective
+// values remain consistent from the perspective of read operations.
+type patchSet[K comparable, V any] struct {
+	// patches is a stack of individual modification layers. The base data is
+	// at index 0, and subsequent modifications are layered on top.
+	// A patchSet should always contain at least a single patch.
+	patches []*patch[K, V]
+
+	// cache stores the computed effective value for keys that have been accessed.
+	// A nil pointer indicates the key is effectively deleted or not present.
+	cache map[K]*V
+
+	// cacheInSync indicates whether the cache map accurately reflects the
+	// current state derived from applying all patches in the 'patches' slice.
+	// When false, the cache may be stale and needs to be rebuilt or validated
+	// before being fully trusted for all keys.
+	cacheInSync bool
+}
+
+// newPatchSet creates a new patchSet, initializing it with the provided base patches.
+func newPatchSet[K comparable, V any](patches ...*patch[K, V]) *patchSet[K, V] {
+	return &patchSet[K, V]{
+		patches:     patches,
+		cache:       make(map[K]*V),
+		cacheInSync: false,
+	}
+}
+
+// Fork adds a new, empty patch layer to the top of the stack.
+// Subsequent modifications will be recorded in this new layer.
+func (p *patchSet[K, V]) Fork() {
+	p.patches = append(p.patches, newPatch[K, V]())
+}
+
+// Commit merges the topmost patch layer into the one below it.
+// If there's only one layer (or none), it's a no-op.
+func (p *patchSet[K, V]) Commit() {
+	if len(p.patches) < 2 {
+		return
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	previousPatch := p.patches[len(p.patches)-2]
+	mergePatchesInPlace(previousPatch, currentPatch)
+	p.patches = p.patches[:len(p.patches)-1]
+}
+
+// Revert removes the topmost patch layer.
+// Any modifications or deletions recorded in that layer are discarded.
+func (p *patchSet[K, V]) Revert() {
+	if len(p.patches) <= 1 {
+		return
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	p.patches = p.patches[:len(p.patches)-1]
+
+	for key := range currentPatch.Modified {
+		delete(p.cache, key)
+	}
+
+	for key := range currentPatch.Deleted {
+		delete(p.cache, key)
+	}
+
+	p.cacheInSync = false
+}
+
+// FindValue searches for the effective value of a key by looking through the patches
+// from top to bottom. It returns the value and true if found, or the zero value and false
+// if the key is deleted or not found in any patch.
+func (p *patchSet[K, V]) FindValue(key K) (value V, found bool) {
+	var zero V
+
+	if cachedValue, cacheHit := p.cache[key]; cacheHit {
+		if cachedValue == nil {
+			return zero, false
+		}
+
+		return *cachedValue, true
+	}
+
+	value = zero
+	found = false
+	for i := len(p.patches) - 1; i >= 0; i-- {
+		patch := p.patches[i]
+		if patch.IsDeleted(key) {
+			break
+		}
+
+		foundValue, ok := patch.Get(key)
+		if ok {
+			value = foundValue
+			found = true
+			break
+		}
+	}
+
+	if found {
+		p.cache[key] = &value
+	} else {
+		p.cache[key] = nil
+	}
+
+	return value, found
+}
+
+// AsMap merges all patches into a single map representing the current effective state.
+// It iterates through all patches from bottom to top, applying modifications and deletions.
+// The cache is populated with the results during this process.
+func (p *patchSet[K, V]) AsMap() map[K]V {
+	if p.cacheInSync {
+		patchSetMap := make(map[K]V, len(p.cache))
+		for key, value := range p.cache {
+			if value != nil {
+				patchSetMap[key] = *value
+			}
+		}
+		return patchSetMap
+	}
+
+	keysCount := p.totalKeyCount()
+	patchSetMap := make(map[K]V, keysCount)
+
+	for _, patch := range p.patches {
+		for key, value := range patch.Modified {
+			patchSetMap[key] = value
+		}
+
+		for key := range patch.Deleted {
+			delete(patchSetMap, key)
+		}
+	}
+
+	for key, value := range patchSetMap {
+		p.cache[key] = &value
+	}
+	p.cacheInSync = true
+
+	return patchSetMap
+}
+
+// SetCurrent adds or updates a key-value pair in the topmost patch layer.
+func (p *patchSet[K, V]) SetCurrent(key K, value V) {
+	if len(p.patches) == 0 {
+		p.Fork()
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	currentPatch.Set(key, value)
+	p.cache[key] = &value
+}
+
+// DeleteCurrent marks a key as deleted in the topmost patch layer.
+func (p *patchSet[K, V]) DeleteCurrent(key K) {
+	if len(p.patches) == 0 {
+		p.Fork()
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	currentPatch.Delete(key)
+	p.cache[key] = nil
+}
+
+// InCurrentPatch checks if the key is available in the topmost patch layer.
+func (p *patchSet[K, V]) InCurrentPatch(key K) bool {
+	if len(p.patches) == 0 {
+		return false
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	_, found := currentPatch.Get(key)
+	return found
+}
+
+// totalKeyCount calculates an approximate upper bound on the total number
+// of key-value pairs across all patches. Deleted records are not taken
+// into account, so the result may overestimate the effective number of
+// keys in the flattened patchSet.
+func (p *patchSet[K, V]) totalKeyCount() int {
+	count := 0
+	for _, patch := range p.patches {
+		count += len(patch.Modified)
+	}
+
+	return count
+}
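
Because patchSet is unexported, the following is a package-internal, test-style sketch of the layering semantics documented above. The string/int instantiation and the printed values are illustrative assumptions; the calls themselves are the ones defined in this file (assuming "fmt" is imported where the sketch lives).

// Illustrative only: a tiny patchSet over string keys and int values.
ps := newPatchSet(newPatchFromMap(map[string]int{"a": 1, "b": 2}))

ps.Fork()
ps.SetCurrent("a", 10) // shadows "a" from the base patch
ps.DeleteCurrent("b")  // marks "b" as deleted in the top patch

if v, ok := ps.FindValue("a"); ok {
	fmt.Println(v) // 10 - the topmost modification wins
}
if _, ok := ps.FindValue("b"); !ok {
	fmt.Println("b is hidden by the deletion in the top layer")
}

ps.Revert()                    // drops the top layer and invalidates affected cache entries
fmt.Println(ps.FindValue("a")) // 1 true - the base value is visible again
fmt.Println(len(ps.AsMap()))   // 2 - the flattened view is {"a": 1, "b": 2} once more

Calling Commit() instead of Revert() would fold the top layer into the one below it, so the values written after Fork() stay visible while read results remain unchanged.
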
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
index f9e2a3f2b5ba7bdd676cf6bb4ab78e8e28f6da5b..f473b2d6330d6fc9d6eff6d7de4ff79e0916b587 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
@@ -18,6 +18,7 @@ package snapshot
 
 import (
 	"fmt"
+	"maps"
 
 	apiv1 "k8s.io/api/core/v1"
 	resourceapi "k8s.io/api/resource/v1beta1"
@@ -25,6 +26,7 @@ import (
 	drautils "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	resourceclaim "k8s.io/dynamic-resource-allocation/resourceclaim"
+	"k8s.io/klog/v2"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
@@ -43,54 +45,67 @@ func GetClaimId(claim *resourceapi.ResourceClaim) ResourceClaimId {
 // obtained via the Provider. Then, it can be modified using the exposed methods, to simulate scheduling actions
 // in the cluster.
 type Snapshot struct {
-	resourceClaimsById         map[ResourceClaimId]*resourceapi.ResourceClaim
-	resourceSlicesByNodeName   map[string][]*resourceapi.ResourceSlice
-	nonNodeLocalResourceSlices []*resourceapi.ResourceSlice
-	deviceClasses              map[string]*resourceapi.DeviceClass
+	resourceClaims *patchSet[ResourceClaimId, *resourceapi.ResourceClaim]
+	resourceSlices *patchSet[string, []*resourceapi.ResourceSlice]
+	deviceClasses  *patchSet[string, *resourceapi.DeviceClass]
 }
 
+// nonNodeLocalResourceSlicesIdentifier is a special key used in the resourceSlices patchSet
+// to store ResourceSlices that do not apply to a specific node. Its value is one that a
+// Kubernetes node name can never take, so it cannot collide with a real node's slices.
+const nonNodeLocalResourceSlicesIdentifier = "--NON-LOCAL--"
+
 // NewSnapshot returns a Snapshot created from the provided data.
-func NewSnapshot(claims map[ResourceClaimId]*resourceapi.ResourceClaim, nodeLocalSlices map[string][]*resourceapi.ResourceSlice, globalSlices []*resourceapi.ResourceSlice, deviceClasses map[string]*resourceapi.DeviceClass) Snapshot {
-	if claims == nil {
-		claims = map[ResourceClaimId]*resourceapi.ResourceClaim{}
-	}
-	if nodeLocalSlices == nil {
-		nodeLocalSlices = map[string][]*resourceapi.ResourceSlice{}
-	}
-	if deviceClasses == nil {
-		deviceClasses = map[string]*resourceapi.DeviceClass{}
+func NewSnapshot(claims map[ResourceClaimId]*resourceapi.ResourceClaim, nodeLocalSlices map[string][]*resourceapi.ResourceSlice, nonNodeLocalSlices []*resourceapi.ResourceSlice, deviceClasses map[string]*resourceapi.DeviceClass) *Snapshot {
+	slices := make(map[string][]*resourceapi.ResourceSlice, len(nodeLocalSlices)+1)
+	maps.Copy(slices, nodeLocalSlices)
+	slices[nonNodeLocalResourceSlicesIdentifier] = nonNodeLocalSlices
+
+	claimsPatch := newPatchFromMap(claims)
+	slicesPatch := newPatchFromMap(slices)
+	devicesPatch := newPatchFromMap(deviceClasses)
+	return &Snapshot{
+		resourceClaims: newPatchSet(claimsPatch),
+		resourceSlices: newPatchSet(slicesPatch),
+		deviceClasses:  newPatchSet(devicesPatch),
 	}
-	return Snapshot{
-		resourceClaimsById:         claims,
-		resourceSlicesByNodeName:   nodeLocalSlices,
-		nonNodeLocalResourceSlices: globalSlices,
-		deviceClasses:              deviceClasses,
+}
+
+// NewEmptySnapshot returns a Snapshot with no DRA objects tracked.
+func NewEmptySnapshot() *Snapshot {
+	claimsPatch := newPatch[ResourceClaimId, *resourceapi.ResourceClaim]()
+	slicesPatch := newPatch[string, []*resourceapi.ResourceSlice]()
+	devicesPatch := newPatch[string, *resourceapi.DeviceClass]()
+	return &Snapshot{
+		resourceClaims: newPatchSet(claimsPatch),
+		resourceSlices: newPatchSet(slicesPatch),
+		deviceClasses:  newPatchSet(devicesPatch),
 	}
 }
 
 // ResourceClaims exposes the Snapshot as schedulerframework.ResourceClaimTracker, in order to interact with
 // the scheduler framework.
-func (s Snapshot) ResourceClaims() schedulerframework.ResourceClaimTracker {
-	return snapshotClaimTracker(s)
+func (s *Snapshot) ResourceClaims() schedulerframework.ResourceClaimTracker {
+	return snapshotClaimTracker{snapshot: s}
 }
 
 // ResourceSlices exposes the Snapshot as schedulerframework.ResourceSliceLister, in order to interact with
 // the scheduler framework.
-func (s Snapshot) ResourceSlices() schedulerframework.ResourceSliceLister {
-	return snapshotSliceLister(s)
+func (s *Snapshot) ResourceSlices() schedulerframework.ResourceSliceLister {
+	return snapshotSliceLister{snapshot: s}
 }
 
 // DeviceClasses exposes the Snapshot as schedulerframework.DeviceClassLister, in order to interact with
 // the scheduler framework.
-func (s Snapshot) DeviceClasses() schedulerframework.DeviceClassLister {
-	return snapshotClassLister(s)
+func (s *Snapshot) DeviceClasses() schedulerframework.DeviceClassLister {
+	return snapshotClassLister{snapshot: s}
 }
 
 // WrapSchedulerNodeInfo wraps the provided *schedulerframework.NodeInfo into an internal *framework.NodeInfo, adding
 // dra information. Node-local ResourceSlices are added to the NodeInfo, and all ResourceClaims referenced by each Pod
 // are added to each PodInfo. Returns an error if any of the Pods is missing a ResourceClaim.
-func (s Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) (*framework.NodeInfo, error) {
-	podExtraInfos := map[types.UID]framework.PodExtraInfo{}
+func (s *Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) (*framework.NodeInfo, error) {
+	podExtraInfos := make(map[types.UID]framework.PodExtraInfo, len(schedNodeInfo.Pods))
 	for _, pod := range schedNodeInfo.Pods {
 		podClaims, err := s.PodClaims(pod.Pod)
 		if err != nil {
@@ -104,161 +119,251 @@ func (s Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeIn
 	return framework.WrapSchedulerNodeInfo(schedNodeInfo, nodeSlices, podExtraInfos), nil
 }
 
-// Clone returns a copy of this Snapshot that can be independently modified without affecting this Snapshot.
-// The only mutable objects in the Snapshot are ResourceClaims, so they are deep-copied. The rest is only a
-// shallow copy.
-func (s Snapshot) Clone() Snapshot {
-	result := Snapshot{
-		resourceClaimsById:       map[ResourceClaimId]*resourceapi.ResourceClaim{},
-		resourceSlicesByNodeName: map[string][]*resourceapi.ResourceSlice{},
-		deviceClasses:            map[string]*resourceapi.DeviceClass{},
-	}
-	// The claims are mutable, they have to be deep-copied.
-	for id, claim := range s.resourceClaimsById {
-		result.resourceClaimsById[id] = claim.DeepCopy()
-	}
-	// The rest of the objects aren't mutable, so a shallow copy should be enough.
-	for nodeName, slices := range s.resourceSlicesByNodeName {
-		for _, slice := range slices {
-			result.resourceSlicesByNodeName[nodeName] = append(result.resourceSlicesByNodeName[nodeName], slice)
-		}
-	}
-	for _, slice := range s.nonNodeLocalResourceSlices {
-		result.nonNodeLocalResourceSlices = append(result.nonNodeLocalResourceSlices, slice)
-	}
-	for className, class := range s.deviceClasses {
-		result.deviceClasses[className] = class
-	}
-	return result
-}
-
 // AddClaims adds additional ResourceClaims to the Snapshot. It can be used e.g. if we need to duplicate a Pod that
 // owns ResourceClaims. Returns an error if any of the claims is already tracked in the snapshot.
-func (s Snapshot) AddClaims(newClaims []*resourceapi.ResourceClaim) error {
+func (s *Snapshot) AddClaims(newClaims []*resourceapi.ResourceClaim) error {
 	for _, claim := range newClaims {
-		if _, found := s.resourceClaimsById[GetClaimId(claim)]; found {
+		if _, found := s.resourceClaims.FindValue(GetClaimId(claim)); found {
 			return fmt.Errorf("claim %s/%s already tracked in the snapshot", claim.Namespace, claim.Name)
 		}
 	}
+
 	for _, claim := range newClaims {
-		s.resourceClaimsById[GetClaimId(claim)] = claim
+		s.resourceClaims.SetCurrent(GetClaimId(claim), claim)
 	}
+
 	return nil
 }
 
 // PodClaims returns ResourceClaims objects for all claims referenced by the Pod. If any of the referenced claims
 // isn't tracked in the Snapshot, an error is returned.
-func (s Snapshot) PodClaims(pod *apiv1.Pod) ([]*resourceapi.ResourceClaim, error) {
-	var result []*resourceapi.ResourceClaim
-
-	for _, claimRef := range pod.Spec.ResourceClaims {
-		claim, err := s.claimForPod(pod, claimRef)
-		if err != nil {
-			return nil, fmt.Errorf("error while obtaining ResourceClaim %s for pod %s/%s: %v", claimRef.Name, pod.Namespace, pod.Name, err)
-		}
-		result = append(result, claim)
-	}
-
-	return result, nil
+func (s *Snapshot) PodClaims(pod *apiv1.Pod) ([]*resourceapi.ResourceClaim, error) {
+	return s.findPodClaims(pod, false)
 }
 
 // RemovePodOwnedClaims iterates over all the claims referenced by the Pod, and removes the ones owned by the Pod from the Snapshot.
 // Claims referenced by the Pod but not owned by it are not removed, but the Pod's reservation is removed from them.
 // This method removes all relevant claims that are in the snapshot, and doesn't error out if any of the claims are missing.
-func (s Snapshot) RemovePodOwnedClaims(pod *apiv1.Pod) {
-	for _, podClaimRef := range pod.Spec.ResourceClaims {
-		claimName := claimRefToName(pod, podClaimRef)
-		if claimName == "" {
-			// This most likely means that the Claim hasn't actually been created. Nothing to remove/modify, so continue to the next claim.
-			continue
-		}
-		claimId := ResourceClaimId{Name: claimName, Namespace: pod.Namespace}
-		claim, found := s.resourceClaimsById[claimId]
-		if !found {
-			// The claim isn't tracked in the snapshot for some reason. Nothing to remove/modify, so continue to the next claim.
+func (s *Snapshot) RemovePodOwnedClaims(pod *apiv1.Pod) {
+	claims, err := s.findPodClaims(pod, true)
+	if err != nil {
+		klog.Errorf("Snapshot.RemovePodOwnedClaims ignored an error: %s", err)
+	}
+
+	for _, claim := range claims {
+		claimId := GetClaimId(claim)
+		if drautils.PodOwnsClaim(pod, claim) {
+			s.resourceClaims.DeleteCurrent(claimId)
 			continue
 		}
-		if err := resourceclaim.IsForPod(pod, claim); err == nil {
-			delete(s.resourceClaimsById, claimId)
-		} else {
-			drautils.ClearPodReservationInPlace(claim, pod)
-		}
+
+		claim := s.ensureClaimWritable(claim)
+		drautils.ClearPodReservationInPlace(claim, pod)
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
 }
 
 // ReservePodClaims adds a reservation for the provided Pod to all the claims it references. If any of the referenced
 // claims isn't tracked in the Snapshot, or if any of the claims are already at maximum reservation count, an error is
 // returned.
-func (s Snapshot) ReservePodClaims(pod *apiv1.Pod) error {
-	claims, err := s.PodClaims(pod)
+func (s *Snapshot) ReservePodClaims(pod *apiv1.Pod) error {
+	claims, err := s.findPodClaims(pod, false)
 	if err != nil {
 		return err
 	}
+
 	for _, claim := range claims {
 		if drautils.ClaimFullyReserved(claim) && !resourceclaim.IsReservedForPod(pod, claim) {
 			return fmt.Errorf("claim %s/%s already has max number of reservations set, can't add more", claim.Namespace, claim.Name)
 		}
 	}
+
 	for _, claim := range claims {
+		claimId := GetClaimId(claim)
+		claim := s.ensureClaimWritable(claim)
 		drautils.AddPodReservationInPlace(claim, pod)
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
+
 	return nil
 }
 
 // UnreservePodClaims removes reservations for the provided Pod from all the claims it references. If any of the referenced
 // claims isn't tracked in the Snapshot, an error is returned. If a claim is owned by the pod, or if the claim has no more reservations,
 // its allocation is cleared.
-func (s Snapshot) UnreservePodClaims(pod *apiv1.Pod) error {
-	claims, err := s.PodClaims(pod)
+func (s *Snapshot) UnreservePodClaims(pod *apiv1.Pod) error {
+	claims, err := s.findPodClaims(pod, false)
 	if err != nil {
 		return err
 	}
+
 	for _, claim := range claims {
-		podOwnedClaim := resourceclaim.IsForPod(pod, claim) == nil
+		claimId := GetClaimId(claim)
+		claim := s.ensureClaimWritable(claim)
 		drautils.ClearPodReservationInPlace(claim, pod)
-		if podOwnedClaim || !drautils.ClaimInUse(claim) {
+		if drautils.PodOwnsClaim(pod, claim) || !drautils.ClaimInUse(claim) {
 			drautils.DeallocateClaimInPlace(claim)
 		}
+
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
 	return nil
 }
 
 // NodeResourceSlices returns all node-local ResourceSlices for the given Node.
-func (s Snapshot) NodeResourceSlices(nodeName string) ([]*resourceapi.ResourceSlice, bool) {
-	slices, found := s.resourceSlicesByNodeName[nodeName]
+func (s *Snapshot) NodeResourceSlices(nodeName string) ([]*resourceapi.ResourceSlice, bool) {
+	slices, found := s.resourceSlices.FindValue(nodeName)
 	return slices, found
 }
 
 // AddNodeResourceSlices adds additional node-local ResourceSlices to the Snapshot. This should be used whenever a Node with
 // node-local ResourceSlices is duplicated in the cluster snapshot.
-func (s Snapshot) AddNodeResourceSlices(nodeName string, slices []*resourceapi.ResourceSlice) error {
-	if _, alreadyInSnapshot := s.resourceSlicesByNodeName[nodeName]; alreadyInSnapshot {
+func (s *Snapshot) AddNodeResourceSlices(nodeName string, slices []*resourceapi.ResourceSlice) error {
+	if _, alreadyInSnapshot := s.NodeResourceSlices(nodeName); alreadyInSnapshot {
 		return fmt.Errorf("node %q ResourceSlices already present", nodeName)
 	}
-	s.resourceSlicesByNodeName[nodeName] = slices
+
+	s.resourceSlices.SetCurrent(nodeName, slices)
 	return nil
 }
 
-// RemoveNodeResourceSlices removes all node-local ResourceSlices for the Node with the given nodeName.
+// RemoveNodeResourceSlices removes all node-local ResourceSlices for the Node with the given node name.
 // It's a no-op if there aren't any slices to remove.
-func (s Snapshot) RemoveNodeResourceSlices(nodeName string) {
-	delete(s.resourceSlicesByNodeName, nodeName)
+func (s *Snapshot) RemoveNodeResourceSlices(nodeName string) {
+	s.resourceSlices.DeleteCurrent(nodeName)
 }
 
-func (s Snapshot) claimForPod(pod *apiv1.Pod, claimRef apiv1.PodResourceClaim) (*resourceapi.ResourceClaim, error) {
-	claimName := claimRefToName(pod, claimRef)
-	if claimName == "" {
-		return nil, fmt.Errorf("couldn't determine ResourceClaim name")
+// Commit persists changes done in the topmost layer merging them into the one below it.
+func (s *Snapshot) Commit() {
+	s.deviceClasses.Commit()
+	s.resourceClaims.Commit()
+	s.resourceSlices.Commit()
+}
+
+// Revert removes the topmost patch layer for all resource types, discarding
+// any modifications or deletions recorded there.
+func (s *Snapshot) Revert() {
+	s.deviceClasses.Revert()
+	s.resourceClaims.Revert()
+	s.resourceSlices.Revert()
+}
+
+// Fork adds a new, empty patch layer to the top of the stack for all
+// resource types. Subsequent modifications will be recorded in this
+// new layer until Commit() or Revert() are invoked.
+func (s *Snapshot) Fork() {
+	s.deviceClasses.Fork()
+	s.resourceClaims.Fork()
+	s.resourceSlices.Fork()
+}
+
+// listDeviceClasses retrieves all effective DeviceClasses from the snapshot.
+func (s *Snapshot) listDeviceClasses() []*resourceapi.DeviceClass {
+	deviceClasses := s.deviceClasses.AsMap()
+	deviceClassesList := make([]*resourceapi.DeviceClass, 0, len(deviceClasses))
+	for _, class := range deviceClasses {
+		deviceClassesList = append(deviceClassesList, class)
+	}
+
+	return deviceClassesList
+}
+
+// listResourceClaims retrieves all effective ResourceClaims from the snapshot.
+func (s *Snapshot) listResourceClaims() []*resourceapi.ResourceClaim {
+	claims := s.resourceClaims.AsMap()
+	claimsList := make([]*resourceapi.ResourceClaim, 0, len(claims))
+	for _, claim := range claims {
+		claimsList = append(claimsList, claim)
+	}
+
+	return claimsList
+}
+
+// configureResourceClaim updates or adds a ResourceClaim in the current patch layer.
+// This is typically used internally when a claim's state (like allocation) changes.
+func (s *Snapshot) configureResourceClaim(claim *resourceapi.ResourceClaim) {
+	claimId := ResourceClaimId{Name: claim.Name, Namespace: claim.Namespace}
+	s.resourceClaims.SetCurrent(claimId, claim)
+}
+
+// getResourceClaim retrieves a specific ResourceClaim by its ID from the snapshot.
+func (s *Snapshot) getResourceClaim(claimId ResourceClaimId) (*resourceapi.ResourceClaim, bool) {
+	return s.resourceClaims.FindValue(claimId)
+}
+
+// listResourceSlices retrieves all effective ResourceSlices from the snapshot.
+func (s *Snapshot) listResourceSlices() []*resourceapi.ResourceSlice {
+	resourceSlices := s.resourceSlices.AsMap()
+	resourceSlicesList := make([]*resourceapi.ResourceSlice, 0, len(resourceSlices))
+	for _, nodeSlices := range resourceSlices {
+		resourceSlicesList = append(resourceSlicesList, nodeSlices...)
 	}
 
-	claim, found := s.resourceClaimsById[ResourceClaimId{Name: claimName, Namespace: pod.Namespace}]
-	if !found {
-		return nil, fmt.Errorf("couldn't find ResourceClaim %q", claimName)
+	return resourceSlicesList
+}
+
+// getDeviceClass retrieves a specific DeviceClass by its name from the snapshot.
+func (s *Snapshot) getDeviceClass(className string) (*resourceapi.DeviceClass, bool) {
+	return s.deviceClasses.FindValue(className)
+}
+
+// findPodClaims retrieves all ResourceClaim objects referenced by a given pod.
+// If ignoreNotTracked is true, it skips claims not found in the snapshot; otherwise, it returns an error.
+func (s *Snapshot) findPodClaims(pod *apiv1.Pod, ignoreNotTracked bool) ([]*resourceapi.ResourceClaim, error) {
+	result := make([]*resourceapi.ResourceClaim, 0, len(pod.Spec.ResourceClaims))
+	for _, claimRef := range pod.Spec.ResourceClaims {
+		claimName := claimRefToName(pod, claimRef)
+		if claimName == "" {
+			if !ignoreNotTracked {
+				return nil, fmt.Errorf(
+					"error while obtaining ResourceClaim %s for pod %s/%s: couldn't determine ResourceClaim name",
+					claimRef.Name,
+					pod.Namespace,
+					pod.Name,
+				)
+			}
+
+			continue
+		}
+
+		claimId := ResourceClaimId{Name: claimName, Namespace: pod.Namespace}
+		claim, found := s.resourceClaims.FindValue(claimId)
+		if !found {
+			if !ignoreNotTracked {
+				return nil, fmt.Errorf(
+					"error while obtaining ResourceClaim %s for pod %s/%s: couldn't find ResourceClaim in the snapshot",
+					claimRef.Name,
+					pod.Namespace,
+					pod.Name,
+				)
+			}
+
+			continue
+		}
+
+		result = append(result, claim)
+	}
+
+	return result, nil
+}
+
+// ensureClaimWritable returns a ResourceClaim suitable for in-place modifications.
+// If the requested claim is already stored in the current patch, the same object is
+// returned; otherwise a deep copy is created. This lets claim-mutating operations
+// implement a copy-on-write policy when the claim isn't yet tracked in the topmost
+// layer of the patchSet.
+func (s *Snapshot) ensureClaimWritable(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
+	claimId := GetClaimId(claim)
+	if s.resourceClaims.InCurrentPatch(claimId) {
+		return claim
 	}
 
-	return claim, nil
+	return claim.DeepCopy()
 }
 
+// claimRefToName determines the actual name of a ResourceClaim based on a PodResourceClaim reference.
+// It first checks if the name is directly specified in the reference. If not, it looks up the name
+// in the pod's status. Returns an empty string if the name cannot be determined.
 func claimRefToName(pod *apiv1.Pod, claimRef apiv1.PodResourceClaim) string {
 	if claimRef.ResourceClaimName != nil {
 		return *claimRef.ResourceClaimName
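
A hedged sketch of the copy-on-write contract the rewritten Snapshot provides. It borrows fixture names from the package tests (pod1, sharedClaim1) and assumes pod1 references sharedClaim1; it illustrates the Fork/Revert behaviour rather than reproducing an actual test.

// Illustrative only: claim and pod fixtures are assumed to come from the tests.
snap := NewSnapshot(
	map[ResourceClaimId]*resourceapi.ResourceClaim{GetClaimId(sharedClaim1): sharedClaim1},
	nil, nil, nil)

snap.Fork() // a new patch layer for claims, slices and device classes
if err := snap.ReservePodClaims(pod1); err != nil {
	// a referenced claim is missing from the snapshot or already fully reserved
}
// ReservePodClaims deep-copied sharedClaim1 into the top layer before mutating it
// (ensureClaimWritable), so the object passed to NewSnapshot above stays untouched.

snap.Revert() // the reservation is gone; the base-layer claim is served again
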
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
index c1dad60a12cae89e88651069488863244e2e2271..a277303243e414448b3980eb8d4a36015c89898a 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
@@ -25,51 +25,51 @@ import (
 	"k8s.io/dynamic-resource-allocation/structured"
 )
 
-type snapshotClaimTracker Snapshot
+type snapshotClaimTracker struct {
+	snapshot *Snapshot
+}
 
-func (s snapshotClaimTracker) List() ([]*resourceapi.ResourceClaim, error) {
-	var result []*resourceapi.ResourceClaim
-	for _, claim := range s.resourceClaimsById {
-		result = append(result, claim)
-	}
-	return result, nil
+func (ct snapshotClaimTracker) List() ([]*resourceapi.ResourceClaim, error) {
+	return ct.snapshot.listResourceClaims(), nil
 }
 
-func (s snapshotClaimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) {
-	claim, found := s.resourceClaimsById[ResourceClaimId{Name: claimName, Namespace: namespace}]
+func (ct snapshotClaimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) {
+	claimId := ResourceClaimId{Name: claimName, Namespace: namespace}
+	claim, found := ct.snapshot.getResourceClaim(claimId)
 	if !found {
 		return nil, fmt.Errorf("claim %s/%s not found", namespace, claimName)
 	}
 	return claim, nil
 }
 
-func (s snapshotClaimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) {
+func (ct snapshotClaimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) {
 	result := sets.New[structured.DeviceID]()
-	for _, claim := range s.resourceClaimsById {
+	for _, claim := range ct.snapshot.listResourceClaims() {
 		result = result.Union(claimAllocatedDevices(claim))
 	}
 	return result, nil
 }
 
-func (s snapshotClaimTracker) SignalClaimPendingAllocation(claimUid types.UID, allocatedClaim *resourceapi.ResourceClaim) error {
+func (ct snapshotClaimTracker) SignalClaimPendingAllocation(claimUid types.UID, allocatedClaim *resourceapi.ResourceClaim) error {
 	// The DRA scheduler plugin calls this at the end of the scheduling phase, in Reserve. Then, the allocation is persisted via an API
 	// call during the binding phase.
 	//
 	// In Cluster Autoscaler only the scheduling phase is run, so SignalClaimPendingAllocation() is used to obtain the allocation
 	// and persist it in-memory in the snapshot.
-	ref := ResourceClaimId{Name: allocatedClaim.Name, Namespace: allocatedClaim.Namespace}
-	claim, found := s.resourceClaimsById[ref]
+	claimId := ResourceClaimId{Name: allocatedClaim.Name, Namespace: allocatedClaim.Namespace}
+	claim, found := ct.snapshot.getResourceClaim(claimId)
 	if !found {
 		return fmt.Errorf("claim %s/%s not found", allocatedClaim.Namespace, allocatedClaim.Name)
 	}
 	if claim.UID != claimUid {
 		return fmt.Errorf("claim %s/%s: snapshot has UID %q, allocation came for UID %q - shouldn't happenn", allocatedClaim.Namespace, allocatedClaim.Name, claim.UID, claimUid)
 	}
-	s.resourceClaimsById[ref] = allocatedClaim
+
+	ct.snapshot.configureResourceClaim(allocatedClaim)
 	return nil
 }
 
-func (s snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool {
+func (ct snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool {
 	// The DRA scheduler plugin calls this at the beginning of Filter, and fails the filter if true is returned to handle race conditions.
 	//
 	// In the scheduler implementation, ClaimHasPendingAllocation() starts answering true after SignalClaimPendingAllocation()
@@ -81,19 +81,19 @@ func (s snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool
 	return false
 }
 
-func (s snapshotClaimTracker) RemoveClaimPendingAllocation(claimUid types.UID) (deleted bool) {
+func (ct snapshotClaimTracker) RemoveClaimPendingAllocation(claimUid types.UID) (deleted bool) {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.RemoveClaimPendingAllocation() was called - this should never happen")
 }
 
-func (s snapshotClaimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error {
+func (ct snapshotClaimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.AssumeClaimAfterAPICall() was called - this should never happen")
 }
 
-func (s snapshotClaimTracker) AssumedClaimRestore(namespace, claimName string) {
+func (ct snapshotClaimTracker) AssumedClaimRestore(namespace, claimName string) {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.AssumedClaimRestore() was called - this should never happen")
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
index 273d7c24aa48300871900d007c5faadbbae69dfd..b3d50e0badfd1d83d2adf3784cc068600d76b5f1 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
@@ -170,7 +170,7 @@ func TestSnapshotClaimTrackerListAllAllocatedDevices(t *testing.T) {
 				GetClaimId(allocatedClaim2): allocatedClaim2,
 				GetClaimId(claim3):          claim3,
 			},
-			wantDevices: sets.New[structured.DeviceID](
+			wantDevices: sets.New(
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-1"),
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-2"),
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-3"),
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
index 7beaaba22763b67c671157fba458b71bf2efef66..39affd2847c2270eadaa553b0febf7c8f0696936 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
@@ -22,18 +22,16 @@ import (
 	resourceapi "k8s.io/api/resource/v1beta1"
 )
 
-type snapshotClassLister Snapshot
+type snapshotClassLister struct {
+	snapshot *Snapshot
+}
 
 func (s snapshotClassLister) List() ([]*resourceapi.DeviceClass, error) {
-	var result []*resourceapi.DeviceClass
-	for _, class := range s.deviceClasses {
-		result = append(result, class)
-	}
-	return result, nil
+	return s.snapshot.listDeviceClasses(), nil
 }
 
 func (s snapshotClassLister) Get(className string) (*resourceapi.DeviceClass, error) {
-	class, found := s.deviceClasses[className]
+	class, found := s.snapshot.getDeviceClass(className)
 	if !found {
 		return nil, fmt.Errorf("DeviceClass %q not found", className)
 	}
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
index de14ded286e422a6e8423d7c2d953597517a6596..65f941d85f97030cea25b047a93127cb3de36721 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
@@ -20,15 +20,10 @@ import (
 	resourceapi "k8s.io/api/resource/v1beta1"
 )
 
-type snapshotSliceLister Snapshot
+type snapshotSliceLister struct {
+	snapshot *Snapshot
+}
 
-func (s snapshotSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
-	var result []*resourceapi.ResourceSlice
-	for _, slices := range s.resourceSlicesByNodeName {
-		for _, slice := range slices {
-			result = append(result, slice)
-		}
-	}
-	result = append(result, s.nonNodeLocalResourceSlices...)
-	return result, nil
+func (sl snapshotSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
+	return sl.snapshot.listResourceSlices(), nil
 }
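
A brief sketch of why the lister no longer needs a separate global-slice list: node-local and non-node-local slices share one patchSet, with the latter stored under the reserved internal key, so a single flattened walk returns both. node1Slice1 and globalSlice1 are fixture names from the package tests, and the fmt call is only there to make the sketch self-contained.

// Illustrative only: slice fixtures are assumed to come from the tests.
snap := NewSnapshot(nil,
	map[string][]*resourceapi.ResourceSlice{"node1": {node1Slice1}},
	[]*resourceapi.ResourceSlice{globalSlice1},
	nil)

if slices, err := snap.ResourceSlices().List(); err == nil {
	fmt.Println(len(slices)) // 2: node1Slice1 (node-local) plus globalSlice1 (global)
}
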
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
index 386765313f7a4e4a12eb2f3a62f91b35ebcd54be..7388cbcc3c029251a6eab7163f933ebd94ba134a 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
@@ -68,6 +68,9 @@ var (
 	pod2OwnClaim1 = drautils.TestClaimWithPodOwnership(pod2,
 		&resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "pod2-ownClaim1-abc", UID: "pod2-ownClaim1-abc", Namespace: "default"}},
 	)
+
+	deviceClass1 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class1", UID: "class1-uid"}}
+	deviceClass2 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class2", UID: "class2-uid"}}
 )
 
 func TestSnapshotResourceClaims(t *testing.T) {
@@ -79,7 +82,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 
 		claims map[ResourceClaimId]*resourceapi.ResourceClaim
 
-		claimsModFun        func(snapshot Snapshot) error
+		claimsModFun        func(snapshot *Snapshot) error
 		wantClaimsModFunErr error
 
 		pod              *apiv1.Pod
@@ -141,7 +144,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim2.DeepCopy(), sharedClaim1.DeepCopy()})
 			},
 			wantClaimsModFunErr: cmpopts.AnyError,
@@ -153,7 +156,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim2.DeepCopy(), pod2OwnClaim1.DeepCopy()}); err != nil {
 					return err
 				}
@@ -171,7 +174,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				snapshot.RemovePodOwnedClaims(pod1)
 				return nil
 			},
@@ -189,7 +192,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				snapshot.RemovePodOwnedClaims(pod1)
 				return nil
 			},
@@ -211,7 +214,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing, so this should be an error.
 				return snapshot.ReservePodClaims(pod1)
 			},
@@ -234,7 +237,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing in claims above, so this should be an error.
 				return snapshot.ReservePodClaims(pod1)
 			},
@@ -258,7 +261,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.ReservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -286,7 +289,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing in claims above, so this should be an error.
 				return snapshot.UnreservePodClaims(pod1)
 			},
@@ -309,7 +312,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.UnreservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -338,7 +341,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1), nil),
 				GetClaimId(pod1OwnClaim2): drautils.TestClaimWithAllocation(pod1OwnClaim2.DeepCopy(), nil),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.UnreservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -404,7 +407,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 	for _, tc := range []struct {
 		testName string
 
-		slicesModFun        func(snapshot Snapshot) error
+		slicesModFun        func(snapshot *Snapshot) error
 		wantSlicesModFunErr error
 
 		nodeName            string
@@ -426,7 +429,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "AddNodeResourceSlices(): adding slices for a Node that already has slices tracked is an error",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddNodeResourceSlices("node1", []*resourceapi.ResourceSlice{node1Slice1})
 			},
 			wantSlicesModFunErr: cmpopts.AnyError,
@@ -434,7 +437,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "AddNodeResourceSlices(): adding slices for a new Node works correctly",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{extraNode3Slice1, extraNode3Slice2})
 			},
 			nodeName:            "node3",
@@ -444,7 +447,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "RemoveNodeResourceSlices(): removing slices for a non-existing Node is a no-op",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				snapshot.RemoveNodeResourceSlices("node3")
 				return nil
 			},
@@ -452,7 +455,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "RemoveNodeResourceSlices(): removing slices for an existing Node works correctly",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				snapshot.RemoveNodeResourceSlices("node2")
 				return nil
 			},
@@ -574,6 +577,7 @@ func TestSnapshotWrapSchedulerNodeInfo(t *testing.T) {
 			cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
 				cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
 				test.IgnoreObjectOrder[*resourceapi.ResourceClaim](), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()}
+
 			if diff := cmp.Diff(tc.wantNodeInfo, nodeInfo, cmpOpts...); diff != "" {
 				t.Errorf("Snapshot.WrapSchedulerNodeInfo(): unexpected output (-want +got): %s", diff)
 			}
@@ -581,121 +585,6 @@ func TestSnapshotWrapSchedulerNodeInfo(t *testing.T) {
 	}
 }
 
-func TestSnapshotClone(t *testing.T) {
-	for _, tc := range []struct {
-		testName           string
-		snapshot           Snapshot
-		cloneModFun        func(snapshot Snapshot) error
-		wantModifiedClaims []*resourceapi.ResourceClaim
-		wantModifiedSlices []*resourceapi.ResourceSlice
-	}{
-		{
-			testName: "empty snapshot",
-			snapshot: Snapshot{},
-			cloneModFun: func(snapshot Snapshot) error {
-				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{pod1OwnClaim1.DeepCopy(), pod1OwnClaim2.DeepCopy()}); err != nil {
-					return err
-				}
-				return snapshot.AddNodeResourceSlices("node1", []*resourceapi.ResourceSlice{node1Slice1, node1Slice2})
-			},
-			wantModifiedClaims: []*resourceapi.ResourceClaim{pod1OwnClaim1, pod1OwnClaim2},
-			wantModifiedSlices: []*resourceapi.ResourceSlice{node1Slice1, node1Slice2},
-		},
-		{
-			testName: "non-empty snapshot",
-			snapshot: NewSnapshot(
-				map[ResourceClaimId]*resourceapi.ResourceClaim{
-					GetClaimId(sharedClaim1):  drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
-					GetClaimId(sharedClaim2):  sharedClaim2.DeepCopy(),
-					GetClaimId(sharedClaim3):  drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
-					GetClaimId(pod2OwnClaim1): drautils.TestClaimWithPodOwnership(pod2, drautils.TestClaimWithPodReservations(pod2OwnClaim1, pod2)),
-				},
-				map[string][]*resourceapi.ResourceSlice{
-					"node1": {node1Slice1, node1Slice2},
-					"node2": {node2Slice1, node2Slice2},
-				},
-				[]*resourceapi.ResourceSlice{globalSlice1, globalSlice2}, nil),
-			cloneModFun: func(snapshot Snapshot) error {
-				if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{node3Slice1, node3Slice2}); err != nil {
-					return err
-				}
-				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{pod1OwnClaim1.DeepCopy(), pod1OwnClaim2.DeepCopy()}); err != nil {
-					return err
-				}
-				if err := snapshot.ReservePodClaims(pod1); err != nil {
-					return err
-				}
-				snapshot.RemovePodOwnedClaims(pod2)
-				snapshot.RemoveNodeResourceSlices("node1")
-				return nil
-			},
-			wantModifiedSlices: []*resourceapi.ResourceSlice{node2Slice1, node2Slice2, node3Slice1, node3Slice2, globalSlice1, globalSlice2},
-			wantModifiedClaims: []*resourceapi.ResourceClaim{
-				drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
-				drautils.TestClaimWithPodReservations(pod1OwnClaim2, pod1),
-				drautils.TestClaimWithPodReservations(sharedClaim1, pod1),
-				drautils.TestClaimWithPodReservations(sharedClaim2, pod1),
-				sharedClaim3,
-			},
-		},
-	} {
-		t.Run(tc.testName, func(t *testing.T) {
-			// Grab the initial state of the snapshot to verify that it doesn't change when the clone is modified.
-			initialClaims, err := tc.snapshot.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			initialSlices, err := tc.snapshot.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-
-			// Clone and verify that the clone is identical to the original.
-			snapshotClone := tc.snapshot.Clone()
-			if diff := cmp.Diff(tc.snapshot, snapshotClone, cmpopts.EquateEmpty(), cmp.AllowUnexported(Snapshot{}, framework.NodeInfo{}, schedulerframework.NodeInfo{})); diff != "" {
-				t.Fatalf("Snapshot.Clone(): snapshot not identical after cloning (-want +got): %s", diff)
-			}
-
-			// Modify the clone.
-			if err := tc.cloneModFun(snapshotClone); err != nil {
-				t.Fatalf("Snapshot: unexpected error during snapshot modification: %v", err)
-			}
-
-			// Verify that the clone changed as expected.
-			modifiedClaims, err := snapshotClone.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			modifiedSlices, err := snapshotClone.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-			if diff := cmp.Diff(tc.wantModifiedClaims, modifiedClaims, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
-				t.Errorf("Snapshot: unexpected ResourceClaim state after modifications (-want +got): %s", diff)
-			}
-			if diff := cmp.Diff(tc.wantModifiedSlices, modifiedSlices, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
-				t.Errorf("Snapshot: unexpected ResourceSlice state after modifications (-want +got): %s", diff)
-			}
-
-			// Verify that the original hasn't changed during clone modifications.
-			initialClaimsAfterCloneMod, err := tc.snapshot.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			initialSlicesAfterCloneMod, err := tc.snapshot.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-			if diff := cmp.Diff(initialClaims, initialClaimsAfterCloneMod, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
-				t.Errorf("Snapshot: ResourceClaim state changed in original snapshot during modifications on Clone (-want +got): %s", diff)
-			}
-			if diff := cmp.Diff(initialSlices, initialSlicesAfterCloneMod, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
-				t.Errorf("Snapshot: ResourceSlice state changed in original snapshot during modifications on Clone (-want +got): %s", diff)
-			}
-		})
-	}
-}
-
 func testPods(count int) []*apiv1.Pod {
 	var result []*apiv1.Pod
 	for i := range count {
@@ -703,3 +592,179 @@ func testPods(count int) []*apiv1.Pod {
 	}
 	return result
 }
+
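+// TestSnapshotForkCommitRevert exercises the layered Fork/Commit/Revert semantics of Snapshot:
+// modifications made after Fork() can either be discarded with Revert() or merged into the
+// layer below with Commit(), while Commit() and Revert() without a preceding Fork() are no-ops.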
+func TestSnapshotForkCommitRevert(t *testing.T) {
+	initialClaims := map[ResourceClaimId]*resourceapi.ResourceClaim{
+		GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
+		GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
+		GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
+	}
+	initialDeviceClasses := map[string]*resourceapi.DeviceClass{deviceClass1.Name: deviceClass1.DeepCopy(), deviceClass2.Name: deviceClass2.DeepCopy()}
+	initialLocalSlices := map[string][]*resourceapi.ResourceSlice{node1Slice1.Spec.NodeName: {node1Slice1.DeepCopy()}}
+	initialGlobalSlices := []*resourceapi.ResourceSlice{globalSlice1.DeepCopy(), globalSlice2.DeepCopy()}
+	initialState := NewSnapshot(initialClaims, initialLocalSlices, initialGlobalSlices, initialDeviceClasses)
+
+	addedClaim := sharedClaim2.DeepCopy()
+	addedNodeSlice := node2Slice1.DeepCopy()
+	podToReserve := pod1.DeepCopy()
+
+	modifiedClaims := map[ResourceClaimId]*resourceapi.ResourceClaim{
+		GetClaimId(sharedClaim1):  drautils.TestClaimWithPodReservations(sharedClaim1, podToReserve),
+		GetClaimId(sharedClaim2):  drautils.TestClaimWithPodReservations(addedClaim, podToReserve),
+		GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, podToReserve),
+		GetClaimId(pod1OwnClaim2): drautils.TestClaimWithPodReservations(pod1OwnClaim2, podToReserve),
+	}
+	modifiedLocalSlices := map[string][]*resourceapi.ResourceSlice{addedNodeSlice.Spec.NodeName: {addedNodeSlice.DeepCopy()}}
+	// Expected state after modifications are applied
+	modifiedState := NewSnapshot(
+		modifiedClaims,
+		modifiedLocalSlices,
+		initialGlobalSlices,
+		initialDeviceClasses,
+	)
+
+	applyModifications := func(t *testing.T, s *Snapshot) {
+		t.Helper()
+
+		addedSlices := []*resourceapi.ResourceSlice{addedNodeSlice.DeepCopy()}
+		if err := s.AddNodeResourceSlices(addedNodeSlice.Spec.NodeName, addedSlices); err != nil {
+			t.Fatalf("failed to add %s resource slices: %v", addedNodeSlice.Spec.NodeName, err)
+		}
+		if err := s.AddClaims([]*resourceapi.ResourceClaim{addedClaim}); err != nil {
+			t.Fatalf("failed to add %s claim: %v", addedClaim.Name, err)
+		}
+		if err := s.ReservePodClaims(podToReserve); err != nil {
+			t.Fatalf("failed to reserve claims for pod %s: %v", podToReserve.Name, err)
+		}
+
+		s.RemoveNodeResourceSlices(node1Slice1.Spec.NodeName)
+	}
+
+	compareSnapshots := func(t *testing.T, want, got *Snapshot, msg string) {
+		t.Helper()
+		if diff := cmp.Diff(want, got, SnapshotFlattenedComparer(), cmpopts.EquateEmpty()); diff != "" {
+			t.Errorf("%s: Snapshot state mismatch (-want +got):\n%s", msg, diff)
+		}
+	}
+
+	t.Run("Fork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		compareSnapshots(t, modifiedState, snapshot, "After Fork and Modify")
+	})
+
+	t.Run("ForkRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Revert()
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Revert")
+	})
+
+	t.Run("ForkCommit", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit()
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Commit")
+	})
+
+	t.Run("ForkForkRevertRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Fork()
+
+		// Apply further modifications in second fork (add claim3, slice3)
+		furtherModifiedClaim3 := sharedClaim3.DeepCopy()
+		furtherModifiedSlice3 := node3Slice1.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{furtherModifiedClaim3}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+		if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{furtherModifiedSlice3}); err != nil {
+			t.Fatalf("AddNodeResourceSlices failed in second fork: %v", err)
+		}
+
+		snapshot.Revert() // Revert second fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert")
+
+		snapshot.Revert() // Revert first fork
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Fork, Modify, Revert, Revert")
+	})
+
+	t.Run("ForkForkCommitRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit() // Commit second fork into first fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Fork, Modify, Commit")
+
+		snapshot.Revert() // Revert first fork (which now contains committed changes)
+		compareSnapshots(t, initialState, snapshot, "After Fork, Fork, Modify, Commit, Revert")
+	})
+
+	t.Run("ForkForkRevertCommit", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Fork()
+		// Apply further modifications in second fork (add claim3, slice3)
+		furtherModifiedClaim3 := sharedClaim3.DeepCopy()
+		furtherModifiedSlice3 := node3Slice1.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{furtherModifiedClaim3}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+		if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{furtherModifiedSlice3}); err != nil {
+			t.Fatalf("AddNodeResourceSlices failed in second fork: %v", err)
+		}
+
+		snapshot.Revert() // Revert second fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert")
+
+		snapshot.Commit() // Commit first fork (with original modifications)
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert, Commit")
+	})
+
+	t.Run("CommitNoFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Commit() // Should be a no-op
+		compareSnapshots(t, initialState, snapshot, "After Commit with no Fork")
+	})
+
+	t.Run("RevertNoFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Revert() // Should be a no-op
+		compareSnapshots(t, initialState, snapshot, "After Revert with no Fork")
+	})
+
+	t.Run("ForkCommitRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit()
+		// Now try to revert the committed changes (should be no-op as only base layer exists)
+		snapshot.Revert()
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Commit, Revert")
+	})
+
+	t.Run("ForkRevertFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Revert()
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Revert")
+
+		snapshot.Fork() // Fork again from the reverted (initial) state
+		differentClaim := sharedClaim3.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{differentClaim}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+
+		expectedState := CloneTestSnapshot(initialState)
+		// Apply the same change to the expected state.
+		if err := expectedState.AddClaims([]*resourceapi.ResourceClaim{differentClaim.DeepCopy()}); err != nil {
+			t.Fatalf("AddClaims failed on expected state: %v", err)
+		}
+
+		compareSnapshots(t, expectedState, snapshot, "After Fork, Modify, Revert, Fork, Modify")
+	})
+}
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..6293f3a8728a578f49cfdbf0c71db43687d2fb27
--- /dev/null
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package snapshot
+
+import (
+	"maps"
+
+	"github.com/google/go-cmp/cmp"
+	resourceapi "k8s.io/api/resource/v1beta1"
+)
+
+// CloneTestSnapshot creates a deep copy of the provided Snapshot.
+// This function is intended for testing purposes only.
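+//
+// A minimal usage sketch (original is an assumed, pre-populated *Snapshot):
+//
+//	clone := CloneTestSnapshot(original)
+//	clone.Fork() // mutating the clone leaves the original untouched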
+func CloneTestSnapshot(snapshot *Snapshot) *Snapshot {
+	cloned := &Snapshot{
+		deviceClasses:  newPatchSet[string, *resourceapi.DeviceClass](),
+		resourceSlices: newPatchSet[string, []*resourceapi.ResourceSlice](),
+		resourceClaims: newPatchSet[ResourceClaimId, *resourceapi.ResourceClaim](),
+	}
+
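+	// Deep-copy every patch layer so that later mutations of the clone (including further
+	// Fork/Commit/Revert calls) never alias objects still referenced by the original snapshot.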
+	for _, devicesPatch := range snapshot.deviceClasses.patches {
+		clonedPatch := newPatch[string, *resourceapi.DeviceClass]()
+		for key, deviceClass := range devicesPatch.Modified {
+			clonedPatch.Modified[key] = deviceClass.DeepCopy()
+		}
+		maps.Copy(clonedPatch.Deleted, devicesPatch.Deleted)
+		cloned.deviceClasses.patches = append(cloned.deviceClasses.patches, clonedPatch)
+	}
+
+	for _, slicesPatch := range snapshot.resourceSlices.patches {
+		clonedPatch := newPatch[string, []*resourceapi.ResourceSlice]()
+		for key, slices := range slicesPatch.Modified {
+			deepCopySlices := make([]*resourceapi.ResourceSlice, len(slices))
+			for i, slice := range slices {
+				deepCopySlices[i] = slice.DeepCopy()
+			}
+			clonedPatch.Modified[key] = deepCopySlices
+		}
+		maps.Copy(clonedPatch.Deleted, slicesPatch.Deleted)
+		cloned.resourceSlices.patches = append(cloned.resourceSlices.patches, clonedPatch)
+	}
+
+	for _, claimsPatch := range snapshot.resourceClaims.patches {
+		clonedPatch := newPatch[ResourceClaimId, *resourceapi.ResourceClaim]()
+		for claimId, claim := range claimsPatch.Modified {
+			clonedPatch.Modified[claimId] = claim.DeepCopy()
+		}
+		maps.Copy(clonedPatch.Deleted, claimsPatch.Deleted)
+		cloned.resourceClaims.patches = append(cloned.resourceClaims.patches, clonedPatch)
+	}
+
+	return cloned
+}
+
+// SnapshotFlattenedComparer returns a cmp.Option with a custom comparer function that
+// compares two *Snapshot objects by their flattened underlying data maps rather than
+// by their individual patch sets.
+// This function is intended for testing purposes only.
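+//
+// A minimal usage sketch (want and got are assumed *Snapshot values inside a test):
+//
+//	if diff := cmp.Diff(want, got, SnapshotFlattenedComparer(), cmpopts.EquateEmpty()); diff != "" {
+//		t.Errorf("snapshot mismatch (-want +got):\n%s", diff)
+//	}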
+func SnapshotFlattenedComparer() cmp.Option {
+	return cmp.Comparer(func(a, b *Snapshot) bool {
+		if a == nil || b == nil {
+			return a == b
+		}
+
+		devicesEqual := cmp.Equal(a.deviceClasses.AsMap(), b.deviceClasses.AsMap())
+		slicesEqual := cmp.Equal(a.resourceSlices.AsMap(), b.resourceSlices.AsMap())
+		claimsEqual := cmp.Equal(a.resourceClaims.AsMap(), b.resourceClaims.AsMap())
+
+		return devicesEqual && slicesEqual && claimsEqual
+	})
+}
diff --git a/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go b/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
index 09cffb6d88f2b69887dc63ab78531169682dce4b..58481b673f262e5ad4dce964dc44371c55b10a74 100644
--- a/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
+++ b/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
@@ -22,10 +22,29 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	resourceapi "k8s.io/api/resource/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/component-helpers/scheduling/corev1"
-	resourceclaim "k8s.io/dynamic-resource-allocation/resourceclaim"
+	"k8s.io/dynamic-resource-allocation/resourceclaim"
+	"k8s.io/utils/ptr"
 )
 
+// ClaimOwningPod returns the name and UID of the Pod owner of the provided claim. If the claim isn't
+// owned by a Pod, empty strings are returned.
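+//
+// A usage sketch (claim is an assumed *resourceapi.ResourceClaim):
+//
+//	name, uid := ClaimOwningPod(claim)
+//	if name == "" && uid == "" {
+//		// The claim is not controlled by any Pod (e.g. a shared claim).
+//	}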
+func ClaimOwningPod(claim *resourceapi.ResourceClaim) (string, types.UID) {
+	for _, owner := range claim.OwnerReferences {
+		if ptr.Deref(owner.Controller, false) && owner.APIVersion == "v1" && owner.Kind == "Pod" {
+			return owner.Name, owner.UID
+		}
+	}
+	return "", ""
+}
+
+// PodOwnsClaim determines if the provided pod is the controller of the given claim.
+func PodOwnsClaim(pod *apiv1.Pod, claim *resourceapi.ResourceClaim) bool {
+	ownerPodName, ownerPodUid := ClaimOwningPod(claim)
+	return ownerPodName == pod.Name && ownerPodUid == pod.UID
+}
+
 // ClaimAllocated returns whether the provided claim is allocated.
 func ClaimAllocated(claim *resourceapi.ResourceClaim) bool {
 	return claim.Status.Allocation != nil