diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
index 1ac006c6d06564f42f57dc2c59bef6cdf685204f..242caeb1a6b40b2a4d675e63dcffaaf0ab7653bf 100644
--- a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
+++ b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go
@@ -26,7 +26,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -111,7 +110,7 @@ func TestFilterOutExpendable(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			processor := NewFilterOutExpendablePodListProcessor()
 			snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-			err := snapshot.SetClusterState(tc.nodes, nil, drasnapshot.Snapshot{})
+			err := snapshot.SetClusterState(tc.nodes, nil, nil)
 			assert.NoError(t, err)
 
 			pods, err := processor.Process(&context.AutoscalingContext{
diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
index 61303ed3a8d154abb3b78538dc60df6857abfc2e..5eb7db7b64e51ba791f723fb3db8d6c77dc43d91 100644
--- a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
+++ b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go
@@ -27,7 +27,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/store"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -281,7 +280,7 @@ func BenchmarkFilterOutSchedulable(b *testing.B) {
 				}
 
 				clusterSnapshot := snapshotFactory()
-				if err := clusterSnapshot.SetClusterState(nodes, scheduledPods, drasnapshot.Snapshot{}); err != nil {
+				if err := clusterSnapshot.SetClusterState(nodes, scheduledPods, nil); err != nil {
 					assert.NoError(b, err)
 				}
 
diff --git a/cluster-autoscaler/core/scaledown/actuation/actuator.go b/cluster-autoscaler/core/scaledown/actuation/actuator.go
index 2542b383667505d1c5634bae73935786b3f36fdc..55ef2e5a8fa66a1367965ac3aa1b6b286604659a 100644
--- a/cluster-autoscaler/core/scaledown/actuation/actuator.go
+++ b/cluster-autoscaler/core/scaledown/actuation/actuator.go
@@ -406,7 +406,7 @@ func (a *Actuator) createSnapshot(nodes []*apiv1.Node) (clustersnapshot.ClusterS
 	scheduledPods := kube_util.ScheduledPods(pods)
 	nonExpendableScheduledPods := utils.FilterOutExpendablePods(scheduledPods, a.ctx.ExpendablePodsPriorityCutoff)
 
-	var draSnapshot drasnapshot.Snapshot
+	var draSnapshot *drasnapshot.Snapshot
 	if a.ctx.DynamicResourceAllocationEnabled && a.ctx.DraProvider != nil {
 		draSnapshot, err = a.ctx.DraProvider.Snapshot()
 		if err != nil {
diff --git a/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go b/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
index de00c8bb1180a4bf10640fd728b424da469d5b0b..41544693574b9496788e2dbdd15b3fca2a101a68 100644
--- a/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
+++ b/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go
@@ -42,7 +42,7 @@ type testCase struct {
 	desc                        string
 	nodes                       []*apiv1.Node
 	pods                        []*apiv1.Pod
-	draSnapshot                 drasnapshot.Snapshot
+	draSnapshot                 *drasnapshot.Snapshot
 	draEnabled                  bool
 	wantUnneeded                []string
 	wantUnremovable             []*simulator.UnremovableNode
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
index 1ba94b3d6b7e0eb3bf171026cc22239b3abc9d9b..70be8d036a16f9616b2147ae30b7ba32aa8d7296 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
@@ -46,7 +46,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
 	processorstest "k8s.io/autoscaler/cluster-autoscaler/processors/test"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -1044,7 +1043,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
 	// build orchestrator
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods), drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods), nil)
 	assert.NoError(t, err)
 	nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
 		Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
@@ -1154,7 +1153,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
 	}
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1197,7 +1196,7 @@ func TestBinpackingLimiter(t *testing.T) {
 
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
 		Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
@@ -1257,7 +1256,7 @@ func TestScaleUpNoHelp(t *testing.T) {
 	}
 	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, pods, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1412,7 +1411,7 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
 			listers := kube_util.NewListerRegistry(nil, nil, kube_util.NewTestPodLister(nil), nil, nil, nil, nil, nil, nil)
 			ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{BalanceSimilarNodeGroups: tc.balancingEnabled}, &fake.Clientset{}, listers, provider, nil, nil)
 			assert.NoError(t, err)
-			err = ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 			assert.NoError(t, err)
 			nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 			clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1496,7 +1495,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
 			}
 			context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
 			assert.NoError(t, err)
-			err = context.ClusterSnapshot.SetClusterState(nodes, podList, drasnapshot.Snapshot{})
+			err = context.ClusterSnapshot.SetClusterState(nodes, podList, nil)
 			assert.NoError(t, err)
 			nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
 			clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1672,7 +1671,7 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
 	assert.NoError(t, err)
 
 	nodes := []*apiv1.Node{n1, n2}
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 	processors := processorstest.NewTestProcessors(&context)
diff --git a/cluster-autoscaler/core/scaleup/resource/manager_test.go b/cluster-autoscaler/core/scaleup/resource/manager_test.go
index 280942601ef3822a2db7790f33894575bd595578..7d4573afed790dda206a1a912ae2900e0ee42676 100644
--- a/cluster-autoscaler/core/scaleup/resource/manager_test.go
+++ b/cluster-autoscaler/core/scaleup/resource/manager_test.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/core/test"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
 	processorstest "k8s.io/autoscaler/cluster-autoscaler/processors/test"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
 	utils_test "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -72,7 +71,7 @@ func TestDeltaForNode(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -115,7 +114,7 @@ func TestResourcesLeft(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		_, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -168,7 +167,7 @@ func TestApplyLimits(t *testing.T) {
 
 		ng := testCase.nodeGroupConfig
 		group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
-		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+		err := ctx.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 		assert.NoError(t, err)
 		nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
@@ -235,7 +234,7 @@ func TestResourceManagerWithGpuResource(t *testing.T) {
 	assert.NoError(t, err)
 
 	nodes := []*corev1.Node{n1}
-	err = context.ClusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err = context.ClusterSnapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
 
diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go
index 95285b828923de14703ab66410a2c918f83aa9d4..f0109cf366500091f3a4593c6152b8c9c22abb8c 100644
--- a/cluster-autoscaler/core/static_autoscaler.go
+++ b/cluster-autoscaler/core/static_autoscaler.go
@@ -335,7 +335,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
 	}
 	nonExpendableScheduledPods := core_utils.FilterOutExpendablePods(originalScheduledPods, a.ExpendablePodsPriorityCutoff)
 
-	var draSnapshot drasnapshot.Snapshot
+	var draSnapshot *drasnapshot.Snapshot
 	if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.AutoscalingContext.DraProvider != nil {
 		draSnapshot, err = a.AutoscalingContext.DraProvider.Snapshot()
 		if err != nil {
diff --git a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
index c28933637663f63c610a965fa0a2f382664473e8..9a28ea1ae3093c6d68ecaa1d4ca9bbfea13a4adc 100644
--- a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
+++ b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go
@@ -28,7 +28,6 @@ import (
 	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
@@ -86,7 +85,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
 
 	nodes := []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1, ready7, readyToBeDeleted6}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	ctx := context.AutoscalingContext{
@@ -173,7 +172,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
 
 	nodes := []*apiv1.Node{unready4, unready3, ready2, ready1}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	// Fill cache
@@ -264,7 +263,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
 
 	nodes := []*apiv1.Node{ready1}
 	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
-	err := snapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+	err := snapshot.SetClusterState(nodes, nil, nil)
 	assert.NoError(t, err)
 
 	ctx := context.AutoscalingContext{
diff --git a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
index fb6b98efec260397a40d0f4a8ccc256e151b9423..d184637a9026a2da6a9f7cd8d509f68f3697ebe2 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go
@@ -77,7 +77,7 @@ type ClusterSnapshotStore interface {
 
 	// SetClusterState resets the snapshot to an unforked state and replaces the contents of the snapshot
 	// with the provided data. scheduledPods are correlated to their Nodes based on spec.NodeName.
-	SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error
+	SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error
 
 	// ForceAddPod adds the given Pod to the Node with the given nodeName inside the snapshot without checking scheduler predicates.
 	ForceAddPod(pod *apiv1.Pod, nodeName string) error
@@ -93,7 +93,7 @@ type ClusterSnapshotStore interface {
 	RemoveSchedulerNodeInfo(nodeName string) error
 
 	// DraSnapshot returns an interface that allows accessing and modifying the DRA objects in the snapshot.
-	DraSnapshot() drasnapshot.Snapshot
+	DraSnapshot() *drasnapshot.Snapshot
 
 	// Fork creates a fork of snapshot state. All modifications can later be reverted to moment of forking via Revert().
 	// Use WithForkedSnapshot() helper function instead if possible.
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
index 8c4f359327563a84094116f4c7d1ab5978516cc4..d347e4e7d2107a4f2aa94c7f7bf4dd21d0d989ae 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go
@@ -56,6 +56,7 @@ func (s *PredicateSnapshot) GetNodeInfo(nodeName string) (*framework.NodeInfo, e
 	if err != nil {
 		return nil, err
 	}
+
 	if s.draEnabled {
 		return s.ClusterSnapshotStore.DraSnapshot().WrapSchedulerNodeInfo(schedNodeInfo)
 	}
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
index 1e4c5fbf885c9c695bd81c57c50b94ef9cdb2938..b384bcecc02f7637b7a9647aa3ccf05538ab8c52 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go
@@ -23,7 +23,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -40,7 +39,7 @@ func BenchmarkAddNodeInfo(b *testing.B) {
 			b.Run(fmt.Sprintf("%s: AddNodeInfo() %d", snapshotName, tc), func(b *testing.B) {
 				for i := 0; i < b.N; i++ {
 					b.StopTimer()
-					assert.NoError(b, clusterSnapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+					assert.NoError(b, clusterSnapshot.SetClusterState(nil, nil, nil))
 					b.StartTimer()
 					for _, node := range nodes {
 						err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node))
@@ -62,7 +61,7 @@ func BenchmarkListNodeInfos(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc)
 			clusterSnapshot, err := snapshotFactory()
 			assert.NoError(b, err)
-			err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 			if err != nil {
 				assert.NoError(b, err)
 			}
@@ -92,14 +91,14 @@ func BenchmarkAddPods(b *testing.B) {
 			clustersnapshot.AssignTestPodsToNodes(pods, nodes)
 			clusterSnapshot, err := snapshotFactory()
 			assert.NoError(b, err)
-			err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+			err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 			assert.NoError(b, err)
 			b.ResetTimer()
 			b.Run(fmt.Sprintf("%s: ForceAddPod() 30*%d", snapshotName, tc), func(b *testing.B) {
 				for i := 0; i < b.N; i++ {
 					b.StopTimer()
 
-					err = clusterSnapshot.SetClusterState(nodes, nil, drasnapshot.Snapshot{})
+					err = clusterSnapshot.SetClusterState(nodes, nil, nil)
 					if err != nil {
 						assert.NoError(b, err)
 					}
@@ -128,7 +127,7 @@ func BenchmarkForkAddRevert(b *testing.B) {
 				clustersnapshot.AssignTestPodsToNodes(pods, nodes)
 				clusterSnapshot, err := snapshotFactory()
 				assert.NoError(b, err)
-				err = clusterSnapshot.SetClusterState(nodes, pods, drasnapshot.Snapshot{})
+				err = clusterSnapshot.SetClusterState(nodes, pods, nil)
 				assert.NoError(b, err)
 				tmpNode1 := BuildTestNode("tmp-1", 2000, 2000000)
 				tmpNode2 := BuildTestNode("tmp-2", 2000, 2000000)
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_dra_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_dra_benchmark_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..581e7ed5d10124df4f80f27a7ed603426b143ba4
--- /dev/null
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_dra_benchmark_test.go
@@ -0,0 +1,397 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package predicate
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/google/uuid"
+	apiv1 "k8s.io/api/core/v1"
+	resourceapi "k8s.io/api/resource/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apiserver/pkg/util/feature"
+	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
+	drautils "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/utils"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
+	featuretesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/kubernetes/pkg/features"
+
+	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
+)
+
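+// createTestResourceSlice builds a node-local ResourceSlice for the given driver with a random
+// name/UID, containing devicesPerSlice copies of the provided device template. The slice's pool
+// is named after the node and reports slicesPerNode as its total slice count.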
+func createTestResourceSlice(nodeName string, devicesPerSlice int, slicesPerNode int, driver string, device resourceapi.BasicDevice) *resourceapi.ResourceSlice {
+	sliceId := uuid.New().String()
+	name := fmt.Sprintf("rs-%s", sliceId)
+	uid := types.UID(fmt.Sprintf("rs-%s-uid", sliceId))
+	devices := make([]resourceapi.Device, devicesPerSlice)
+	for deviceIndex := 0; deviceIndex < devicesPerSlice; deviceIndex++ {
+		deviceName := fmt.Sprintf("rs-dev-%s-%d", sliceId, deviceIndex)
+		deviceCopy := device
+		devices[deviceIndex] = resourceapi.Device{Name: deviceName, Basic: &deviceCopy}
+	}
+
+	return &resourceapi.ResourceSlice{
+		ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid},
+		Spec: resourceapi.ResourceSliceSpec{
+			NodeName: nodeName,
+			Driver:   driver,
+			Pool: resourceapi.ResourcePool{
+				Name:               nodeName,
+				ResourceSliceCount: int64(slicesPerNode),
+			},
+			Devices: devices,
+		},
+	}
+}
+
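+// createTestResourceClaim builds a ResourceClaim in the "default" namespace with a random name/UID
+// and requestsPerClaim device requests, each asking for exactly devicesPerRequest devices of the
+// given device class, restricted to the given driver via a CEL selector.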
+func createTestResourceClaim(requestsPerClaim int, devicesPerRequest int, driver string, deviceClass string) *resourceapi.ResourceClaim {
+	claimId := uuid.New().String()
+	name := fmt.Sprintf("claim-%s", claimId)
+	uid := types.UID(fmt.Sprintf("claim-%s-uid", claimId))
+	expression := fmt.Sprintf(`device.driver == "%s"`, driver)
+
+	requests := make([]resourceapi.DeviceRequest, requestsPerClaim)
+	for requestIndex := 0; requestIndex < requestsPerClaim; requestIndex++ {
+		requests[requestIndex] = resourceapi.DeviceRequest{
+			Name:            fmt.Sprintf("deviceRequest-%d", requestIndex),
+			DeviceClassName: deviceClass,
+			Selectors:       []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: expression}}},
+			AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
+			Count:           int64(devicesPerRequest),
+		}
+	}
+
+	return &resourceapi.ResourceClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid, Namespace: "default"},
+		Spec: resourceapi.ResourceClaimSpec{
+			Devices: resourceapi.DeviceClaim{Requests: requests},
+		},
+	}
+}
+
+// allocateResourceSlicesForClaim attempts to allocate devices from the provided ResourceSlices
+// to satisfy the requests in the given ResourceClaim. It iterates through the claim's device
+// requests and, for each request, tries to find enough available devices in the provided slices.
+//
+// The function returns a new ResourceClaim object with the allocation result (if successful)
+// and a boolean indicating whether all requests in the claim were satisfied.
+//
+// If not all requests can be satisfied with the given slices, the returned ResourceClaim will
+// have a partial or empty allocation, and the boolean will be false.
+// The original ResourceClaim object is not modified.
+func allocateResourceSlicesForClaim(claim *resourceapi.ResourceClaim, nodeName string, slices ...*resourceapi.ResourceSlice) (*resourceapi.ResourceClaim, bool) {
+	allocatedDevices := make([]resourceapi.DeviceRequestAllocationResult, 0, len(claim.Spec.Devices.Requests))
+	sliceIndex, deviceIndex := 0, 0
+	requestSatisfied := true
+
+allocationLoop:
+	for _, request := range claim.Spec.Devices.Requests {
+		for devicesRequired := request.Count; devicesRequired > 0; devicesRequired-- {
+			// Skip resource slices until we find one that still has at least one device available.
+			for sliceIndex < len(slices) && deviceIndex >= len(slices[sliceIndex].Spec.Devices) {
+				sliceIndex++
+				deviceIndex = 0
+			}
+
+			// If we ran out of resource slices while the claim still has pending device requests,
+			// terminate the allocation loop and indicate that the claim wasn't fully satisfied.
+			if sliceIndex >= len(slices) {
+				requestSatisfied = false
+				break allocationLoop
+			}
+
+			slice := slices[sliceIndex]
+			device := slice.Spec.Devices[deviceIndex]
+			deviceAllocation := resourceapi.DeviceRequestAllocationResult{
+				Request: request.Name,
+				Driver:  slice.Spec.Driver,
+				Pool:    slice.Spec.Pool.Name,
+				Device:  device.Name,
+			}
+
+			allocatedDevices = append(allocatedDevices, deviceAllocation)
+			deviceIndex++
+		}
+	}
+
+	allocation := &resourceapi.AllocationResult{
+		NodeSelector: selectorForNode(nodeName),
+		Devices:      resourceapi.DeviceAllocationResult{Results: allocatedDevices},
+	}
+
+	return drautils.TestClaimWithAllocation(claim, allocation), requestSatisfied
+}
+
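+// selectorForNode returns a NodeSelector that matches only the node with the given name.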
+func selectorForNode(node string) *apiv1.NodeSelector {
+	return &apiv1.NodeSelector{
+		NodeSelectorTerms: []apiv1.NodeSelectorTerm{
+			{
+				MatchFields: []apiv1.NodeSelectorRequirement{
+					{
+						Key:      "metadata.name",
+						Operator: apiv1.NodeSelectorOpIn,
+						Values:   []string{node},
+					},
+				},
+			},
+		},
+	}
+}
+
+// BenchmarkScheduleRevert measures the performance of scheduling pods that use the Dynamic Resource Allocation (DRA)
+// API onto nodes within a cluster snapshot, followed by snapshot manipulation operations (fork, commit, revert).
+//
+// The benchmark runs a matrix of configurations, varying:
+// - The number of nodes in the initial snapshot.
+// - The number of pods being scheduled, categorized by whether they use shared or pod-owned ResourceClaims.
+// - The number of snapshot operations (Fork, Commit, Revert) performed before/after scheduling.
+//
+// For each configuration and snapshot type, the benchmark performs the following steps:
+//  1. Initializes a cluster snapshot with a predefined set of nodes, ResourceSlices, DeviceClasses, and pre-allocated ResourceClaims (both shared and potentially pod-owned).
+//  2. Iterates through a subset of the nodes based on the configuration.
+//  3. For each node:
+//     a. Performs the configured number of snapshot Forks.
+//     b. Adds the node's NodeInfo (including its ResourceSlices) to the snapshot.
+//     c. Schedules a configured number of pods that reference a shared ResourceClaim onto the node.
+//     d. Schedules a configured number of pods that reference their own pre-allocated pod-owned ResourceClaims onto the node.
+//     e. Performs the configured number of snapshot Commits.
+//     f. Performs the configured number of snapshot Reverts.
+//
+// This benchmark helps evaluate the efficiency of:
+// - Scheduling pods with different types of DRA claims.
+// - Adding nodes with DRA resources to the snapshot.
+// - The overhead of snapshot Fork, Commit, and Revert operations, especially in scenarios involving DRA objects.
+func BenchmarkScheduleRevert(b *testing.B) {
+	featuretesting.SetFeatureGateDuringTest(b, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
+
+	const maxNodesCount = 100
+	const devicesPerSlice = 100
+	const maxPodsCount = 100
+	const deviceClassName = "defaultClass"
+	const driverName = "driver.foo.com"
+
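+	// Configuration names follow the pattern <nodes>x<pods>[x<owned-claim pods>]/<claim kind>/<ops>,
+	// where <ops> lists the snapshot operations performed for every node processed in an iteration.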
+	configurations := map[string]struct {
+		nodesCount int
+
+		sharedClaimPods int
+		ownedClaimPods  int
+		forks           int
+		commits         int
+		reverts         int
+	}{
+		// SHARED CLAIMS
+		"100x32/SharedClaims/ForkRevert":           {sharedClaimPods: 32, nodesCount: 100, forks: 1, reverts: 1},
+		"100x32/SharedClaims/ForkCommit":           {sharedClaimPods: 32, nodesCount: 100, forks: 1, commits: 1},
+		"100x32/SharedClaims/ForkForkCommitRevert": {sharedClaimPods: 32, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x32/SharedClaims/Fork":                 {sharedClaimPods: 32, nodesCount: 100, forks: 1},
+		"100x32/SharedClaims/Fork5Revert5":         {sharedClaimPods: 32, nodesCount: 100, forks: 5, reverts: 5},
+		"100x1/SharedClaims/ForkRevert":            {sharedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
+		"100x1/SharedClaims/ForkCommit":            {sharedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
+		"100x1/SharedClaims/ForkForkCommitRevert":  {sharedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x1/SharedClaims/Fork":                  {sharedClaimPods: 1, nodesCount: 100, forks: 1},
+		"100x1/SharedClaims/Fork5Revert5":          {sharedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
+		"10x32/SharedClaims/ForkRevert":            {sharedClaimPods: 32, nodesCount: 10, forks: 1, reverts: 1},
+		"10x32/SharedClaims/ForkCommit":            {sharedClaimPods: 32, nodesCount: 10, forks: 1, commits: 1},
+		"10x32/SharedClaims/ForkForkCommitRevert":  {sharedClaimPods: 32, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x32/SharedClaims/Fork":                  {sharedClaimPods: 32, nodesCount: 10, forks: 1},
+		"10x32/SharedClaims/Fork5Revert5":          {sharedClaimPods: 32, nodesCount: 10, forks: 5, reverts: 5},
+		"10x1/SharedClaims/ForkRevert":             {sharedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
+		"10x1/SharedClaims/ForkCommit":             {sharedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
+		"10x1/SharedClaims/ForkForkCommitRevert":   {sharedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x1/SharedClaims/Fork":                   {sharedClaimPods: 1, nodesCount: 10, forks: 1},
+		"10x1/SharedClaims/Fork5Revert5":           {sharedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
+		// POD OWNED CLAIMS
+		"100x100/OwnedClaims/ForkRevert":           {ownedClaimPods: 100, nodesCount: 100, forks: 1, reverts: 1},
+		"100x100/OwnedClaims/ForkCommit":           {ownedClaimPods: 100, nodesCount: 100, forks: 1, commits: 1},
+		"100x100/OwnedClaims/ForkForkCommitRevert": {ownedClaimPods: 100, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x100/OwnedClaims/Fork":                 {ownedClaimPods: 100, nodesCount: 100, forks: 1},
+		"100x100/OwnedClaims/Fork5Revert5":         {ownedClaimPods: 100, nodesCount: 100, forks: 5, reverts: 5},
+		"100x1/OwnedClaims/ForkRevert":             {ownedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
+		"100x1/OwnedClaims/ForkCommit":             {ownedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
+		"100x1/OwnedClaims/ForkForkCommitRevert":   {ownedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x1/OwnedClaims/Fork":                   {ownedClaimPods: 1, nodesCount: 100, forks: 1},
+		"100x1/OwnedClaims/Fork5Revert5":           {ownedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
+		"10x100/OwnedClaims/ForkRevert":            {ownedClaimPods: 100, nodesCount: 10, forks: 1, reverts: 1},
+		"10x100/OwnedClaims/ForkCommit":            {ownedClaimPods: 100, nodesCount: 10, forks: 1, commits: 1},
+		"10x100/OwnedClaims/ForkForkCommitRevert":  {ownedClaimPods: 100, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x100/OwnedClaims/Fork":                  {ownedClaimPods: 100, nodesCount: 10, forks: 1},
+		"10x100/OwnedClaims/Fork5Revert5":          {ownedClaimPods: 100, nodesCount: 10, forks: 5, reverts: 5},
+		"10x1/OwnedClaims/ForkRevert":              {ownedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
+		"10x1/OwnedClaims/ForkCommit":              {ownedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
+		"10x1/OwnedClaims/ForkForkCommitRevert":    {ownedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x1/OwnedClaims/Fork":                    {ownedClaimPods: 1, nodesCount: 10, forks: 1},
+		"10x1/OwnedClaims/Fork5Revert5":            {ownedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
+		// MIXED CLAIMS
+		"100x32x50/MixedClaims/ForkRevert":           {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1, reverts: 1},
+		"100x32x50/MixedClaims/ForkCommit":           {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1, commits: 1},
+		"100x32x50/MixedClaims/ForkForkCommitRevert": {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x32x50/MixedClaims/Fork":                 {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1},
+		"100x32x50/MixedClaims/Fork5Revert5":         {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 5, reverts: 5},
+		"100x1x1/MixedClaims/ForkRevert":             {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
+		"100x1x1/MixedClaims/ForkCommit":             {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
+		"100x1x1/MixedClaims/ForkForkCommitRevert":   {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
+		"100x1x1/MixedClaims/Fork":                   {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1},
+		"100x1x1/MixedClaims/Fork5Revert5":           {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
+		"10x32x50/MixedClaims/ForkRevert":            {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1, reverts: 1},
+		"10x32x50/MixedClaims/ForkCommit":            {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1, commits: 1},
+		"10x32x50/MixedClaims/ForkForkCommitRevert":  {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x32x50/MixedClaims/Fork":                  {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1},
+		"10x32x50/MixedClaims/Fork5Revert5":          {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 5, reverts: 5},
+		"10x1x1/MixedClaims/ForkRevert":              {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
+		"10x1x1/MixedClaims/ForkCommit":              {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
+		"10x1x1/MixedClaims/ForkForkCommitRevert":    {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
+		"10x1x1/MixedClaims/Fork":                    {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1},
+		"10x1x1/MixedClaims/Fork5Revert5":            {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
+	}
+
+	devicesClasses := map[string]*resourceapi.DeviceClass{
+		deviceClassName: {ObjectMeta: metav1.ObjectMeta{Name: deviceClassName, UID: "defaultClassUid"}},
+	}
+
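+	// Pre-build the per-node fixtures (NodeInfos with ResourceSlices, an allocated shared claim per
+	// node, and pod-owned claims with their owning pods) so this setup stays outside the timed loop.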
+	nodeInfos := make([]*framework.NodeInfo, maxNodesCount)
+	sharedClaims := make([]*resourceapi.ResourceClaim, maxNodesCount)
+	ownedClaims := make([][]*resourceapi.ResourceClaim, maxNodesCount)
+	owningPods := make([][]*apiv1.Pod, maxNodesCount)
+	for nodeIndex := 0; nodeIndex < maxNodesCount; nodeIndex++ {
+		nodeName := fmt.Sprintf("node-%d", nodeIndex)
+		node := BuildTestNode(nodeName, 10000, 10000)
+		nodeSlice := createTestResourceSlice(node.Name, devicesPerSlice, 1, driverName, resourceapi.BasicDevice{})
+		nodeInfo := framework.NewNodeInfo(node, []*resourceapi.ResourceSlice{nodeSlice})
+
+		sharedClaim := createTestResourceClaim(devicesPerSlice, 1, driverName, deviceClassName)
+		sharedClaim, satisfied := allocateResourceSlicesForClaim(sharedClaim, nodeName, nodeSlice)
+		if !satisfied {
+			b.Errorf("Error during setup: claim allocation cannot be satisfied")
+		}
+
+		claimsOnNode := make([]*resourceapi.ResourceClaim, maxPodsCount)
+		podsOnNode := make([]*apiv1.Pod, maxPodsCount)
+		for podIndex := 0; podIndex < maxPodsCount; podIndex++ {
+			podName := fmt.Sprintf("pod-%d-%d", nodeIndex, podIndex)
+			ownedClaim := createTestResourceClaim(1, 1, driverName, deviceClassName)
+			pod := BuildTestPod(
+				podName,
+				1,
+				1,
+				WithResourceClaim(ownedClaim.Name, ownedClaim.Name, ""),
+			)
+
+			ownedClaim = drautils.TestClaimWithPodOwnership(pod, ownedClaim)
+			ownedClaim, satisfied := allocateResourceSlicesForClaim(ownedClaim, nodeName, nodeSlice)
+			if !satisfied {
+				b.Errorf("Error during setup: claim allocation cannot be satisfied")
+			}
+
+			podsOnNode[podIndex] = pod
+			claimsOnNode[podIndex] = ownedClaim
+		}
+
+		nodeInfos[nodeIndex] = nodeInfo
+		sharedClaims[nodeIndex] = sharedClaim
+		ownedClaims[nodeIndex] = claimsOnNode
+		owningPods[nodeIndex] = podsOnNode
+	}
+
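+	// Exclude the fixture setup above from the outer benchmark's timing.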
+	b.ResetTimer()
+	for snapshotName, snapshotFactory := range snapshots {
+		b.Run(snapshotName, func(b *testing.B) {
+			for cfgName, cfg := range configurations {
+				b.Run(cfgName, func(b *testing.B) {
+					for i := 0; i < b.N; i++ {
+						snapshot, err := snapshotFactory()
+						if err != nil {
+							b.Errorf("Failed to create a snapshot: %v", err)
+						}
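+						// Seed a fresh DRA snapshot with the device classes and the pre-allocated
+						// claims for the configured number of nodes.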
+
+						draSnapshot := drasnapshot.NewSnapshot(
+							nil,
+							nil,
+							nil,
+							devicesClasses,
+						)
+
+						draSnapshot.AddClaims(sharedClaims)
+						for nodeIndex := 0; nodeIndex < cfg.nodesCount; nodeIndex++ {
+							draSnapshot.AddClaims(ownedClaims[nodeIndex])
+						}
+
+						err = snapshot.SetClusterState(nil, nil, draSnapshot)
+						if err != nil {
+							b.Errorf("Failed to set cluster state: %v", err)
+						}
+
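+						// Measured work per node: fork the snapshot, add the node with its ResourceSlices,
+						// schedule the configured pods against shared and pod-owned claims, then commit/revert.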
+						for nodeIndex := 0; nodeIndex < cfg.nodesCount; nodeIndex++ {
+							nodeInfo := nodeInfos[nodeIndex]
+							for i := 0; i < cfg.forks; i++ {
+								snapshot.Fork()
+							}
+
+							err := snapshot.AddNodeInfo(nodeInfo)
+							if err != nil {
+								b.Errorf("Failed to add node info to snapshot: %v", err)
+							}
+
+							sharedClaim := sharedClaims[nodeIndex]
+							for podIndex := 0; podIndex < cfg.sharedClaimPods; podIndex++ {
+								pod := BuildTestPod(
+									fmt.Sprintf("pod-%d", podIndex),
+									1,
+									1,
+									WithResourceClaim(sharedClaim.Name, sharedClaim.Name, ""),
+								)
+
+								err := snapshot.SchedulePod(pod, nodeInfo.Node().Name)
+								if err != nil {
+									b.Errorf(
+										"Failed to schedule a pod %s to node %s: %v",
+										pod.Name,
+										nodeInfo.Node().Name,
+										err,
+									)
+								}
+							}
+
+							for podIndex := 0; podIndex < cfg.ownedClaimPods; podIndex++ {
+								owningPod := owningPods[nodeIndex][podIndex]
+								err := snapshot.SchedulePod(owningPod, nodeInfo.Node().Name)
+								if err != nil {
+									b.Errorf(
+										"Failed to schedule a pod %s to node %s: %v",
+										owningPod.Name,
+										nodeInfo.Node().Name,
+										err,
+									)
+								}
+							}
+
+							for i := 0; i < cfg.commits; i++ {
+								snapshot.Commit()
+							}
+
+							for i := 0; i < cfg.reverts; i++ {
+								snapshot.Revert()
+							}
+						}
+					}
+				})
+			}
+		})
+	}
+}
diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
index ad9b78638788a29c1ab96cf68107336bf068e2d4..3d98e3ac608a012a843cf24bd7c66c0e07066909 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go
@@ -71,7 +71,7 @@ func extractNodes(nodeInfos []*framework.NodeInfo) []*apiv1.Node {
 type snapshotState struct {
 	nodes       []*apiv1.Node
 	podsByNode  map[string][]*apiv1.Pod
-	draSnapshot drasnapshot.Snapshot
+	draSnapshot *drasnapshot.Snapshot
 }
 
 func compareStates(t *testing.T, a, b snapshotState) {
@@ -148,7 +148,9 @@ func startSnapshot(t *testing.T, snapshotFactory func() (clustersnapshot.Cluster
 			pods = append(pods, pod)
 		}
 	}
-	err = snapshot.SetClusterState(state.nodes, pods, state.draSnapshot.Clone())
+
+	draSnapshot := drasnapshot.CloneTestSnapshot(state.draSnapshot)
+	err = snapshot.SetClusterState(state.nodes, pods, draSnapshot)
 	assert.NoError(t, err)
 	return snapshot
 }
@@ -476,704 +478,707 @@ func validTestCases(t *testing.T, snapshotName string) []modificationTestCase {
 				podsByNode: map[string][]*apiv1.Pod{largeNode.Name: {withNodeName(largePod, largeNode.Name)}},
 			},
 		},
-	}
-
-	// Only the Basic store is compatible with DRA for now.
-	if snapshotName == "basic" {
-		// Uncomment to get logs from the DRA plugin.
-		// var fs flag.FlagSet
-		// klog.InitFlags(&fs)
-		//if err := fs.Set("v", "10"); err != nil {
-		//	t.Fatalf("Error while setting higher klog verbosity: %v", err)
-		//}
-		featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
-
-		testCases = append(testCases, []modificationTestCase{
-			{
-				name: "add empty nodeInfo with LocalResourceSlices",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
-				},
-				// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
-				modifiedState: snapshotState{
-					nodes:       []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		{
+			name: "add empty nodeInfo with LocalResourceSlices",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, nil, nil, deviceClasses),
 			},
-			{
-				name: "add nodeInfo with LocalResourceSlices and NeededResourceClaims",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				// The pod in the added NodeInfo references the shared claim already in the DRA snapshot, and a new pod-owned allocated claim that
-				// needs to be added to the DRA snapshot.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The shared claim should just get a reservation for the pod added in the DRA snapshot.
-				// The pod-owned claim should get added to the DRA snapshot, with a reservation for the pod.
-				// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
 			},
-			{
-				name: "adding LocalResourceSlices for an already tracked Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
-				},
-				// LocalResourceSlices for the Node already exist in the DRA snapshot, so trying to add them again should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
+			modifiedState: snapshotState{
+				nodes:       []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding already tracked pod-owned ResourceClaims is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The pod-owned claim already exists in the DRA snapshot, so trying to add it again should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "add nodeInfo with LocalResourceSlices and NeededResourceClaims",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "adding unallocated pod-owned ResourceClaims is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						podOwnedClaim.DeepCopy(),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod-owned claim isn't allocated, so AddNodeInfo should fail.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(podOwnedClaim, podWithClaims),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The pod in the added NodeInfo references the shared claim already in the DRA snapshot, and a new pod-owned allocated claim that
+			// needs to be added to the DRA snapshot.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "adding pod-owned ResourceClaims allocated to the wrong Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod-owned claim is allocated to a different Node than the one being added, so AddNodeInfo should fail.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode), podWithClaims),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The shared claim should just get a reservation for the pod added in the DRA snapshot.
+			// The pod-owned claim should get added to the DRA snapshot, with a reservation for the pod.
+			// LocalResourceSlices from the NodeInfo should get added to the DRA snapshot.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding a pod referencing a shared claim already at max reservations is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding LocalResourceSlices for an already tracked Node is an error",
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "adding a pod referencing its own claim without adding the claim is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The added pod references a pod-owned claim that isn't present in the PodInfo - this should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices))
 			},
-			{
-				name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims",
-				// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
-				// One claim is shared, one is pod-owned.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
-						nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.RemoveNodeInfo(node.Name)
-				},
-				// LocalResourceSlices for the removed Node should get removed from the DRA snapshot.
-				// The pod-owned claim referenced by a pod from the removed Node should get removed from the DRA snapshot.
-				// The shared claim referenced by a pod from the removed Node should stay in the DRA snapshot, but the pod's reservation should be removed.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						}, nil, nil, deviceClasses),
-				},
+			// LocalResourceSlices for the Node already exist in the DRA snapshot, so trying to add them again should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims, then add it back",
-				// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
-						nil, deviceClasses),
-				},
-				// Remove the NodeInfo and then add it back to the snapshot.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.RemoveNodeInfo(node.Name); err != nil {
-						return err
-					}
-					podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
-						drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-						drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-					})
-					return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
-				},
-				// The state should be identical to the initial one after the modifications.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding already tracked pod-owned ResourceClaims is an error",
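+			// Start with both the shared claim and the pod-owned claim already tracked in the DRA snapshot.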
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "removing LocalResourceSlices for a non-existing Node is an error",
-				state: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.RemoveNodeInfo("wrong-name")
-				},
-				// The removed Node isn't in the snapshot, so this should be an error.
-				wantErr: cmpopts.AnyError,
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "schedule pod with NeededResourceClaims to an existing nodeInfo",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references, but they aren't allocated yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The pod should get added to the Node.
-				// The claims referenced by the Pod should get allocated and reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The pod-owned claim already exists in the DRA snapshot, so trying to add it again should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "schedule pod with NeededResourceClaims (some of them shared and already allocated) to an existing nodeInfo",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated, the pod-owned one isn't yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the pod-owned claim in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The pod should get added to the Node.
-				// The pod-owned claim referenced by the Pod should get allocated. Both claims referenced by the Pod should get reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding unallocated pod-owned ResourceClaims is an error",
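+			// Start with only the allocated shared claim tracked in the DRA snapshot. The pod-owned claim being added isn't allocated.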
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "scheduling pod with failing DRA predicates is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot doesn't track one of the claims
-				// referenced by the Pod we're trying to schedule.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// SchedulePod should fail at checking scheduling predicates, because the DRA plugin can't find one of the claims.
-				wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					podOwnedClaim.DeepCopy(),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "scheduling pod referencing a shared claim already at max reservations is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated and at max reservations.
-				state: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.SchedulePod(podWithClaims, node.Name)
-				},
-				// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
-				wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePod, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{node},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The added pod-owned claim isn't allocated, so AddNodeInfo should fail.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(podOwnedClaim, podWithClaims),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "schedule pod with NeededResourceClaims to any Node (only one Node has ResourceSlices)",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
-				// that the pod references, but they aren't allocated yet.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if diff := cmp.Diff(node.Name, foundNodeName); diff != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output (-want +got): %s", diff)
-					}
-					return err
-				},
-				// The pod should get added to the Node.
-				// The claims referenced by the Pod should get allocated and reserved for the Pod.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{otherNode, node, largeNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "adding pod-owned ResourceClaims allocated to the wrong Node is an error",
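+			// Start with only the allocated shared claim tracked in the DRA snapshot. The pod-owned claim being added is allocated to a different Node.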
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
 			},
-			{
-				name: "scheduling pod on any Node with failing DRA predicates is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot doesn't track one of the claims
-				// referenced by the Pod we're trying to schedule.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if foundNodeName != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
-					}
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The added pod-owned claim is allocated to a different Node than the one being added, so AddNodeInfo should fail.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAllocWrongNode), podWithClaims),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "adding a pod referencing a shared claim already at max reservations is an error",
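+			// Start with the shared claim tracked in the DRA snapshot, already allocated and at the max reservation count.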
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					}, nil, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "adding a pod referencing its own claim without adding the claim is an error",
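+			// Start with only the allocated shared claim tracked in the DRA snapshot. The pod-owned claim referenced by the pod isn't included in the added PodInfo.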
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
+			},
+			// The added pod references a pod-owned claim that isn't present in the PodInfo - this should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in AddNodeInfo, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims",
+			// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
+			// One claim is shared, one is pod-owned.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
+					nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.RemoveNodeInfo(node.Name)
+			},
+			// LocalResourceSlices for the removed Node should get removed from the DRA snapshot.
+			// The pod-owned claim referenced by a pod from the removed Node should get removed from the DRA snapshot.
+			// The shared claim referenced by a pod from the removed Node should stay in the DRA snapshot, but the pod's reservation should be removed.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					}, nil, nil, deviceClasses),
+			},
+		},
+		{
+			name: "remove nodeInfo with LocalResourceSlices and NeededResourceClaims, then add it back",
+			// Start with a NodeInfo with LocalResourceSlices and pods with NeededResourceClaims in the DRA snapshot.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices},
+					nil, deviceClasses),
+			},
+			// Remove the NodeInfo and then add it back to the snapshot.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.RemoveNodeInfo(node.Name); err != nil {
 					return err
-				},
-				// SchedulePodOnAnyNodeMatching should fail at checking scheduling predicates for every Node, because the DRA plugin can't find one of the claims.
-				wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(sharedClaim): sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+				}
+				podInfo := framework.NewPodInfo(podWithClaims, []*resourceapi.ResourceClaim{
+					drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+					drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+				})
+				return snapshot.AddNodeInfo(framework.NewNodeInfo(node, resourceSlices, podInfo))
 			},
-			{
-				name: "scheduling pod referencing a shared claim already at max reservations on any Node is an error",
-				// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
-				// that the pod references. The shared claim is already allocated and at max reservations.
-				state: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
-					if foundNodeName != "" {
-						t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
-					}
+			// The state should be identical to the initial one after the modifications.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "removing LocalResourceSlices for a non-existing Node is an error",
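+			// Start with LocalResourceSlices for a single Node tracked in the DRA snapshot.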
+			state: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.RemoveNodeInfo("wrong-name")
+			},
+			// The removed Node isn't in the snapshot, so this should be an error.
+			wantErr: cmpopts.AnyError,
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				draSnapshot: drasnapshot.NewSnapshot(nil, map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims to an existing nodeInfo",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references, but they aren't allocated yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The pod should get added to the Node.
+			// The claims referenced by the Pod should get allocated and reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims (some of them shared and already allocated) to an existing nodeInfo",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated, the pod-owned one isn't yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the pod-owned claim in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The pod should get added to the Node.
+			// The pod-owned claim referenced by the Pod should get allocated. Both claims referenced by the Pod should get reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod with failing DRA predicates is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot doesn't track one of the claims
+			// referenced by the Pod we're trying to schedule.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// SchedulePod should fail at checking scheduling predicates, because the DRA plugin can't find one of the claims.
+			wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod referencing a shared claim already at max reservations is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated and at max reservations.
+			state: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.SchedulePod(podWithClaims, node.Name)
+			},
+			// The shared claim referenced by the pod is already at the max reservation count, and no more reservations can be added - this should be an error to match scheduler behavior.
+			wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePod, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{node},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "schedule pod with NeededResourceClaims to any Node (only one Node has ResourceSlices)",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
+			// that the pod references, but they aren't allocated yet.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			// Run SchedulePod, which should allocate the claims in the DRA snapshot via the DRA scheduler plugin.
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if diff := cmp.Diff(node.Name, foundNodeName); diff != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output (-want +got): %s", diff)
+				}
+				return err
+			},
+			// The pod should get added to the Node.
+			// The claims referenced by the Pod should get allocated and reserved for the Pod.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{otherNode, node, largeNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {podWithClaims}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod on any Node with failing DRA predicates is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot doesn't track one of the claims
+			// referenced by the Pod we're trying to schedule.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim.DeepCopy(),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if foundNodeName != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
+				}
+				return err
+			},
+			// SchedulePodOnAnyNodeMatching should fail at checking scheduling predicates for every Node, because the DRA plugin can't find one of the claims.
+			wantErr: clustersnapshot.NewFailingPredicateError(nil, "", nil, "", ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(sharedClaim): sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "scheduling pod referencing a shared claim already at max reservations on any Node is an error",
+			// Start with a NodeInfo with LocalResourceSlices but no Pods, plus some other Nodes that don't have any slices. The DRA snapshot already tracks all the claims
+			// that the pod references. The shared claim is already allocated and at max reservations.
+			state: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim.DeepCopy(),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				foundNodeName, err := snapshot.SchedulePodOnAnyNodeMatching(podWithClaims, func(_ *framework.NodeInfo) bool { return true })
+				if foundNodeName != "" {
+					t.Errorf("SchedulePodOnAnyNodeMatching(): unexpected output: want empty string, got %q", foundNodeName)
+				}
+				return err
+			},
+			// SchedulePodOnAnyNodeMatching should fail for every Node when trying to add a reservation to the shared claim, since it's already at the max reservation count.
+			wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
+			// The state shouldn't change on error.
+			// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePodOnAnyNodeMatching, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
+			modifiedState: snapshotState{
+				nodes: []*apiv1.Node{otherNode, node, largeNode},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
+						drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
+			},
+			// The unscheduled pod should be removed from the Node.
+			// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed, and the claims should be deallocated.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+						drasnapshot.GetClaimId(sharedClaim):   sharedClaim,
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims and schedule it back",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
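+			// Unschedule the pod and then schedule it back onto the same Node.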
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
 					return err
-				},
-				// SchedulePodOnAnyNodeMatching should fail at trying to add a reservation to the shared claim for every Node.
-				wantErr: clustersnapshot.NewSchedulingInternalError(nil, ""), // Only the type of the error is asserted (via cmp.EquateErrors() and errors.Is()), so the parameters don't matter here.
-				// The state shouldn't change on error.
-				// TODO(DRA): Until transaction-like clean-up is implemented in SchedulePodOnAnyNodeMatching, the state is not cleaned up on error. Make modifiedState identical to initial state after the clean-up is implemented.
-				modifiedState: snapshotState{
-					nodes: []*apiv1.Node{otherNode, node, largeNode},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc),
-							drasnapshot.GetClaimId(sharedClaim):   fullyReservedClaim(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc)),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+				}
+				return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
-				},
-				// The unscheduled pod should be removed from the Node.
-				// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed, and the claims should be deallocated.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-							drasnapshot.GetClaimId(sharedClaim):   sharedClaim,
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims and schedule it back",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim, both used only by the pod.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
-						return err
-					}
-					return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods)",
+			// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods)",
-				// Start with a Pod already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
-				},
-				// The unscheduled pod should be removed from the Node.
-				// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed.
-				// The pod-owned claim should get deallocated, but the shared one shouldn't.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				return snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name)
 			},
-			{
-				name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods) and schedule it back",
-				// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
-						return err
-					}
-					return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+			// The unscheduled pod should be removed from the Node.
+			// The claims referenced by the pod should stay in the DRA snapshot, but the pod's reservations should get removed.
+			// The pod-owned claim should get deallocated, but the shared one shouldn't.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): podOwnedClaim,
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-			{
-				name: "get/list NodeInfo with DRA objects",
-				// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods. There are other Nodes
-				// and pods in the cluster.
-				state: snapshotState{
-					nodes:      []*apiv1.Node{node, otherNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
-				op: func(snapshot clustersnapshot.ClusterSnapshot) error {
-					nodeInfoDiffOpts := []cmp.Option{
-						// We don't care about this field staying the same, and it differs because it's a global counter bumped on every AddPod.
-						cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
-						cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
-						cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
-						cmpopts.SortSlices(func(i1, i2 *framework.NodeInfo) bool { return i1.Node().Name < i2.Node().Name }),
-						IgnoreObjectOrder[*resourceapi.ResourceClaim](),
-						IgnoreObjectOrder[*resourceapi.ResourceSlice](),
-					}
-
-					// Verify that GetNodeInfo works as expected.
-					nodeInfo, err := snapshot.GetNodeInfo(node.Name)
-					if err != nil {
-						return err
-					}
-					wantNodeInfo := framework.NewNodeInfo(node, resourceSlices,
-						framework.NewPodInfo(withNodeName(pod, node.Name), nil),
-						framework.NewPodInfo(withNodeName(podWithClaims, node.Name), []*resourceapi.ResourceClaim{
-							drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						}),
-					)
-					if diff := cmp.Diff(wantNodeInfo, nodeInfo, nodeInfoDiffOpts...); diff != "" {
-						t.Errorf("GetNodeInfo(): unexpected output (-want +got): %s", diff)
-					}
-
-					// Verify that ListNodeInfo works as expected.
-					nodeInfos, err := snapshot.ListNodeInfos()
-					if err != nil {
-						return err
-					}
-					wantNodeInfos := []*framework.NodeInfo{wantNodeInfo, framework.NewNodeInfo(otherNode, nil)}
-					if diff := cmp.Diff(wantNodeInfos, nodeInfos, nodeInfoDiffOpts...); diff != "" {
-						t.Errorf("ListNodeInfos(): unexpected output (-want +got): %s", diff)
-					}
-
-					return nil
-				},
-				// The state shouldn't change.
-				modifiedState: snapshotState{
-					nodes:      []*apiv1.Node{node, otherNode},
-					podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
-					draSnapshot: drasnapshot.NewSnapshot(
-						map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
-							drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
-							drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
-						},
-						map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
-				},
+		},
+		{
+			name: "unschedule Pod with NeededResourceClaims (some are shared and still used by other pods) and schedule it back",
+			// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				if err := snapshot.UnschedulePod(podWithClaims.Namespace, podWithClaims.Name, node.Name); err != nil {
+					return err
+				}
+				return snapshot.SchedulePod(withNodeName(podWithClaims, node.Name), node.Name)
+			},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+		{
+			name: "get/list NodeInfo with DRA objects",
+			// Start with a Pod with NeededResourceClaims already scheduled on a Node. The pod references a pod-owned and a shared claim used by other pods. There are other Nodes
+			// and pods in the cluster.
+			state: snapshotState{
+				nodes:      []*apiv1.Node{node, otherNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
 			},
-		}...)
+			op: func(snapshot clustersnapshot.ClusterSnapshot) error {
+				nodeInfoDiffOpts := []cmp.Option{
+					// We don't care about this field staying the same, and it differs because it's a global counter bumped on every AddPod.
+					cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"),
+					cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
+					cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
+					cmpopts.SortSlices(func(i1, i2 *framework.NodeInfo) bool { return i1.Node().Name < i2.Node().Name }),
+					IgnoreObjectOrder[*resourceapi.ResourceClaim](),
+					IgnoreObjectOrder[*resourceapi.ResourceSlice](),
+				}
+
+				// Verify that GetNodeInfo works as expected.
+				nodeInfo, err := snapshot.GetNodeInfo(node.Name)
+				if err != nil {
+					return err
+				}
+				wantNodeInfo := framework.NewNodeInfo(node, resourceSlices,
+					framework.NewPodInfo(withNodeName(pod, node.Name), nil),
+					framework.NewPodInfo(withNodeName(podWithClaims, node.Name), []*resourceapi.ResourceClaim{
+						drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					}),
+				)
+				if diff := cmp.Diff(wantNodeInfo, nodeInfo, nodeInfoDiffOpts...); diff != "" {
+					t.Errorf("GetNodeInfo(): unexpected output (-want +got): %s", diff)
+				}
+
+				// Verify that ListNodeInfos works as expected.
+				nodeInfos, err := snapshot.ListNodeInfos()
+				if err != nil {
+					return err
+				}
+				wantNodeInfos := []*framework.NodeInfo{wantNodeInfo, framework.NewNodeInfo(otherNode, nil)}
+				if diff := cmp.Diff(wantNodeInfos, nodeInfos, nodeInfoDiffOpts...); diff != "" {
+					t.Errorf("ListNodeInfos(): unexpected output (-want +got): %s", diff)
+				}
+
+				return nil
+			},
+			// The state shouldn't change.
+			modifiedState: snapshotState{
+				nodes:      []*apiv1.Node{node, otherNode},
+				podsByNode: map[string][]*apiv1.Pod{node.Name: {withNodeName(pod, node.Name), withNodeName(podWithClaims, node.Name)}},
+				draSnapshot: drasnapshot.NewSnapshot(
+					map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
+						drasnapshot.GetClaimId(podOwnedClaim): drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(podOwnedClaim, podOwnedClaimAlloc), podWithClaims),
+						drasnapshot.GetClaimId(sharedClaim):   drautils.TestClaimWithPodReservations(drautils.TestClaimWithAllocation(sharedClaim, sharedClaimAlloc), podWithClaims, pod),
+					},
+					map[string][]*resourceapi.ResourceSlice{node.Name: resourceSlices}, nil, deviceClasses),
+			},
+		},
+	}
+
+	for i := range testCases {
+		if testCases[i].modifiedState.draSnapshot == nil {
+			testCases[i].modifiedState.draSnapshot = drasnapshot.NewEmptySnapshot()
+		}
+
+		if testCases[i].state.draSnapshot == nil {
+			testCases[i].state.draSnapshot = drasnapshot.NewEmptySnapshot()
+		}
 	}
 
 	return testCases
 }
 
 func TestForking(t *testing.T) {
-	node := BuildTestNode("specialNode-2", 10, 100)
+	// Uncomment to get logs from the DRA plugin.
+	// var fs flag.FlagSet
+	// klog.InitFlags(&fs)
+	// if err := fs.Set("v", "10"); err != nil {
+	// 	t.Fatalf("Error while setting higher klog verbosity: %v", err)
+	// }
+	featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
 
+	node := BuildTestNode("specialNode-2", 10, 100)
 	for snapshotName, snapshotFactory := range snapshots {
 		for _, tc := range validTestCases(t, snapshotName) {
 			t.Run(fmt.Sprintf("%s: %s base", snapshotName, tc.name), func(t *testing.T) {
@@ -1300,7 +1305,7 @@ func TestSetClusterState(t *testing.T) {
 	nodes := clustersnapshot.CreateTestNodes(nodeCount)
 	pods := clustersnapshot.CreateTestPods(podCount)
 	podsByNode := clustersnapshot.AssignTestPodsToNodes(pods, nodes)
-	state := snapshotState{nodes: nodes, podsByNode: podsByNode}
+	state := snapshotState{nodes: nodes, podsByNode: podsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}
 
 	extraNodes := clustersnapshot.CreateTestNodesWithPrefix("extra", extraNodeCount)
 
@@ -1323,9 +1328,9 @@ func TestSetClusterState(t *testing.T) {
 				snapshot := startSnapshot(t, snapshotFactory, state)
 				compareStates(t, state, getSnapshotState(t, snapshot))
 
-				assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
-				compareStates(t, snapshotState{}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 			})
 		t.Run(fmt.Sprintf("%s: clear base %d nodes %d pods and set a new state", name, nodeCount, podCount),
 			func(t *testing.T) {
@@ -1334,9 +1339,9 @@ func TestSetClusterState(t *testing.T) {
 
 				newNodes, newPods := clustersnapshot.CreateTestNodes(13), clustersnapshot.CreateTestPods(37)
 				newPodsByNode := clustersnapshot.AssignTestPodsToNodes(newPods, newNodes)
-				assert.NoError(t, snapshot.SetClusterState(newNodes, newPods, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(newNodes, newPods, nil))
 
-				compareStates(t, snapshotState{nodes: newNodes, podsByNode: newPodsByNode}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{nodes: newNodes, podsByNode: newPodsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 			})
 		t.Run(fmt.Sprintf("%s: clear fork %d nodes %d pods %d extra nodes %d extra pods", name, nodeCount, podCount, extraNodeCount, extraPodCount),
 			func(t *testing.T) {
@@ -1355,11 +1360,11 @@ func TestSetClusterState(t *testing.T) {
 					assert.NoError(t, err)
 				}
 
-				compareStates(t, snapshotState{nodes: allNodes, podsByNode: allPodsByNode}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{nodes: allNodes, podsByNode: allPodsByNode, draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 
-				assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+				assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
-				compareStates(t, snapshotState{}, getSnapshotState(t, snapshot))
+				compareStates(t, snapshotState{draSnapshot: drasnapshot.NewEmptySnapshot()}, getSnapshotState(t, snapshot))
 
 				// SetClusterState() should break out of forked state.
 				snapshot.Fork()
@@ -1768,7 +1773,7 @@ func TestPVCClearAndFork(t *testing.T) {
 			volumeExists := snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1"))
 			assert.Equal(t, true, volumeExists)
 
-			assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+			assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 			volumeExists = snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1"))
 			assert.Equal(t, false, volumeExists)
 
@@ -1777,6 +1782,14 @@ func TestPVCClearAndFork(t *testing.T) {
 }
 
 func TestWithForkedSnapshot(t *testing.T) {
+	// Uncomment to get logs from the DRA plugin.
+	// var fs flag.FlagSet
+	// klog.InitFlags(&fs)
+	// if err := fs.Set("v", "10"); err != nil {
+	// 	t.Fatalf("Error while setting higher klog verbosity: %v", err)
+	// }
+	featuretesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)
+
 	err := fmt.Errorf("some error")
 	for snapshotName, snapshotFactory := range snapshots {
 		for _, tc := range validTestCases(t, snapshotName) {
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/basic.go b/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
index 8c62b720685b67c3bbe3c685b940a7ec4f638372..61abc684b79e4f1fabf6b6d1b6e86d9e6462fd4b 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/basic.go
@@ -29,13 +29,13 @@ import (
 // BasicSnapshotStore is simple, reference implementation of ClusterSnapshotStore.
 // It is inefficient. But hopefully bug-free and good for initial testing.
 type BasicSnapshotStore struct {
-	data []*internalBasicSnapshotData
+	data        []*internalBasicSnapshotData
+	draSnapshot *drasnapshot.Snapshot
 }
 
 type internalBasicSnapshotData struct {
 	nodeInfoMap        map[string]*schedulerframework.NodeInfo
 	pvcNamespacePodMap map[string]map[string]bool
-	draSnapshot        drasnapshot.Snapshot
 }
 
 func (data *internalBasicSnapshotData) listNodeInfos() []*schedulerframework.NodeInfo {
@@ -142,7 +142,6 @@ func (data *internalBasicSnapshotData) clone() *internalBasicSnapshotData {
 	return &internalBasicSnapshotData{
 		nodeInfoMap:        clonedNodeInfoMap,
 		pvcNamespacePodMap: clonedPvcNamespaceNodeMap,
-		draSnapshot:        data.draSnapshot.Clone(),
 	}
 }
 
@@ -208,8 +207,8 @@ func (snapshot *BasicSnapshotStore) getInternalData() *internalBasicSnapshotData
 }
 
 // DraSnapshot returns the DRA snapshot.
-func (snapshot *BasicSnapshotStore) DraSnapshot() drasnapshot.Snapshot {
-	return snapshot.getInternalData().draSnapshot
+func (snapshot *BasicSnapshotStore) DraSnapshot() *drasnapshot.Snapshot {
+	return snapshot.draSnapshot
 }
 
 // AddSchedulerNodeInfo adds a NodeInfo.
@@ -226,7 +225,7 @@ func (snapshot *BasicSnapshotStore) AddSchedulerNodeInfo(nodeInfo *schedulerfram
 }
 
 // SetClusterState sets the cluster state.
-func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error {
+func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error {
 	snapshot.clear()
 
 	knownNodes := make(map[string]bool)
@@ -243,7 +242,13 @@ func (snapshot *BasicSnapshotStore) SetClusterState(nodes []*apiv1.Node, schedul
 			}
 		}
 	}
-	snapshot.getInternalData().draSnapshot = draSnapshot
+
+	if draSnapshot == nil {
+		snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
+	} else {
+		snapshot.draSnapshot = draSnapshot
+	}
+
 	return nil
 }
 
@@ -271,6 +276,7 @@ func (snapshot *BasicSnapshotStore) IsPVCUsedByPods(key string) bool {
 func (snapshot *BasicSnapshotStore) Fork() {
 	forkData := snapshot.getInternalData().clone()
 	snapshot.data = append(snapshot.data, forkData)
+	snapshot.draSnapshot.Fork()
 }
 
 // Revert reverts snapshot state to moment of forking.
@@ -279,6 +285,7 @@ func (snapshot *BasicSnapshotStore) Revert() {
 		return
 	}
 	snapshot.data = snapshot.data[:len(snapshot.data)-1]
+	snapshot.draSnapshot.Revert()
 }
 
 // Commit commits changes done after forking.
@@ -288,6 +295,7 @@ func (snapshot *BasicSnapshotStore) Commit() error {
 		return nil
 	}
 	snapshot.data = append(snapshot.data[:len(snapshot.data)-2], snapshot.data[len(snapshot.data)-1])
+	snapshot.draSnapshot.Commit()
 	return nil
 }
 
@@ -295,6 +303,7 @@ func (snapshot *BasicSnapshotStore) Commit() error {
 func (snapshot *BasicSnapshotStore) clear() {
 	baseData := newInternalBasicSnapshotData()
 	snapshot.data = []*internalBasicSnapshotData{baseData}
+	snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
 }
 
 // implementation of SharedLister interface
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/delta.go b/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
index 67c1bc67fe3b524723aa779732516688a3cc9914..6d0d5c7e76182357b80d4489c5539e864890c0e3 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/delta.go
@@ -41,12 +41,18 @@ import (
 //
 // Watch out for:
 //
-//	node deletions, pod additions & deletions - invalidates cache of current snapshot
-//		(when forked affects delta, but not base.)
-//	pod affinity - causes scheduler framework to list pods with non-empty selector,
-//		so basic caching doesn't help.
+// * Node deletions, pod additions & deletions - invalidate the cache of the current
+// snapshot (when forked, this affects the delta, but not the base).
+//
+// * Pod affinity - causes the scheduler framework to list pods with a non-empty selector,
+// so basic caching doesn't help.
+//
+// * DRA objects are tracked in a separate snapshot. While it doesn't share the exact
+// memory and time complexities of DeltaSnapshotStore, it is optimized for
+// Cluster Autoscaler operations (see the sketch below).
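+//
+// A minimal usage sketch of the DRA handling added here (nodes and pods stand for
+// placeholder test inputs, not identifiers from this package): the DRA snapshot
+// follows the store's fork/revert/commit lifecycle.
+//
+//	store := NewDeltaSnapshotStore(16)
+//	_ = store.SetClusterState(nodes, pods, nil) // nil is replaced with an empty DRA snapshot
+//	store.Fork()   // forks the DRA snapshot together with the store data
+//	store.Revert() // reverts both back to the forked state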
 type DeltaSnapshotStore struct {
 	data        *internalDeltaSnapshotData
+	draSnapshot *drasnapshot.Snapshot
 	parallelism int
 }
 
@@ -321,6 +327,7 @@ func (data *internalDeltaSnapshotData) commit() (*internalDeltaSnapshotData, err
 			return nil, err
 		}
 	}
+
 	return data.baseData, nil
 }
 
@@ -419,9 +426,8 @@ func NewDeltaSnapshotStore(parallelism int) *DeltaSnapshotStore {
 }
 
 // DraSnapshot returns the DRA snapshot.
-func (snapshot *DeltaSnapshotStore) DraSnapshot() drasnapshot.Snapshot {
-	// TODO(DRA): Return DRA snapshot.
-	return drasnapshot.Snapshot{}
+func (snapshot *DeltaSnapshotStore) DraSnapshot() *drasnapshot.Snapshot {
+	return snapshot.draSnapshot
 }
 
 // AddSchedulerNodeInfo adds a NodeInfo.
@@ -469,7 +475,7 @@ func (snapshot *DeltaSnapshotStore) setClusterStatePodsParallelized(nodeInfos []
 }
 
 // SetClusterState sets the cluster state.
-func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot drasnapshot.Snapshot) error {
+func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot *drasnapshot.Snapshot) error {
 	snapshot.clear()
 
 	nodeNameToIdx := make(map[string]int, len(nodes))
@@ -494,7 +500,12 @@ func (snapshot *DeltaSnapshotStore) SetClusterState(nodes []*apiv1.Node, schedul
 	// Clear caches after adding pods.
 	snapshot.data.clearCaches()
 
-	// TODO(DRA): Save DRA snapshot.
+	if draSnapshot == nil {
+		snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
+	} else {
+		snapshot.draSnapshot = draSnapshot
+	}
+
 	return nil
 }
 
@@ -522,6 +533,7 @@ func (snapshot *DeltaSnapshotStore) IsPVCUsedByPods(key string) bool {
 // Time: O(1)
 func (snapshot *DeltaSnapshotStore) Fork() {
 	snapshot.data = snapshot.data.fork()
+	snapshot.draSnapshot.Fork()
 }
 
 // Revert reverts snapshot state to moment of forking.
@@ -530,6 +542,7 @@ func (snapshot *DeltaSnapshotStore) Revert() {
 	if snapshot.data.baseData != nil {
 		snapshot.data = snapshot.data.baseData
 	}
+	snapshot.draSnapshot.Revert()
 }
 
 // Commit commits changes done after forking.
@@ -540,6 +553,7 @@ func (snapshot *DeltaSnapshotStore) Commit() error {
 		return err
 	}
 	snapshot.data = newData
+	snapshot.draSnapshot.Commit()
 	return nil
 }
 
@@ -547,4 +561,5 @@ func (snapshot *DeltaSnapshotStore) Commit() error {
 // Time: O(1)
 func (snapshot *DeltaSnapshotStore) clear() {
 	snapshot.data = newInternalDeltaSnapshotData()
+	snapshot.draSnapshot = drasnapshot.NewEmptySnapshot()
 }
diff --git a/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
index 5f618befd180c3f33958b68be468f81d33faf38e..0d426b7e39b11eb3c332c2822640d9a97cdd7771 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/store/delta_benchmark_test.go
@@ -23,7 +23,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
@@ -49,7 +48,7 @@ func BenchmarkBuildNodeInfoList(b *testing.B) {
 		b.Run(fmt.Sprintf("fork add 1000 to %d", tc.nodeCount), func(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc.nodeCount + 1000)
 			deltaStore := NewDeltaSnapshotStore(16)
-			if err := deltaStore.SetClusterState(nodes[:tc.nodeCount], nil, drasnapshot.Snapshot{}); err != nil {
+			if err := deltaStore.SetClusterState(nodes[:tc.nodeCount], nil, nil); err != nil {
 				assert.NoError(b, err)
 			}
 			deltaStore.Fork()
@@ -71,7 +70,7 @@ func BenchmarkBuildNodeInfoList(b *testing.B) {
 		b.Run(fmt.Sprintf("base %d", tc.nodeCount), func(b *testing.B) {
 			nodes := clustersnapshot.CreateTestNodes(tc.nodeCount)
 			deltaStore := NewDeltaSnapshotStore(16)
-			if err := deltaStore.SetClusterState(nodes, nil, drasnapshot.Snapshot{}); err != nil {
+			if err := deltaStore.SetClusterState(nodes, nil, nil); err != nil {
 				assert.NoError(b, err)
 			}
 			b.ResetTimer()
diff --git a/cluster-autoscaler/simulator/clustersnapshot/test_utils.go b/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
index b98c9e117e1f034909b42e808443f34d572fb9e4..d95a3e6437c405c2786c0054070817b3193f28fe 100644
--- a/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
+++ b/cluster-autoscaler/simulator/clustersnapshot/test_utils.go
@@ -25,7 +25,6 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	apiv1 "k8s.io/api/core/v1"
-	drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/test"
 )
@@ -39,7 +38,7 @@ func InitializeClusterSnapshotOrDie(
 	pods []*apiv1.Pod) {
 	var err error
 
-	assert.NoError(t, snapshot.SetClusterState(nil, nil, drasnapshot.Snapshot{}))
+	assert.NoError(t, snapshot.SetClusterState(nil, nil, nil))
 
 	for _, node := range nodes {
 		err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node))
diff --git a/cluster-autoscaler/simulator/common/patch.go b/cluster-autoscaler/simulator/common/patch.go
new file mode 100644
index 0000000000000000000000000000000000000000..dfd3923264a63101c7f9ebdc8c7eda7e7ed6953c
--- /dev/null
+++ b/cluster-autoscaler/simulator/common/patch.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Patch represents a single layer of modifications (additions/updates)
+// and deletions for a set of key-value pairs. It's used as a building
+// block for PatchSet.
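+//
+// A minimal sketch of how a single patch records changes:
+//
+//	p := NewPatch[string, int]()
+//	p.Set("a", 1)          // "a" is recorded as modified
+//	p.Delete("a")          // "a" is now recorded as deleted instead
+//	_, found := p.Get("a") // found == false
+//	_ = p.IsDeleted("a")   // returns true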
+type Patch[K comparable, V any] struct {
+	modified map[K]V
+	deleted  map[K]bool
+}
+
+// Set marks a key-value pair as modified in the current patch.
+// If the key was previously marked as deleted, the deletion mark is removed.
+func (p *Patch[K, V]) Set(key K, value V) {
+	p.modified[key] = value
+	delete(p.deleted, key)
+}
+
+// Delete marks a key as deleted in the current patch.
+// If the key was previously marked as modified, the modification is removed.
+func (p *Patch[K, V]) Delete(key K) {
+	delete(p.modified, key)
+	p.deleted[key] = true
+}
+
+// Get retrieves the modified value for a key within this specific patch.
+// It returns the value and true if the key was found in this patch,
+// or the zero value and false otherwise.
+func (p *Patch[K, V]) Get(key K) (value V, found bool) {
+	value, found = p.modified[key]
+	return value, found
+}
+
+// IsDeleted checks if the key is marked as deleted within this specific patch.
+func (p *Patch[K, V]) IsDeleted(key K) bool {
+	return p.deleted[key]
+}
+
+// NewPatch creates a new, empty patch with no modifications or deletions.
+func NewPatch[K comparable, V any]() *Patch[K, V] {
+	return &Patch[K, V]{
+		modified: make(map[K]V),
+		deleted:  make(map[K]bool),
+	}
+}
+
+// NewPatchFromMap creates a new patch initialized with the data from the provided map.
+// The supplied entries are recorded as modifications in the patch; the patch takes
+// ownership of the map, so the caller should not modify it afterwards.
+func NewPatchFromMap[M ~map[K]V, K comparable, V any](source M) *Patch[K, V] {
+	if source == nil {
+		source = make(M)
+	}
+
+	return &Patch[K, V]{
+		modified: source,
+		deleted:  make(map[K]bool),
+	}
+}
+
+// mergePatchesInPlace merges patch b into patch a, modifying a in place.
+// Entries from b take priority whenever both patches record the same key.
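+//
+// For example:
+//
+//	a := NewPatchFromMap(map[string]int{"x": 1, "y": 2})
+//	b := NewPatch[string, int]()
+//	b.Set("x", 10)
+//	b.Delete("y")
+//	merged := mergePatchesInPlace(a, b) // merged == a: "x" -> 10, "y" marked as deleted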
+func mergePatchesInPlace[K comparable, V any](a *Patch[K, V], b *Patch[K, V]) *Patch[K, V] {
+	for key, value := range b.modified {
+		a.Set(key, value)
+	}
+
+	for key := range b.deleted {
+		a.Delete(key)
+	}
+
+	return a
+}
diff --git a/cluster-autoscaler/simulator/common/patch_test.go b/cluster-autoscaler/simulator/common/patch_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..30a6d778a5c8102423b77c2bfd9aeb88d7de8bd8
--- /dev/null
+++ b/cluster-autoscaler/simulator/common/patch_test.go
@@ -0,0 +1,324 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"maps"
+	"testing"
+)
+
+func TestPatchOperations(t *testing.T) {
+	p := NewPatch[string, int]()
+
+	// 1. Get and IsDeleted on non-existent key
+	if _, found := p.Get("a"); found {
+		t.Errorf("Get for 'a' should not find anything")
+	}
+	if p.IsDeleted("a") {
+		t.Errorf("IsDeleted for 'a' should be false")
+	}
+
+	// 2. Set 'a' key
+	p.Set("a", 1)
+	if val, found := p.Get("a"); !found || val != 1 {
+		t.Errorf("Get('a') = %v,%v, expected 1,true", val, found)
+	}
+	if p.IsDeleted("a") {
+		t.Errorf("IsDeleted('a') = true, should be false")
+	}
+
+	// 3. Overwrite 'a' key
+	p.Set("a", 2)
+	if val, found := p.Get("a"); !found || val != 2 {
+		t.Errorf("Get('a') = %v,%v, expected 2,true", val, found)
+	}
+	if p.IsDeleted("a") {
+		t.Errorf("IsDeleted('a') = true, should be false")
+	}
+
+	// 4. Delete 'a' key
+	p.Delete("a")
+	if val, found := p.Get("a"); found {
+		t.Errorf("Get('a') = %v,%v, should not find anything after delete", val, found)
+	}
+	if !p.IsDeleted("a") {
+		t.Errorf("IsDeleted('a') = false, should be true")
+	}
+
+	// 5. Set 'a' key again after deletion
+	p.Set("a", 3)
+	if val, found := p.Get("a"); !found || val != 3 {
+		t.Errorf("Get('a') = %v,%v, expected 3,true", val, found)
+	}
+	if p.IsDeleted("a") {
+		t.Errorf("IsDeleted('a') = true, should be false")
+	}
+
+	// 6. Delete a non-existent key 'c'
+	p.Delete("c")
+	if val, found := p.Get("c"); found {
+		t.Errorf("Get('c') = %v, %v, should not find anything", val, found)
+	}
+	if !p.IsDeleted("c") {
+		t.Errorf("IsDeleted('c') = false, should be true")
+	}
+}
+
+func TestCreatePatch(t *testing.T) {
+	tests := map[string]struct {
+		sourceMap    map[string]int
+		addKeys      map[string]int
+		deleteKeys   []string
+		wantModified map[string]int
+		wantDeleted  map[string]bool
+	}{
+		"SourceMapOnlyNoModifications": {
+			sourceMap:    map[string]int{"k1": 1, "k2": 2},
+			addKeys:      map[string]int{},
+			deleteKeys:   []string{},
+			wantModified: map[string]int{"k1": 1, "k2": 2},
+			wantDeleted:  map[string]bool{},
+		},
+		"NilSourceMapAddAndDelete": {
+			sourceMap:    nil,
+			addKeys:      map[string]int{"a": 1, "b": 2},
+			deleteKeys:   []string{"b"},
+			wantModified: map[string]int{"a": 1},
+			wantDeleted:  map[string]bool{"b": true},
+		},
+		"EmptySourceMapAddAndDelete": {
+			sourceMap:    map[string]int{},
+			addKeys:      map[string]int{"x": 10},
+			deleteKeys:   []string{"y"},
+			wantModified: map[string]int{"x": 10},
+			wantDeleted:  map[string]bool{"y": true},
+		},
+		"NonEmptySourceMapAddOverwriteDelete": {
+			sourceMap:    map[string]int{"orig1": 100, "orig2": 200},
+			addKeys:      map[string]int{"new1": 300, "orig1": 101},
+			deleteKeys:   []string{"orig2", "new1"},
+			wantModified: map[string]int{"orig1": 101},
+			wantDeleted:  map[string]bool{"orig2": true, "new1": true},
+		},
+		"DeleteKeyFromSourceMap": {
+			sourceMap:    map[string]int{"key_to_delete": 70, "key_to_keep": 80},
+			addKeys:      map[string]int{},
+			deleteKeys:   []string{"key_to_delete"},
+			wantModified: map[string]int{"key_to_keep": 80},
+			wantDeleted:  map[string]bool{"key_to_delete": true},
+		},
+		"AddOnlyNoSourceMap": {
+			sourceMap:    nil,
+			addKeys:      map[string]int{"add1": 10, "add2": 20},
+			deleteKeys:   []string{},
+			wantModified: map[string]int{"add1": 10, "add2": 20},
+			wantDeleted:  map[string]bool{},
+		},
+		"DeleteOnlyNoSourceMap": {
+			sourceMap:    nil,
+			addKeys:      map[string]int{},
+			deleteKeys:   []string{"del1", "del2"},
+			wantModified: map[string]int{},
+			wantDeleted:  map[string]bool{"del1": true, "del2": true},
+		},
+		"DeleteKeyNotPresentInSourceOrAdded": {
+			sourceMap:    map[string]int{"a": 1},
+			addKeys:      map[string]int{"b": 2},
+			deleteKeys:   []string{"c"},
+			wantModified: map[string]int{"a": 1, "b": 2},
+			wantDeleted:  map[string]bool{"c": true},
+		},
+		"AddKeyThenDeleteIt": {
+			sourceMap:    map[string]int{"base": 100},
+			addKeys:      map[string]int{"temp": 50},
+			deleteKeys:   []string{"temp"},
+			wantModified: map[string]int{"base": 100},
+			wantDeleted:  map[string]bool{"temp": true},
+		},
+		"AllOperations": {
+			sourceMap:    map[string]int{"s1": 1, "s2": 2, "s3": 3},
+			addKeys:      map[string]int{"a1": 10, "s2": 22, "a2": 20},
+			deleteKeys:   []string{"s1", "a1", "nonexistent"},
+			wantModified: map[string]int{"s2": 22, "s3": 3, "a2": 20},
+			wantDeleted:  map[string]bool{"s1": true, "a1": true, "nonexistent": true},
+		},
+		"NoOperationsEmptySource": {
+			sourceMap:    map[string]int{},
+			addKeys:      map[string]int{},
+			deleteKeys:   []string{},
+			wantModified: map[string]int{},
+			wantDeleted:  map[string]bool{},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			p := NewPatchFromMap(test.sourceMap)
+
+			for k, v := range test.addKeys {
+				p.Set(k, v)
+			}
+			for _, k := range test.deleteKeys {
+				p.Delete(k)
+			}
+
+			if !maps.Equal(p.modified, test.wantModified) {
+				t.Errorf("Modified map mismatch: got %v, want %v", p.modified, test.wantModified)
+			}
+
+			if !maps.Equal(p.deleted, test.wantDeleted) {
+				t.Errorf("Deleted map mismatch: got %v, want %v", p.deleted, test.wantDeleted)
+			}
+		})
+	}
+}
+
+func TestMergePatchesInPlace(t *testing.T) {
+	tests := map[string]struct {
+		modifiedPatchA     map[string]int
+		deletedPatchA      map[string]bool
+		modifiedPatchB     map[string]int
+		deletedPatchB      map[string]bool
+		wantModifiedMerged map[string]int
+		wantDeletedMerged  map[string]bool
+	}{
+		"PatchBOverwritesValueInPatchA": {
+			modifiedPatchA:     map[string]int{"a": 1, "b": 2},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{"b": 22, "c": 3},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"a": 1, "b": 22, "c": 3},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"PatchBDeletesValuePresentInPatchA": {
+			modifiedPatchA:     map[string]int{"a": 1, "b": 2},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{"a": true},
+			wantModifiedMerged: map[string]int{"b": 2},
+			wantDeletedMerged:  map[string]bool{"a": true},
+		},
+		"PatchBDeletesValueAlreadyDeletedInPatchA": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{"x": true},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{"x": true, "y": true},
+			wantModifiedMerged: map[string]int{},
+			wantDeletedMerged:  map[string]bool{"x": true, "y": true},
+		},
+		"PatchBAddsValuePreviouslyDeletedInPatchA": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{"x": true},
+			modifiedPatchB:     map[string]int{"x": 10},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"x": 10},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"MergeEmptyPatchBIntoNonEmptyPatchA": {
+			modifiedPatchA:     map[string]int{"a": 1},
+			deletedPatchA:      map[string]bool{"b": true},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"a": 1},
+			wantDeletedMerged:  map[string]bool{"b": true},
+		},
+		"MergeNonEmptyPatchBIntoEmptyPatchA": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{"a": 1},
+			deletedPatchB:      map[string]bool{"b": true},
+			wantModifiedMerged: map[string]int{"a": 1},
+			wantDeletedMerged:  map[string]bool{"b": true},
+		},
+		"MergeTwoEmptyPatches": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"NoOverlapBetweenPatchAAndPatchBModifications": {
+			modifiedPatchA:     map[string]int{"a1": 1, "a2": 2},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{"b1": 10, "b2": 20},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"a1": 1, "a2": 2, "b1": 10, "b2": 20},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"NoOverlapBetweenPatchAAndPatchBDeletions": {
+			modifiedPatchA:     map[string]int{},
+			deletedPatchA:      map[string]bool{"a1": true, "a2": true},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{"b1": true, "b2": true},
+			wantModifiedMerged: map[string]int{},
+			wantDeletedMerged:  map[string]bool{"a1": true, "a2": true, "b1": true, "b2": true},
+		},
+		"PatchBOnlyAddsNewKeysPatchAUnchanged": {
+			modifiedPatchA:     map[string]int{"orig": 5},
+			modifiedPatchB:     map[string]int{"new1": 100, "new2": 200},
+			deletedPatchA:      map[string]bool{},
+			deletedPatchB:      map[string]bool{},
+			wantModifiedMerged: map[string]int{"orig": 5, "new1": 100, "new2": 200},
+			wantDeletedMerged:  map[string]bool{},
+		},
+		"PatchBOnlyDeletesNewKeysPatchAUnchanged": {
+			modifiedPatchA:     map[string]int{"orig": 5},
+			deletedPatchA:      map[string]bool{},
+			modifiedPatchB:     map[string]int{},
+			deletedPatchB:      map[string]bool{"del1": true, "del2": true},
+			wantModifiedMerged: map[string]int{"orig": 5},
+			wantDeletedMerged:  map[string]bool{"del1": true, "del2": true},
+		},
+		"AllInOne": {
+			modifiedPatchA:     map[string]int{"k1": 1, "k2": 2, "k3": 3},
+			deletedPatchA:      map[string]bool{"d1": true, "d2": true},
+			modifiedPatchB:     map[string]int{"k2": 22, "k4": 4, "d1": 11},
+			deletedPatchB:      map[string]bool{"k3": true, "d2": true, "d3": true},
+			wantModifiedMerged: map[string]int{"k1": 1, "k2": 22, "k4": 4, "d1": 11},
+			wantDeletedMerged:  map[string]bool{"k3": true, "d2": true, "d3": true},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			patchA := NewPatchFromMap(test.modifiedPatchA)
+			for k := range test.deletedPatchA {
+				patchA.Delete(k)
+			}
+
+			patchB := NewPatchFromMap(test.modifiedPatchB)
+			for k := range test.deletedPatchB {
+				patchB.Delete(k)
+			}
+
+			merged := mergePatchesInPlace(patchA, patchB)
+
+			if merged != patchA {
+				t.Errorf("mergePatchesInPlace did not modify patchA inplace, references are different")
+			}
+
+			if !maps.Equal(merged.modified, test.wantModifiedMerged) {
+				t.Errorf("Modified map mismatch: got %v, want %v", merged.modified, test.wantModifiedMerged)
+			}
+
+			if !maps.Equal(merged.deleted, test.wantDeletedMerged) {
+				t.Errorf("Deleted map mismatch: got %v, want %v", merged.deleted, test.wantDeletedMerged)
+			}
+		})
+	}
+}
diff --git a/cluster-autoscaler/simulator/common/patchset.go b/cluster-autoscaler/simulator/common/patchset.go
new file mode 100644
index 0000000000000000000000000000000000000000..c30ad06e93eeec60e5504c4b83fd27fcd2dfc569
--- /dev/null
+++ b/cluster-autoscaler/simulator/common/patchset.go
@@ -0,0 +1,265 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// PatchSet manages a stack of patches, allowing for fork/revert/commit operations.
+// It provides a view of the data as if all patches were applied sequentially.
+//
+// Time Complexities:
+//   - Fork(): O(1).
+//   - Commit(): O(P), where P is the number of modified/deleted entries
+//     in the current patch; a no-op for a PatchSet with a single patch.
+//   - Revert(): O(P), where P is the number of modified/deleted entries
+//     in the topmost patch; a no-op for a PatchSet with a single patch.
+//   - FindValue(key): O(1) for cached keys, O(N * P) otherwise.
+//   - AsMap(): O(N * P), as it needs to iterate through the modifications
+//     and deletions of all the layers to get the latest state. In the best case,
+//     when the cache is in sync with the PatchSet data, the complexity becomes
+//     O(C), where C is the number of key/value pairs in the flattened PatchSet.
+//   - SetCurrent(key, value): O(1).
+//   - DeleteCurrent(key): O(1).
+//   - InCurrentPatch(key): O(1).
+//
+// Variables used in complexity analysis:
+//   - N: The number of patch layers in the PatchSet.
+//   - P: The number of modified/deleted entries in a single patch layer.
+//
+// Caching:
+//
+// The PatchSet employs a lazy caching mechanism to speed up access to data. When a specific item is requested,
+// the cache is checked first. If the item isn't cached, its effective value is computed by traversing the layers
+// of patches from the most recent to the oldest, and the result is then stored in the cache. For operations that
+// require the entire dataset like AsMap, if a cache is fully up-to-date, the data is served directly from the cache.
+// Otherwise, the entire dataset is rebuilt by applying all patches, and this process fully populates the cache.
+// Direct modifications to the current patch update the specific item in the cache immediately. Reverting the latest
+// patch will clear affected items from the cache and mark the cache as potentially out-of-sync, as underlying values may
+// no longer represent the PatchSet as a whole. Committing and forking does not invalidate the cache, as the effective
+// values remain consistent from the perspective of read operations.
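+//
+// A minimal usage sketch:
+//
+//	ps := NewPatchSet(NewPatchFromMap(map[string]int{"a": 1}))
+//	ps.Fork()             // start a new layer
+//	ps.SetCurrent("b", 2) // visible via FindValue("b") and AsMap()
+//	ps.DeleteCurrent("a") // "a" is hidden while this layer exists
+//	ps.Revert()           // drop the layer: "a" is visible again, "b" is gone
+//	ps.Fork()
+//	ps.SetCurrent("c", 3)
+//	ps.Commit()           // fold "c" into the base layer; AsMap() == {"a": 1, "c": 3}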
+type PatchSet[K comparable, V any] struct {
+	// patches is a stack of individual modification layers. The base data is
+	// at index 0, and subsequent modifications are layered on top.
+	// PatchSet should always contain at least a single patch.
+	patches []*Patch[K, V]
+
+	// cache stores the computed effective value for keys that have been accessed.
+	// A nil pointer indicates the key is effectively deleted or not present.
+	cache map[K]*V
+
+	// cacheInSync indicates whether the cache map accurately reflects the
+	// current state derived from applying all patches in the 'patches' slice.
+	// When false, the cache may be stale and needs to be rebuilt or validated
+	// before being fully trusted for all keys.
+	cacheInSync bool
+}
+
+// NewPatchSet creates a new PatchSet, initializing it with the provided base patches.
+func NewPatchSet[K comparable, V any](patches ...*Patch[K, V]) *PatchSet[K, V] {
+	return &PatchSet[K, V]{
+		patches:     patches,
+		cache:       make(map[K]*V),
+		cacheInSync: false,
+	}
+}
+
+// Fork adds a new, empty patch layer to the top of the stack.
+// Subsequent modifications will be recorded in this new layer.
+func (p *PatchSet[K, V]) Fork() {
+	p.patches = append(p.patches, NewPatch[K, V]())
+}
+
+// Commit merges the topmost patch layer into the one below it.
+// If there's only one layer (or none), it's a no-op.
+func (p *PatchSet[K, V]) Commit() {
+	if len(p.patches) < 2 {
+		return
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	previousPatch := p.patches[len(p.patches)-2]
+	mergePatchesInPlace(previousPatch, currentPatch)
+	p.patches = p.patches[:len(p.patches)-1]
+}
+
+// Revert removes the topmost patch layer.
+// Any modifications or deletions recorded in that layer are discarded.
+func (p *PatchSet[K, V]) Revert() {
+	if len(p.patches) <= 1 {
+		return
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	p.patches = p.patches[:len(p.patches)-1]
+
+	for key := range currentPatch.modified {
+		delete(p.cache, key)
+	}
+
+	for key := range currentPatch.deleted {
+		delete(p.cache, key)
+	}
+
+	p.cacheInSync = false
+}
+
+// FindValue searches for the effective value of a key by looking through the patches
+// from top to bottom. It returns the value and true if found, or the zero value and false
+// if the key is deleted or not found in any patch.
+func (p *PatchSet[K, V]) FindValue(key K) (value V, found bool) {
+	var zero V
+
+	if cachedValue, cacheHit := p.cache[key]; cacheHit {
+		if cachedValue == nil {
+			return zero, false
+		}
+
+		return *cachedValue, true
+	}
+
+	value = zero
+	found = false
+	for i := len(p.patches) - 1; i >= 0; i-- {
+		patch := p.patches[i]
+		if patch.IsDeleted(key) {
+			break
+		}
+
+		foundValue, ok := patch.Get(key)
+		if ok {
+			value = foundValue
+			found = true
+			break
+		}
+	}
+
+	if found {
+		p.cache[key] = &value
+	} else {
+		p.cache[key] = nil
+	}
+
+	return value, found
+}
+
+// AsMap merges all patches into a single map representing the current effective state.
+// It iterates through all patches from bottom to top, applying modifications and deletions.
+// The cache is populated with the results during this process.
+func (p *PatchSet[K, V]) AsMap() map[K]V {
+	if p.cacheInSync {
+		patchSetMap := make(map[K]V, len(p.cache))
+		for key, value := range p.cache {
+			if value != nil {
+				patchSetMap[key] = *value
+			}
+		}
+		return patchSetMap
+	}
+
+	keysCount := p.totalKeyCount()
+	patchSetMap := make(map[K]V, keysCount)
+
+	for _, patch := range p.patches {
+		for key, value := range patch.modified {
+			patchSetMap[key] = value
+		}
+
+		for key := range patch.deleted {
+			delete(patchSetMap, key)
+		}
+	}
+
+	for key, value := range patchSetMap {
+		p.cache[key] = &value
+	}
+	p.cacheInSync = true
+
+	return patchSetMap
+}
+
+// SetCurrent adds or updates a key-value pair in the topmost patch layer.
+func (p *PatchSet[K, V]) SetCurrent(key K, value V) {
+	if len(p.patches) == 0 {
+		p.Fork()
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	currentPatch.Set(key, value)
+	p.cache[key] = &value
+}
+
+// DeleteCurrent marks a key as deleted in the topmost patch layer.
+func (p *PatchSet[K, V]) DeleteCurrent(key K) {
+	if len(p.patches) == 0 {
+		p.Fork()
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	currentPatch.Delete(key)
+	p.cache[key] = nil
+}
+
+// InCurrentPatch checks if the key is available in the topmost patch layer.
+func (p *PatchSet[K, V]) InCurrentPatch(key K) bool {
+	if len(p.patches) == 0 {
+		return false
+	}
+
+	currentPatch := p.patches[len(p.patches)-1]
+	_, found := currentPatch.Get(key)
+	return found
+}
+
+// totalKeyCount returns an upper-bound estimate of the total number of
+// key-value pairs across all patches. Deletions are not taken into account,
+// so the result may overestimate the number of effective entries in the
+// flattened PatchSet.
+func (p *PatchSet[K, V]) totalKeyCount() int {
+	count := 0
+	for _, patch := range p.patches {
+		count += len(patch.modified)
+	}
+
+	return count
+}
+
+// ClonePatchSet creates a deep copy of a PatchSet object with the same patch layer
+// structure, copying keys and values with the provided cloneKey and cloneValue
+// functions.
+//
+// This function is intended for testing purposes only.
+func ClonePatchSet[K comparable, V any](ps *PatchSet[K, V], cloneKey func(K) K, cloneValue func(V) V) *PatchSet[K, V] {
+	if ps == nil {
+		return nil
+	}
+
+	cloned := NewPatchSet[K, V]()
+	for _, patch := range ps.patches {
+		clonedPatch := NewPatch[K, V]()
+		for key, value := range patch.modified {
+			clonedKey, clonedValue := cloneKey(key), cloneValue(value)
+			clonedPatch.Set(clonedKey, clonedValue)
+		}
+
+		for key := range patch.deleted {
+			clonedKey := cloneKey(key)
+			clonedPatch.Delete(clonedKey)
+		}
+
+		cloned.patches = append(cloned.patches, clonedPatch)
+	}
+
+	return cloned
+}
diff --git a/cluster-autoscaler/simulator/common/patchset_test.go b/cluster-autoscaler/simulator/common/patchset_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6571321bb08bac6c86eecc519463bfb55b35ccc
--- /dev/null
+++ b/cluster-autoscaler/simulator/common/patchset_test.go
@@ -0,0 +1,708 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"maps"
+	"testing"
+
+	"k8s.io/utils/ptr"
+)
+
+func TestPatchSetAsMap(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers []map[string]*int
+		wantMap     map[string]int
+	}{
+		"EmptyPatchSet": {
+			patchLayers: []map[string]*int{},
+			wantMap:     map[string]int{},
+		},
+		"SingleLayerNoDeletions": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(2)}},
+			wantMap:     map[string]int{"a": 1, "b": 2},
+		},
+		"SingleLayerOnlyDeletions": {
+			patchLayers: []map[string]*int{{"a": nil, "b": nil}},
+			wantMap:     map[string]int{},
+		},
+		"SingleLayerModificationsAndDeletions": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": nil, "c": ptr.To(3)}},
+			wantMap: map[string]int{"a": 1, "c": 3},
+		},
+		"MultipleLayersNoDeletionsOverwrite": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": ptr.To(2)},
+				{"b": ptr.To(22), "c": ptr.To(3)}},
+			wantMap: map[string]int{"a": 1, "b": 22, "c": 3},
+		},
+		"MultipleLayersWithDeletions": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": nil, "x": ptr.To(100)},
+				{"c": ptr.To(3), "x": ptr.To(101), "a": nil}},
+			wantMap: map[string]int{"c": 3, "x": 101},
+		},
+		"MultipleLayersDeletionInLowerLayer": {
+			patchLayers: []map[string]*int{
+				{"a": nil, "b": ptr.To(2)},
+				{"c": ptr.To(3)}},
+			wantMap: map[string]int{"b": 2, "c": 3},
+		},
+		"MultipleLayersAddAfterDelete": {
+			patchLayers: []map[string]*int{
+				{"a": nil},
+				{"a": ptr.To(11), "b": ptr.To(2)}},
+			wantMap: map[string]int{"a": 11, "b": 2},
+		},
+		"MultipleLayersDeletePreviouslyAdded": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": ptr.To(2)},
+				{"c": ptr.To(3), "b": nil}},
+			wantMap: map[string]int{"a": 1, "c": 3},
+		},
+		"AllInOne": {
+			patchLayers: []map[string]*int{
+				{"k1": ptr.To(1), "k2": ptr.To(2)},
+				{"k2": ptr.To(22), "k3": ptr.To(3), "k1": nil},
+				{"k1": ptr.To(111), "k3": ptr.To(33), "k4": ptr.To(4), "deleted": nil},
+			},
+			wantMap: map[string]int{"k2": 22, "k3": 33, "k4": 4, "k1": 111},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			patchset := buildTestPatchSet(t, test.patchLayers)
+			mergedMap := patchset.AsMap()
+			if !maps.Equal(mergedMap, test.wantMap) {
+				t.Errorf("AsMap() result mismatch: got %v, want %v", mergedMap, test.wantMap)
+			}
+		})
+	}
+}
+
+func TestPatchSetCommit(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers []map[string]*int
+		wantMap     map[string]int // Expected map after each commit
+	}{
+		"CommitEmptyPatchSet": {
+			patchLayers: []map[string]*int{},
+			wantMap:     map[string]int{},
+		},
+		"CommitSingleLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			wantMap:     map[string]int{"a": 1},
+		},
+		"CommitTwoLayersNoOverlap": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			wantMap:     map[string]int{"a": 1, "b": 2},
+		},
+		"CommitTwoLayersWithOverwrite": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": ptr.To(2)}},
+			wantMap:     map[string]int{"a": 2},
+		},
+		"CommitTwoLayersWithDeleteInTopLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(5)}, {"c": ptr.To(3), "a": nil}},
+			wantMap:     map[string]int{"b": 5, "c": 3},
+		},
+		"CommitTwoLayersWithDeleteInBottomLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": nil}, {"a": ptr.To(11), "c": ptr.To(3)}},
+			wantMap:     map[string]int{"a": 11, "c": 3},
+		},
+		"CommitThreeLayers": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": ptr.To(2), "z": ptr.To(100)},
+				{"b": ptr.To(22), "c": ptr.To(3), "a": nil},
+				{"c": ptr.To(33), "d": ptr.To(4), "a": ptr.To(111), "b": nil, "deleted": nil},
+			},
+			wantMap: map[string]int{"z": 100, "c": 33, "d": 4, "a": 111},
+		},
+		"CommitMultipleLayersDeleteAndAddBack": {
+			patchLayers: []map[string]*int{
+				{"x": ptr.To(10)},
+				{"x": nil},
+				{"x": ptr.To(30)},
+				{"y": ptr.To(40), "x": nil}},
+			wantMap: map[string]int{"y": 40},
+		},
+		"CommitEmptyLayersBetweenDataLayers": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {}, {"b": ptr.To(2)}, {}, {"c": ptr.To(3)}},
+			wantMap:     map[string]int{"a": 1, "b": 2, "c": 3},
+		},
+	}
+
+	for testName, tc := range tests {
+		t.Run(testName, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			initialNumPatches := len(ps.patches)
+
+			if currentMap := ps.AsMap(); !maps.Equal(currentMap, tc.wantMap) {
+				t.Errorf("AsMap() before any commits mismatch: got %v, want %v", currentMap, tc.wantMap)
+			}
+
+			for i := 0; i < initialNumPatches-1; i++ {
+				expectedPatchesAfterCommit := len(ps.patches) - 1
+				ps.Commit()
+				if len(ps.patches) != expectedPatchesAfterCommit {
+					t.Errorf("After commit #%d, expected %d patches, got %d", i+1, expectedPatchesAfterCommit, len(ps.patches))
+				}
+				if currentMap := ps.AsMap(); !maps.Equal(currentMap, tc.wantMap) {
+					t.Errorf("AsMap() after commit #%d mismatch: got %v, want %v", i+1, currentMap, tc.wantMap)
+				}
+			}
+
+			if initialNumPatches > 0 && len(ps.patches) != 1 {
+				t.Errorf("Expected 1 patch after all commits, got %d", len(ps.patches))
+			} else if initialNumPatches == 0 && len(ps.patches) != 0 {
+				t.Errorf("Expected 0 patches after all commits, got %d", len(ps.patches))
+			}
+		})
+	}
+}
+
+func TestPatchSetRevert(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers         []map[string]*int
+		wantInitialMap      map[string]int
+		wantMapsAfterRevert []map[string]int
+	}{
+		"RevertEmptyPatchSet": {
+			patchLayers:         []map[string]*int{},
+			wantInitialMap:      map[string]int{},
+			wantMapsAfterRevert: []map[string]int{{}},
+		},
+		"RevertSingleLayer": {
+			patchLayers:         []map[string]*int{{"a": ptr.To(1)}},
+			wantInitialMap:      map[string]int{"a": 1},
+			wantMapsAfterRevert: []map[string]int{{"a": 1}},
+		},
+		"RevertTwoLayersNoOverlap": {
+			patchLayers:         []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			wantInitialMap:      map[string]int{"a": 1, "b": 2},
+			wantMapsAfterRevert: []map[string]int{{"a": 1}, {"a": 1}},
+		},
+		"RevertTwoLayersWithOverwrite": {
+			patchLayers:         []map[string]*int{{"a": ptr.To(1)}, {"a": ptr.To(2)}},
+			wantInitialMap:      map[string]int{"a": 2},
+			wantMapsAfterRevert: []map[string]int{{"a": 1}, {"a": 1}},
+		},
+		"RevertTwoLayersWithDeleteInTopLayer": {
+			patchLayers:         []map[string]*int{{"a": ptr.To(1), "b": ptr.To(5)}, {"c": ptr.To(3), "a": nil}},
+			wantInitialMap:      map[string]int{"b": 5, "c": 3},
+			wantMapsAfterRevert: []map[string]int{{"a": 1, "b": 5}, {"a": 1, "b": 5}},
+		},
+		"RevertThreeLayers": {
+			patchLayers: []map[string]*int{
+				{"a": ptr.To(1), "b": ptr.To(2), "z": ptr.To(100)},
+				{"b": ptr.To(22), "c": ptr.To(3), "a": nil},
+				{"c": ptr.To(33), "d": ptr.To(4), "a": ptr.To(111), "b": nil, "deleted": nil},
+			},
+			wantInitialMap: map[string]int{"z": 100, "c": 33, "d": 4, "a": 111},
+			wantMapsAfterRevert: []map[string]int{
+				{"a": 1, "b": 2, "z": 100},
+				{"a": 1, "b": 2, "z": 100},
+				{"z": 100, "b": 22, "c": 3},
+			},
+		},
+		"RevertMultipleLayersDeleteAndAddBack": {
+			patchLayers:    []map[string]*int{{"x": ptr.To(10)}, {"x": nil}, {"x": ptr.To(30)}, {"y": ptr.To(40), "x": nil}},
+			wantInitialMap: map[string]int{"y": 40},
+			wantMapsAfterRevert: []map[string]int{
+				{"x": 10},
+				{"x": 10},
+				{},
+				{"x": 30},
+			},
+		},
+	}
+
+	for testName, tc := range tests {
+		t.Run(testName, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			patchesNumber := len(ps.patches)
+
+			if currentMap := ps.AsMap(); !maps.Equal(currentMap, tc.wantInitialMap) {
+				t.Errorf("AsMap() before any reverts mismatch: got %v, want %v", currentMap, tc.wantInitialMap)
+			}
+
+			for i := 0; i <= patchesNumber; i++ {
+				wantPatchesAfterRevert := len(ps.patches) - 1
+				ps.Revert()
+				if len(ps.patches) != wantPatchesAfterRevert && len(ps.patches) > 1 {
+					t.Errorf("After revert #%d, expected %d patches, got %d", i+1, wantPatchesAfterRevert, len(ps.patches))
+				}
+
+				currentMap := ps.AsMap()
+				wantMapIndex := patchesNumber - i - 1
+				if wantMapIndex < 0 {
+					wantMapIndex = 0
+				}
+				wantMapAfterRevert := tc.wantMapsAfterRevert[wantMapIndex]
+				if !maps.Equal(currentMap, wantMapAfterRevert) {
+					t.Errorf("AsMap() after revert #%d mismatch: got %v, want %v", i+1, currentMap, wantMapAfterRevert)
+				}
+			}
+
+			if patchesNumber >= 1 && len(ps.patches) != 1 {
+				t.Errorf("Expected 1 patch after all reverts, got %d", len(ps.patches))
+			} else if patchesNumber == 0 && len(ps.patches) != 0 {
+				t.Errorf("Expected 0 patches after all reverts, got %d", len(ps.patches))
+			}
+		})
+	}
+}
+
+func TestPatchSetForkRevert(t *testing.T) {
+	// 1. Initialize empty patchset
+	ps := NewPatchSet(NewPatch[string, int]())
+	if initialMap := ps.AsMap(); len(initialMap) != 0 {
+		t.Fatalf("Initial AsMap() got %v, want empty", initialMap)
+	}
+
+	// 2. Call Fork
+	ps.Fork()
+	if len(ps.patches) != 2 {
+		t.Fatalf("Expected 2 patches after Fork(), got %d", len(ps.patches))
+	}
+
+	// 3. Perform some mutation operations on the new layer
+	ps.SetCurrent("a", 1)
+	ps.SetCurrent("b", 2)
+	ps.DeleteCurrent("a")
+
+	// 4. Call Revert
+	ps.Revert()
+	if len(ps.patches) != 1 {
+		t.Fatalf("Expected 1 patch after Revert(), got %d", len(ps.patches))
+	}
+
+	// 5. Compare state to the empty map
+	if finalMap := ps.AsMap(); len(finalMap) != 0 {
+		t.Errorf("AsMap() got %v, want empty", finalMap)
+	}
+}
+
+func TestPatchSetForkCommit(t *testing.T) {
+	// 1. Initialize empty patchset
+	ps := NewPatchSet(NewPatch[string, int]())
+	if initialMap := ps.AsMap(); len(initialMap) != 0 {
+		t.Fatalf("Initial AsMap() got %v, want empty", initialMap)
+	}
+
+	// 2. Call Fork two times
+	ps.Fork()
+	ps.Fork()
+	if len(ps.patches) != 3 {
+		t.Fatalf("Expected 3 patches after 2xFork(), got %d", len(ps.patches))
+	}
+
+	// 3. Perform some mutation operations on the current layer
+	ps.SetCurrent("a", 1)
+	ps.SetCurrent("b", 2)
+	ps.DeleteCurrent("a")
+
+	// 4. Call Commit to persist the changes into the layer below
+	ps.Commit()
+	if len(ps.patches) != 2 {
+		t.Fatalf("Expected 2 patches after Commit(), got %d", len(ps.patches))
+	}
+
+	// 5. The committed changes should still be visible in the merged view
+	wantMap := map[string]int{"b": 2}
+	if committedMap := ps.AsMap(); !maps.Equal(wantMap, committedMap) {
+		t.Errorf("AsMap() after Commit() got %v, want %v", committedMap, wantMap)
+	}
+
+	// 6. Call Revert to drop the layer the changes were committed into
+	ps.Revert()
+	if len(ps.patches) != 1 {
+		t.Fatalf("Expected 1 patch after Revert(), got %d", len(ps.patches))
+	}
+
+	// 7. Only the empty base layer remains, so the state is the empty map
+	if finalMap := ps.AsMap(); len(finalMap) != 0 {
+		t.Errorf("AsMap() after Revert() got %v, want empty", finalMap)
+	}
+}
+
+func TestPatchSetFindValue(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers []map[string]*int
+		searchKey   string
+		wantValue   int
+		wantFound   bool
+	}{
+		"EmptyPatchSet_KeyNotFound": {
+			patchLayers: []map[string]*int{},
+			searchKey:   "a",
+			wantFound:   false,
+		},
+		"SingleLayer_KeyFound": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantValue:   1,
+			wantFound:   true,
+		},
+		"SingleLayer_KeyNotFound": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			searchKey:   "x",
+			wantFound:   false,
+		},
+		"SingleLayer_KeyDeleted": {
+			patchLayers: []map[string]*int{{"a": nil, "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantFound:   false,
+		},
+		"MultiLayer_KeyInTopLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": ptr.To(10), "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantValue:   10,
+			wantFound:   true,
+		},
+		"MultiLayer_KeyInBottomLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(2)}, {"b": ptr.To(22)}},
+			searchKey:   "a",
+			wantValue:   1,
+			wantFound:   true,
+		},
+		"MultiLayer_KeyOverwrittenThenDeleted": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": ptr.To(10)}, {"a": nil, "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantFound:   false,
+		},
+		"MultiLayer_KeyDeletedThenAddedBack": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": nil}, {"a": ptr.To(100), "b": ptr.To(2)}},
+			searchKey:   "a",
+			wantValue:   100,
+			wantFound:   true,
+		},
+		"MultiLayer_KeyNotFoundInAnyLayer": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			searchKey:   "x",
+			wantFound:   false,
+		},
+		"MultiLayer_KeyDeletedInMiddleLayer_NotFound": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"a": nil, "b": ptr.To(2)}, {"c": ptr.To(3)}},
+			searchKey:   "a",
+			wantFound:   false,
+		},
+		"MultiLayer_KeyPresentInBase_DeletedInOverlay_ReAddedInTop": {
+			patchLayers: []map[string]*int{
+				{"k1": ptr.To(1)},
+				{"k1": nil},
+				{"k1": ptr.To(111)},
+			},
+			searchKey: "k1",
+			wantValue: 111,
+			wantFound: true,
+		},
+	}
+
+	for testName, tc := range tests {
+		t.Run(testName, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			val, found := ps.FindValue(tc.searchKey)
+
+			if found != tc.wantFound || (found && val != tc.wantValue) {
+				t.Errorf("FindValue(%q) got val=%v, found=%v; want val=%v, found=%v", tc.searchKey, val, found, tc.wantValue, tc.wantFound)
+			}
+		})
+	}
+}
+
+func TestPatchSetOperations(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers    []map[string]*int
+		mutatePatchSet func(ps *PatchSet[string, int])
+		searchKey      string
+		wantInCurrent  bool
+	}{
+		"SetCurrent_NewKey_InitiallyEmptyPatchSet": {
+			patchLayers: []map[string]*int{},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.SetCurrent("a", 1)
+			},
+			searchKey:     "a",
+			wantInCurrent: true,
+		},
+		"SetCurrent_NewKey_OnExistingEmptyLayer": {
+			patchLayers: []map[string]*int{{}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.SetCurrent("a", 1)
+			},
+			searchKey:     "a",
+			wantInCurrent: true,
+		},
+		"SetCurrent_OverwriteKey_InCurrentPatch": {
+			patchLayers: []map[string]*int{{"a": ptr.To(10)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.SetCurrent("a", 1)
+			},
+			searchKey:     "a",
+			wantInCurrent: true,
+		},
+		"SetCurrent_OverwriteKey_InLowerPatch_AfterFork": {
+			patchLayers: []map[string]*int{{"a": ptr.To(10)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.Fork()
+				ps.SetCurrent("a", 1)
+			},
+			searchKey:     "a",
+			wantInCurrent: true,
+		},
+		"DeleteCurrent_ExistingKey_InCurrentPatch": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.DeleteCurrent("a")
+			},
+			searchKey:     "a",
+			wantInCurrent: false,
+		},
+		"DeleteCurrent_ExistingKey_InLowerPatch_AfterFork": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.Fork()
+				ps.DeleteCurrent("a")
+			},
+			searchKey:     "a",
+			wantInCurrent: false,
+		},
+		"DeleteCurrent_NonExistentKey_OnExistingEmptyLayer": {
+			patchLayers: []map[string]*int{{}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.DeleteCurrent("x")
+			},
+			searchKey:     "x",
+			wantInCurrent: false,
+		},
+		"DeleteCurrent_NonExistentKey_InitiallyEmptyPatchSet": {
+			patchLayers: []map[string]*int{}, // Starts with no layers
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.DeleteCurrent("x")
+			},
+			searchKey:     "x",
+			wantInCurrent: false,
+		},
+		"InCurrentPatch_KeySetInCurrent": {
+			patchLayers:    []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {},
+			searchKey:      "a",
+			wantInCurrent:  true,
+		},
+		"InCurrentPatch_KeySetInLower_NotInCurrent_AfterFork": {
+			patchLayers:    []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) { ps.Fork() },
+			searchKey:      "a",
+			wantInCurrent:  false,
+		},
+		"InCurrentPatch_KeyDeletedInCurrent": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.DeleteCurrent("a")
+			},
+			searchKey:     "a",
+			wantInCurrent: false, // Get in current patch won't find it.
+		},
+		"InCurrentPatch_NonExistentKey_InitiallyEmptyPatchSet": {
+			patchLayers:    []map[string]*int{},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {},
+			searchKey:      "a",
+			wantInCurrent:  false,
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			tc.mutatePatchSet(ps)
+
+			inCurrent := ps.InCurrentPatch(tc.searchKey)
+			if inCurrent != tc.wantInCurrent {
+				t.Errorf("InCurrentPatch(%q): got %v, want %v", tc.searchKey, inCurrent, tc.wantInCurrent)
+			}
+		})
+	}
+}
+
+func TestPatchSetCache(t *testing.T) {
+	tests := map[string]struct {
+		patchLayers     []map[string]*int
+		mutatePatchSet  func(ps *PatchSet[string, int])
+		wantCache       map[string]*int
+		wantCacheInSync bool
+	}{
+		"Initial_EmptyPatchSet": {
+			patchLayers:     []map[string]*int{},
+			mutatePatchSet:  func(ps *PatchSet[string, int]) {},
+			wantCache:       map[string]*int{},
+			wantCacheInSync: false,
+		},
+		"Initial_WithData_NoCacheAccess": {
+			patchLayers:     []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet:  func(ps *PatchSet[string, int]) {},
+			wantCache:       map[string]*int{},
+			wantCacheInSync: false,
+		},
+		"FindValue_PopulatesCacheForKey": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1)},
+			wantCacheInSync: false,
+		},
+		"FindValue_DeletedKey_PopulatesCacheWithNil": {
+			patchLayers: []map[string]*int{{"a": nil, "b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+			},
+			wantCache:       map[string]*int{"a": nil},
+			wantCacheInSync: false,
+		},
+		"AsMap_PopulatesAndSyncsCache": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1), "b": nil, "c": ptr.To(3)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.AsMap()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1), "c": ptr.To(3)}, // Cache does not necessarily track deletions like 'b' key
+			wantCacheInSync: true,
+		},
+		"SetCurrent_UpdatesCache_NewKey": {
+			patchLayers: []map[string]*int{{}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.SetCurrent("x", 10)
+			},
+			wantCache:       map[string]*int{"x": ptr.To(10)},
+			wantCacheInSync: false,
+		},
+		"SetCurrent_UpdatesCache_OverwriteKey": {
+			patchLayers: []map[string]*int{{"x": ptr.To(5)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("x")
+				ps.SetCurrent("x", 10)
+			},
+			wantCache:       map[string]*int{"x": ptr.To(10)},
+			wantCacheInSync: false,
+		},
+		"DeleteCurrent_UpdatesCache": {
+			patchLayers: []map[string]*int{{"x": ptr.To(5)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("x")
+				ps.DeleteCurrent("x")
+			},
+			wantCache:       map[string]*int{"x": nil},
+			wantCacheInSync: false,
+		},
+		"Revert_ClearsAffectedCacheEntries_And_SetsCacheNotInSync": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2), "a": ptr.To(11)}}, // Layer 0: a=1; Layer 1: b=2, a=11
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+				ps.FindValue("b")
+				ps.Revert()
+			},
+			wantCache:       map[string]*int{},
+			wantCacheInSync: false,
+		},
+		"Revert_OnSyncedCache_SetsCacheNotInSync": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.AsMap()
+				ps.Revert()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1)},
+			wantCacheInSync: false,
+		},
+		"Commit_DoesNotInvalidateCache_IfValuesConsistent": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+				ps.FindValue("b")
+				ps.Commit()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1), "b": ptr.To(2)},
+			wantCacheInSync: false,
+		},
+		"Commit_OnSyncedCache_KeepsCacheInSync": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}, {"b": ptr.To(2)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.AsMap()
+				ps.Commit()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1), "b": ptr.To(2)},
+			wantCacheInSync: true,
+		},
+		"Fork_DoesNotInvalidateCache": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.FindValue("a")
+				ps.Fork()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1)},
+			wantCacheInSync: false,
+		},
+		"Fork_OnSyncedCache_KeepsCacheInSync": {
+			patchLayers: []map[string]*int{{"a": ptr.To(1)}},
+			mutatePatchSet: func(ps *PatchSet[string, int]) {
+				ps.AsMap()
+				ps.Fork()
+			},
+			wantCache:       map[string]*int{"a": ptr.To(1)},
+			wantCacheInSync: true,
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			ps := buildTestPatchSet(t, tc.patchLayers)
+			tc.mutatePatchSet(ps)
+
+			if !maps.EqualFunc(ps.cache, tc.wantCache, func(a, b *int) bool {
+				if a == nil && b == nil {
+					return true
+				}
+				if a == nil || b == nil {
+					return false
+				}
+				return *a == *b
+			}) {
+				t.Errorf("Cache content mismatch: got %v, want %v", ps.cache, tc.wantCache)
+			}
+
+			if ps.cacheInSync != tc.wantCacheInSync {
+				t.Errorf("cacheInSync status mismatch: got %v, want %v", ps.cacheInSync, tc.wantCacheInSync)
+			}
+		})
+	}
+}
+
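+// buildTestPatchSet builds a PatchSet with one patch layer per entry of patchLayers,
+// in order. A nil value within a layer marks that key as deleted in that layer.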
+func buildTestPatchSet[K comparable, V any](t *testing.T, patchLayers []map[K]*V) *PatchSet[K, V] {
+	t.Helper()
+
+	patchesNumber := len(patchLayers)
+	patches := make([]*Patch[K, V], patchesNumber)
+	for i := 0; i < patchesNumber; i++ {
+		layerMap := patchLayers[i]
+		currentPatch := NewPatch[K, V]()
+		for k, vPtr := range layerMap {
+			if vPtr != nil {
+				currentPatch.Set(k, *vPtr)
+			} else {
+				currentPatch.Delete(k)
+			}
+		}
+		patches[i] = currentPatch
+	}
+
+	return NewPatchSet(patches...)
+}
diff --git a/cluster-autoscaler/simulator/dynamicresources/provider/provider.go b/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
index 7c6cddac31fd31e62200fa57453f6a511c180db5..02e02562b93bbca44af1ff2d1099552a095fffb2 100644
--- a/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
+++ b/cluster-autoscaler/simulator/dynamicresources/provider/provider.go
@@ -48,10 +48,10 @@ func NewProvider(claims allObjectsLister[*resourceapi.ResourceClaim], slices all
 }
 
 // Snapshot returns a snapshot of all DRA resources at a ~single point in time.
-func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
+func (p *Provider) Snapshot() (*drasnapshot.Snapshot, error) {
 	claims, err := p.resourceClaims.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	claimMap := make(map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim)
 	for _, claim := range claims {
@@ -60,7 +60,7 @@ func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
 
 	slices, err := p.resourceSlices.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	slicesMap := make(map[string][]*resourceapi.ResourceSlice)
 	var nonNodeLocalSlices []*resourceapi.ResourceSlice
@@ -74,7 +74,7 @@ func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) {
 
 	classes, err := p.deviceClasses.ListAll()
 	if err != nil {
-		return drasnapshot.Snapshot{}, err
+		return nil, err
 	}
 	classMap := make(map[string]*resourceapi.DeviceClass)
 	for _, class := range classes {
diff --git a/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go b/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
index d8ff91cab1b1a79ea24fe11d5616bfc067eead3a..1e6e608004b29dae708e9726d21ed867a9047471 100644
--- a/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
@@ -61,7 +61,7 @@ func TestProviderSnapshot(t *testing.T) {
 		triggerSlicesError  bool
 		classes             []*resourceapi.DeviceClass
 		triggerClassesError bool
-		wantSnapshot        drasnapshot.Snapshot
+		wantSnapshot        *drasnapshot.Snapshot
 		wantErr             error
 	}{
 		{
@@ -133,7 +133,7 @@ func TestProviderSnapshot(t *testing.T) {
 			if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" {
 				t.Fatalf("Provider.Snapshot(): unexpected error (-want +got): %s", diff)
 			}
-			if diff := cmp.Diff(tc.wantSnapshot, snapshot, cmp.AllowUnexported(drasnapshot.Snapshot{}), cmpopts.EquateEmpty()); diff != "" {
+			if diff := cmp.Diff(tc.wantSnapshot, snapshot, drasnapshot.SnapshotFlattenedComparer()); diff != "" {
 				t.Fatalf("Provider.Snapshot(): snapshot differs from expected (-want +got): %s", diff)
 			}
 		})
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
index f9e2a3f2b5ba7bdd676cf6bb4ab78e8e28f6da5b..cc9007e213310086613e4ea33f4e09aadf413b04 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot.go
@@ -18,13 +18,16 @@ package snapshot
 
 import (
 	"fmt"
+	"maps"
 
 	apiv1 "k8s.io/api/core/v1"
 	resourceapi "k8s.io/api/resource/v1beta1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/common"
 	drautils "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
 	resourceclaim "k8s.io/dynamic-resource-allocation/resourceclaim"
+	"k8s.io/klog/v2"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
@@ -43,54 +46,67 @@ func GetClaimId(claim *resourceapi.ResourceClaim) ResourceClaimId {
 // obtained via the Provider. Then, it can be modified using the exposed methods, to simulate scheduling actions
 // in the cluster.
 type Snapshot struct {
-	resourceClaimsById         map[ResourceClaimId]*resourceapi.ResourceClaim
-	resourceSlicesByNodeName   map[string][]*resourceapi.ResourceSlice
-	nonNodeLocalResourceSlices []*resourceapi.ResourceSlice
-	deviceClasses              map[string]*resourceapi.DeviceClass
+	resourceClaims *common.PatchSet[ResourceClaimId, *resourceapi.ResourceClaim]
+	resourceSlices *common.PatchSet[string, []*resourceapi.ResourceSlice]
+	deviceClasses  *common.PatchSet[string, *resourceapi.DeviceClass]
 }
 
+// nonNodeLocalResourceSlicesIdentifier is a special key used in the resourceSlices patchSet
+// to store ResourceSlices that do not apply to any specific node. The value is
+// deliberately not a valid Kubernetes node name, so it cannot collide with real node keys.
+const nonNodeLocalResourceSlicesIdentifier = "--NON-LOCAL--"
+
 // NewSnapshot returns a Snapshot created from the provided data.
-func NewSnapshot(claims map[ResourceClaimId]*resourceapi.ResourceClaim, nodeLocalSlices map[string][]*resourceapi.ResourceSlice, globalSlices []*resourceapi.ResourceSlice, deviceClasses map[string]*resourceapi.DeviceClass) Snapshot {
-	if claims == nil {
-		claims = map[ResourceClaimId]*resourceapi.ResourceClaim{}
-	}
-	if nodeLocalSlices == nil {
-		nodeLocalSlices = map[string][]*resourceapi.ResourceSlice{}
-	}
-	if deviceClasses == nil {
-		deviceClasses = map[string]*resourceapi.DeviceClass{}
+func NewSnapshot(claims map[ResourceClaimId]*resourceapi.ResourceClaim, nodeLocalSlices map[string][]*resourceapi.ResourceSlice, nonNodeLocalSlices []*resourceapi.ResourceSlice, deviceClasses map[string]*resourceapi.DeviceClass) *Snapshot {
+	slices := make(map[string][]*resourceapi.ResourceSlice, len(nodeLocalSlices)+1)
+	maps.Copy(slices, nodeLocalSlices)
+	slices[nonNodeLocalResourceSlicesIdentifier] = nonNodeLocalSlices
+
+	claimsPatch := common.NewPatchFromMap(claims)
+	slicesPatch := common.NewPatchFromMap(slices)
+	devicesPatch := common.NewPatchFromMap(deviceClasses)
+	return &Snapshot{
+		resourceClaims: common.NewPatchSet(claimsPatch),
+		resourceSlices: common.NewPatchSet(slicesPatch),
+		deviceClasses:  common.NewPatchSet(devicesPatch),
 	}
-	return Snapshot{
-		resourceClaimsById:         claims,
-		resourceSlicesByNodeName:   nodeLocalSlices,
-		nonNodeLocalResourceSlices: globalSlices,
-		deviceClasses:              deviceClasses,
+}
+
+// NewEmptySnapshot returns a zero-initialized Snapshot.
+func NewEmptySnapshot() *Snapshot {
+	claimsPatch := common.NewPatch[ResourceClaimId, *resourceapi.ResourceClaim]()
+	slicesPatch := common.NewPatch[string, []*resourceapi.ResourceSlice]()
+	devicesPatch := common.NewPatch[string, *resourceapi.DeviceClass]()
+	return &Snapshot{
+		resourceClaims: common.NewPatchSet(claimsPatch),
+		resourceSlices: common.NewPatchSet(slicesPatch),
+		deviceClasses:  common.NewPatchSet(devicesPatch),
 	}
 }
 
 // ResourceClaims exposes the Snapshot as schedulerframework.ResourceClaimTracker, in order to interact with
 // the scheduler framework.
-func (s Snapshot) ResourceClaims() schedulerframework.ResourceClaimTracker {
-	return snapshotClaimTracker(s)
+func (s *Snapshot) ResourceClaims() schedulerframework.ResourceClaimTracker {
+	return snapshotClaimTracker{snapshot: s}
 }
 
 // ResourceSlices exposes the Snapshot as schedulerframework.ResourceSliceLister, in order to interact with
 // the scheduler framework.
-func (s Snapshot) ResourceSlices() schedulerframework.ResourceSliceLister {
-	return snapshotSliceLister(s)
+func (s *Snapshot) ResourceSlices() schedulerframework.ResourceSliceLister {
+	return snapshotSliceLister{snapshot: s}
 }
 
 // DeviceClasses exposes the Snapshot as schedulerframework.DeviceClassLister, in order to interact with
 // the scheduler framework.
-func (s Snapshot) DeviceClasses() schedulerframework.DeviceClassLister {
-	return snapshotClassLister(s)
+func (s *Snapshot) DeviceClasses() schedulerframework.DeviceClassLister {
+	return snapshotClassLister{snapshot: s}
 }
 
 // WrapSchedulerNodeInfo wraps the provided *schedulerframework.NodeInfo into an internal *framework.NodeInfo, adding
 // dra information. Node-local ResourceSlices are added to the NodeInfo, and all ResourceClaims referenced by each Pod
 // are added to each PodInfo. Returns an error if any of the Pods is missing a ResourceClaim.
-func (s Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) (*framework.NodeInfo, error) {
-	podExtraInfos := map[types.UID]framework.PodExtraInfo{}
+func (s *Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeInfo) (*framework.NodeInfo, error) {
+	podExtraInfos := make(map[types.UID]framework.PodExtraInfo, len(schedNodeInfo.Pods))
 	for _, pod := range schedNodeInfo.Pods {
 		podClaims, err := s.PodClaims(pod.Pod)
 		if err != nil {
@@ -104,161 +120,251 @@ func (s Snapshot) WrapSchedulerNodeInfo(schedNodeInfo *schedulerframework.NodeIn
 	return framework.WrapSchedulerNodeInfo(schedNodeInfo, nodeSlices, podExtraInfos), nil
 }
 
-// Clone returns a copy of this Snapshot that can be independently modified without affecting this Snapshot.
-// The only mutable objects in the Snapshot are ResourceClaims, so they are deep-copied. The rest is only a
-// shallow copy.
-func (s Snapshot) Clone() Snapshot {
-	result := Snapshot{
-		resourceClaimsById:       map[ResourceClaimId]*resourceapi.ResourceClaim{},
-		resourceSlicesByNodeName: map[string][]*resourceapi.ResourceSlice{},
-		deviceClasses:            map[string]*resourceapi.DeviceClass{},
-	}
-	// The claims are mutable, they have to be deep-copied.
-	for id, claim := range s.resourceClaimsById {
-		result.resourceClaimsById[id] = claim.DeepCopy()
-	}
-	// The rest of the objects aren't mutable, so a shallow copy should be enough.
-	for nodeName, slices := range s.resourceSlicesByNodeName {
-		for _, slice := range slices {
-			result.resourceSlicesByNodeName[nodeName] = append(result.resourceSlicesByNodeName[nodeName], slice)
-		}
-	}
-	for _, slice := range s.nonNodeLocalResourceSlices {
-		result.nonNodeLocalResourceSlices = append(result.nonNodeLocalResourceSlices, slice)
-	}
-	for className, class := range s.deviceClasses {
-		result.deviceClasses[className] = class
-	}
-	return result
-}
-
 // AddClaims adds additional ResourceClaims to the Snapshot. It can be used e.g. if we need to duplicate a Pod that
 // owns ResourceClaims. Returns an error if any of the claims is already tracked in the snapshot.
-func (s Snapshot) AddClaims(newClaims []*resourceapi.ResourceClaim) error {
+func (s *Snapshot) AddClaims(newClaims []*resourceapi.ResourceClaim) error {
 	for _, claim := range newClaims {
-		if _, found := s.resourceClaimsById[GetClaimId(claim)]; found {
+		if _, found := s.resourceClaims.FindValue(GetClaimId(claim)); found {
 			return fmt.Errorf("claim %s/%s already tracked in the snapshot", claim.Namespace, claim.Name)
 		}
 	}
+
 	for _, claim := range newClaims {
-		s.resourceClaimsById[GetClaimId(claim)] = claim
+		s.resourceClaims.SetCurrent(GetClaimId(claim), claim)
 	}
+
 	return nil
 }
 
 // PodClaims returns ResourceClaims objects for all claims referenced by the Pod. If any of the referenced claims
 // isn't tracked in the Snapshot, an error is returned.
-func (s Snapshot) PodClaims(pod *apiv1.Pod) ([]*resourceapi.ResourceClaim, error) {
-	var result []*resourceapi.ResourceClaim
-
-	for _, claimRef := range pod.Spec.ResourceClaims {
-		claim, err := s.claimForPod(pod, claimRef)
-		if err != nil {
-			return nil, fmt.Errorf("error while obtaining ResourceClaim %s for pod %s/%s: %v", claimRef.Name, pod.Namespace, pod.Name, err)
-		}
-		result = append(result, claim)
-	}
-
-	return result, nil
+func (s *Snapshot) PodClaims(pod *apiv1.Pod) ([]*resourceapi.ResourceClaim, error) {
+	return s.findPodClaims(pod, false)
 }
 
 // RemovePodOwnedClaims iterates over all the claims referenced by the Pod, and removes the ones owned by the Pod from the Snapshot.
 // Claims referenced by the Pod but not owned by it are not removed, but the Pod's reservation is removed from them.
 // This method removes all relevant claims that are in the snapshot, and doesn't error out if any of the claims are missing.
-func (s Snapshot) RemovePodOwnedClaims(pod *apiv1.Pod) {
-	for _, podClaimRef := range pod.Spec.ResourceClaims {
-		claimName := claimRefToName(pod, podClaimRef)
-		if claimName == "" {
-			// This most likely means that the Claim hasn't actually been created. Nothing to remove/modify, so continue to the next claim.
-			continue
-		}
-		claimId := ResourceClaimId{Name: claimName, Namespace: pod.Namespace}
-		claim, found := s.resourceClaimsById[claimId]
-		if !found {
-			// The claim isn't tracked in the snapshot for some reason. Nothing to remove/modify, so continue to the next claim.
-			continue
-		}
+func (s *Snapshot) RemovePodOwnedClaims(pod *apiv1.Pod) {
+	claims, err := s.findPodClaims(pod, true)
+	if err != nil {
+		klog.Errorf("Snapshot.RemovePodOwnedClaims ignored an error: %s", err)
+	}
+
+	for _, claim := range claims {
+		claimId := GetClaimId(claim)
 		if err := resourceclaim.IsForPod(pod, claim); err == nil {
-			delete(s.resourceClaimsById, claimId)
-		} else {
-			drautils.ClearPodReservationInPlace(claim, pod)
+			s.resourceClaims.DeleteCurrent(claimId)
+			continue
 		}
+
+		claim := s.ensureClaimWritable(claim)
+		drautils.ClearPodReservationInPlace(claim, pod)
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
 }
 
 // ReservePodClaims adds a reservation for the provided Pod to all the claims it references. If any of the referenced
 // claims isn't tracked in the Snapshot, or if any of the claims are already at maximum reservation count, an error is
 // returned.
-func (s Snapshot) ReservePodClaims(pod *apiv1.Pod) error {
-	claims, err := s.PodClaims(pod)
+func (s *Snapshot) ReservePodClaims(pod *apiv1.Pod) error {
+	claims, err := s.findPodClaims(pod, false)
 	if err != nil {
 		return err
 	}
+
 	for _, claim := range claims {
 		if drautils.ClaimFullyReserved(claim) && !resourceclaim.IsReservedForPod(pod, claim) {
 			return fmt.Errorf("claim %s/%s already has max number of reservations set, can't add more", claim.Namespace, claim.Name)
 		}
 	}
+
 	for _, claim := range claims {
+		claimId := GetClaimId(claim)
+		claim := s.ensureClaimWritable(claim)
 		drautils.AddPodReservationInPlace(claim, pod)
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
+
 	return nil
 }
 
 // UnreservePodClaims removes reservations for the provided Pod from all the claims it references. If any of the referenced
 // claims isn't tracked in the Snapshot, an error is returned. If a claim is owned by the pod, or if the claim has no more reservations,
 // its allocation is cleared.
-func (s Snapshot) UnreservePodClaims(pod *apiv1.Pod) error {
-	claims, err := s.PodClaims(pod)
+func (s *Snapshot) UnreservePodClaims(pod *apiv1.Pod) error {
+	claims, err := s.findPodClaims(pod, false)
 	if err != nil {
 		return err
 	}
+
 	for _, claim := range claims {
-		podOwnedClaim := resourceclaim.IsForPod(pod, claim) == nil
+		claimId := GetClaimId(claim)
+		claim := s.ensureClaimWritable(claim)
 		drautils.ClearPodReservationInPlace(claim, pod)
-		if podOwnedClaim || !drautils.ClaimInUse(claim) {
+		if err := resourceclaim.IsForPod(pod, claim); err == nil || !drautils.ClaimInUse(claim) {
 			drautils.DeallocateClaimInPlace(claim)
 		}
+
+		s.resourceClaims.SetCurrent(claimId, claim)
 	}
 	return nil
 }
 
 // NodeResourceSlices returns all node-local ResourceSlices for the given Node.
-func (s Snapshot) NodeResourceSlices(nodeName string) ([]*resourceapi.ResourceSlice, bool) {
-	slices, found := s.resourceSlicesByNodeName[nodeName]
+func (s *Snapshot) NodeResourceSlices(nodeName string) ([]*resourceapi.ResourceSlice, bool) {
+	slices, found := s.resourceSlices.FindValue(nodeName)
 	return slices, found
 }
 
 // AddNodeResourceSlices adds additional node-local ResourceSlices to the Snapshot. This should be used whenever a Node with
 // node-local ResourceSlices is duplicated in the cluster snapshot.
-func (s Snapshot) AddNodeResourceSlices(nodeName string, slices []*resourceapi.ResourceSlice) error {
-	if _, alreadyInSnapshot := s.resourceSlicesByNodeName[nodeName]; alreadyInSnapshot {
+func (s *Snapshot) AddNodeResourceSlices(nodeName string, slices []*resourceapi.ResourceSlice) error {
+	if _, alreadyInSnapshot := s.NodeResourceSlices(nodeName); alreadyInSnapshot {
 		return fmt.Errorf("node %q ResourceSlices already present", nodeName)
 	}
-	s.resourceSlicesByNodeName[nodeName] = slices
+
+	s.resourceSlices.SetCurrent(nodeName, slices)
 	return nil
 }
 
-// RemoveNodeResourceSlices removes all node-local ResourceSlices for the Node with the given nodeName.
+// RemoveNodeResourceSlices removes all node-local ResourceSlices for the Node with the given node name.
 // It's a no-op if there aren't any slices to remove.
-func (s Snapshot) RemoveNodeResourceSlices(nodeName string) {
-	delete(s.resourceSlicesByNodeName, nodeName)
+func (s *Snapshot) RemoveNodeResourceSlices(nodeName string) {
+	s.resourceSlices.DeleteCurrent(nodeName)
+}
+
+// Commit persists the changes from the topmost layer by merging them into the layer below it.
+func (s *Snapshot) Commit() {
+	s.deviceClasses.Commit()
+	s.resourceClaims.Commit()
+	s.resourceSlices.Commit()
+}
+
+// Revert removes the topmost patch layer for all resource types, discarding
+// any modifications or deletions recorded there.
+func (s *Snapshot) Revert() {
+	s.deviceClasses.Revert()
+	s.resourceClaims.Revert()
+	s.resourceSlices.Revert()
+}
+
+// Fork adds a new, empty patch layer to the top of the stack for all
+// resource types. Subsequent modifications will be recorded in this
+// new layer until Commit() or Revert() is invoked.
+func (s *Snapshot) Fork() {
+	s.deviceClasses.Fork()
+	s.resourceClaims.Fork()
+	s.resourceSlices.Fork()
+}
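+
+// The three methods above are meant to be used together. As an illustrative
+// sketch (the snapshot and pod variables are placeholders), a caller that wants
+// to try out a scheduling change tentatively could do:
+//
+//	snapshot.Fork()
+//	if err := snapshot.ReservePodClaims(pod); err != nil {
+//		snapshot.Revert()
+//		return err
+//	}
+//	snapshot.Commit()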
+
+// listDeviceClasses retrieves all effective DeviceClasses from the snapshot.
+func (s *Snapshot) listDeviceClasses() []*resourceapi.DeviceClass {
+	deviceClasses := s.deviceClasses.AsMap()
+	deviceClassesList := make([]*resourceapi.DeviceClass, 0, len(deviceClasses))
+	for _, class := range deviceClasses {
+		deviceClassesList = append(deviceClassesList, class)
+	}
+
+	return deviceClassesList
+}
+
+// listResourceClaims retrieves all effective ResourceClaims from the snapshot.
+func (s *Snapshot) listResourceClaims() []*resourceapi.ResourceClaim {
+	claims := s.resourceClaims.AsMap()
+	claimsList := make([]*resourceapi.ResourceClaim, 0, len(claims))
+	for _, claim := range claims {
+		claimsList = append(claimsList, claim)
+	}
+
+	return claimsList
+}
+
+// configureResourceClaim updates or adds a ResourceClaim in the current patch layer.
+// This is typically used internally when a claim's state (like allocation) changes.
+func (s *Snapshot) configureResourceClaim(claim *resourceapi.ResourceClaim) {
+	claimId := ResourceClaimId{Name: claim.Name, Namespace: claim.Namespace}
+	s.resourceClaims.SetCurrent(claimId, claim)
+}
+
+// getResourceClaim retrieves a specific ResourceClaim by its ID from the snapshot.
+func (s *Snapshot) getResourceClaim(claimId ResourceClaimId) (*resourceapi.ResourceClaim, bool) {
+	return s.resourceClaims.FindValue(claimId)
 }
 
-func (s Snapshot) claimForPod(pod *apiv1.Pod, claimRef apiv1.PodResourceClaim) (*resourceapi.ResourceClaim, error) {
-	claimName := claimRefToName(pod, claimRef)
-	if claimName == "" {
-		return nil, fmt.Errorf("couldn't determine ResourceClaim name")
+// listResourceSlices retrieves all effective ResourceSlices from the snapshot.
+func (s *Snapshot) listResourceSlices() []*resourceapi.ResourceSlice {
+	resourceSlices := s.resourceSlices.AsMap()
+	resourceSlicesList := make([]*resourceapi.ResourceSlice, 0, len(resourceSlices))
+	for _, nodeSlices := range resourceSlices {
+		resourceSlicesList = append(resourceSlicesList, nodeSlices...)
 	}
 
-	claim, found := s.resourceClaimsById[ResourceClaimId{Name: claimName, Namespace: pod.Namespace}]
-	if !found {
-		return nil, fmt.Errorf("couldn't find ResourceClaim %q", claimName)
+	return resourceSlicesList
+}
+
+// getDeviceClass retrieves a specific DeviceClass by its name from the snapshot.
+func (s *Snapshot) getDeviceClass(className string) (*resourceapi.DeviceClass, bool) {
+	return s.deviceClasses.FindValue(className)
+}
+
+// findPodClaims retrieves all ResourceClaim objects referenced by a given pod.
+// If ignoreNotTracked is true, it skips claims not found in the snapshot; otherwise, it returns an error.
+func (s *Snapshot) findPodClaims(pod *apiv1.Pod, ignoreNotTracked bool) ([]*resourceapi.ResourceClaim, error) {
+	result := make([]*resourceapi.ResourceClaim, 0, len(pod.Spec.ResourceClaims))
+	for _, claimRef := range pod.Spec.ResourceClaims {
+		claimName := claimRefToName(pod, claimRef)
+		if claimName == "" {
+			if !ignoreNotTracked {
+				return nil, fmt.Errorf(
+					"error while obtaining ResourceClaim %s for pod %s/%s: couldn't determine ResourceClaim name",
+					claimRef.Name,
+					pod.Namespace,
+					pod.Name,
+				)
+			}
+
+			continue
+		}
+
+		claimId := ResourceClaimId{Name: claimName, Namespace: pod.Namespace}
+		claim, found := s.resourceClaims.FindValue(claimId)
+		if !found {
+			if !ignoreNotTracked {
+				return nil, fmt.Errorf(
+					"error while obtaining ResourceClaim %s for pod %s/%s: couldn't find ResourceClaim in the snapshot",
+					claimRef.Name,
+					pod.Namespace,
+					pod.Name,
+				)
+			}
+
+			continue
+		}
+
+		result = append(result, claim)
+	}
+
+	return result, nil
+}
+
+// ensureClaimWritable returns a ResourceClaim that is safe to modify in place:
+// if the requested claim is already stored in the current patch layer, the same
+// object is returned; otherwise a deep copy is created. This gives the claim-mutating
+// operations copy-on-write semantics when the claim is not yet tracked in the
+// current layer of the patchset.
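+//
+// As an illustrative sketch, the claim-mutating methods in this file follow
+// the pattern:
+//
+//	claimId := GetClaimId(claim)
+//	claim := s.ensureClaimWritable(claim)
+//	drautils.ClearPodReservationInPlace(claim, pod)
+//	s.resourceClaims.SetCurrent(claimId, claim)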
+func (s *Snapshot) ensureClaimWritable(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
+	claimId := GetClaimId(claim)
+	if s.resourceClaims.InCurrentPatch(claimId) {
+		return claim
 	}
 
-	return claim, nil
+	return claim.DeepCopy()
 }
 
+// claimRefToName determines the actual name of a ResourceClaim based on a PodResourceClaim reference.
+// It first checks if the name is directly specified in the reference. If not, it looks up the name
+// in the pod's status. Returns an empty string if the name cannot be determined.
 func claimRefToName(pod *apiv1.Pod, claimRef apiv1.PodResourceClaim) string {
 	if claimRef.ResourceClaimName != nil {
 		return *claimRef.ResourceClaimName
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
index c1dad60a12cae89e88651069488863244e2e2271..a277303243e414448b3980eb8d4a36015c89898a 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker.go
@@ -25,51 +25,51 @@ import (
 	"k8s.io/dynamic-resource-allocation/structured"
 )
 
-type snapshotClaimTracker Snapshot
+type snapshotClaimTracker struct {
+	snapshot *Snapshot
+}
 
-func (s snapshotClaimTracker) List() ([]*resourceapi.ResourceClaim, error) {
-	var result []*resourceapi.ResourceClaim
-	for _, claim := range s.resourceClaimsById {
-		result = append(result, claim)
-	}
-	return result, nil
+func (ct snapshotClaimTracker) List() ([]*resourceapi.ResourceClaim, error) {
+	return ct.snapshot.listResourceClaims(), nil
 }
 
-func (s snapshotClaimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) {
-	claim, found := s.resourceClaimsById[ResourceClaimId{Name: claimName, Namespace: namespace}]
+func (ct snapshotClaimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) {
+	claimId := ResourceClaimId{Name: claimName, Namespace: namespace}
+	claim, found := ct.snapshot.getResourceClaim(claimId)
 	if !found {
 		return nil, fmt.Errorf("claim %s/%s not found", namespace, claimName)
 	}
 	return claim, nil
 }
 
-func (s snapshotClaimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) {
+func (ct snapshotClaimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) {
 	result := sets.New[structured.DeviceID]()
-	for _, claim := range s.resourceClaimsById {
+	for _, claim := range ct.snapshot.listResourceClaims() {
 		result = result.Union(claimAllocatedDevices(claim))
 	}
 	return result, nil
 }
 
-func (s snapshotClaimTracker) SignalClaimPendingAllocation(claimUid types.UID, allocatedClaim *resourceapi.ResourceClaim) error {
+func (ct snapshotClaimTracker) SignalClaimPendingAllocation(claimUid types.UID, allocatedClaim *resourceapi.ResourceClaim) error {
 	// The DRA scheduler plugin calls this at the end of the scheduling phase, in Reserve. Then, the allocation is persisted via an API
 	// call during the binding phase.
 	//
 	// In Cluster Autoscaler only the scheduling phase is run, so SignalClaimPendingAllocation() is used to obtain the allocation
 	// and persist it in-memory in the snapshot.
-	ref := ResourceClaimId{Name: allocatedClaim.Name, Namespace: allocatedClaim.Namespace}
-	claim, found := s.resourceClaimsById[ref]
+	claimId := ResourceClaimId{Name: allocatedClaim.Name, Namespace: allocatedClaim.Namespace}
+	claim, found := ct.snapshot.getResourceClaim(claimId)
 	if !found {
 		return fmt.Errorf("claim %s/%s not found", allocatedClaim.Namespace, allocatedClaim.Name)
 	}
 	if claim.UID != claimUid {
 		return fmt.Errorf("claim %s/%s: snapshot has UID %q, allocation came for UID %q - shouldn't happenn", allocatedClaim.Namespace, allocatedClaim.Name, claim.UID, claimUid)
 	}
-	s.resourceClaimsById[ref] = allocatedClaim
+
+	ct.snapshot.configureResourceClaim(allocatedClaim)
 	return nil
 }
 
-func (s snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool {
+func (ct snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool {
 	// The DRA scheduler plugin calls this at the beginning of Filter, and fails the filter if true is returned to handle race conditions.
 	//
 	// In the scheduler implementation, ClaimHasPendingAllocation() starts answering true after SignalClaimPendingAllocation()
@@ -81,19 +81,19 @@ func (s snapshotClaimTracker) ClaimHasPendingAllocation(claimUid types.UID) bool
 	return false
 }
 
-func (s snapshotClaimTracker) RemoveClaimPendingAllocation(claimUid types.UID) (deleted bool) {
+func (ct snapshotClaimTracker) RemoveClaimPendingAllocation(claimUid types.UID) (deleted bool) {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.RemoveClaimPendingAllocation() was called - this should never happen")
 }
 
-func (s snapshotClaimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error {
+func (ct snapshotClaimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.AssumeClaimAfterAPICall() was called - this should never happen")
 }
 
-func (s snapshotClaimTracker) AssumedClaimRestore(namespace, claimName string) {
+func (ct snapshotClaimTracker) AssumedClaimRestore(namespace, claimName string) {
 	// This method is only called during the Bind phase of scheduler framework, which is never run by CA. We need to implement
 	// it to satisfy the interface, but it should never be called.
 	panic("snapshotClaimTracker.AssumedClaimRestore() was called - this should never happen")
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
index 273d7c24aa48300871900d007c5faadbbae69dfd..b3d50e0badfd1d83d2adf3784cc068600d76b5f1 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_claim_tracker_test.go
@@ -170,7 +170,7 @@ func TestSnapshotClaimTrackerListAllAllocatedDevices(t *testing.T) {
 				GetClaimId(allocatedClaim2): allocatedClaim2,
 				GetClaimId(claim3):          claim3,
 			},
-			wantDevices: sets.New[structured.DeviceID](
+			wantDevices: sets.New(
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-1"),
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-2"),
 				structured.MakeDeviceID("driver.example.com", "pool-1", "device-3"),
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
index 7beaaba22763b67c671157fba458b71bf2efef66..39affd2847c2270eadaa553b0febf7c8f0696936 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_class_lister.go
@@ -22,18 +22,16 @@ import (
 	resourceapi "k8s.io/api/resource/v1beta1"
 )
 
-type snapshotClassLister Snapshot
+type snapshotClassLister struct {
+	snapshot *Snapshot
+}
 
 func (s snapshotClassLister) List() ([]*resourceapi.DeviceClass, error) {
-	var result []*resourceapi.DeviceClass
-	for _, class := range s.deviceClasses {
-		result = append(result, class)
-	}
-	return result, nil
+	return s.snapshot.listDeviceClasses(), nil
 }
 
 func (s snapshotClassLister) Get(className string) (*resourceapi.DeviceClass, error) {
-	class, found := s.deviceClasses[className]
+	class, found := s.snapshot.getDeviceClass(className)
 	if !found {
 		return nil, fmt.Errorf("DeviceClass %q not found", className)
 	}
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
index de14ded286e422a6e8423d7c2d953597517a6596..65f941d85f97030cea25b047a93127cb3de36721 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_slice_lister.go
@@ -20,15 +20,10 @@ import (
 	resourceapi "k8s.io/api/resource/v1beta1"
 )
 
-type snapshotSliceLister Snapshot
+type snapshotSliceLister struct {
+	snapshot *Snapshot
+}
 
-func (s snapshotSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
-	var result []*resourceapi.ResourceSlice
-	for _, slices := range s.resourceSlicesByNodeName {
-		for _, slice := range slices {
-			result = append(result, slice)
-		}
-	}
-	result = append(result, s.nonNodeLocalResourceSlices...)
-	return result, nil
+func (sl snapshotSliceLister) List() ([]*resourceapi.ResourceSlice, error) {
+	return sl.snapshot.listResourceSlices(), nil
 }
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
index 386765313f7a4e4a12eb2f3a62f91b35ebcd54be..7388cbcc3c029251a6eab7163f933ebd94ba134a 100644
--- a/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go
@@ -68,6 +68,9 @@ var (
 	pod2OwnClaim1 = drautils.TestClaimWithPodOwnership(pod2,
 		&resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "pod2-ownClaim1-abc", UID: "pod2-ownClaim1-abc", Namespace: "default"}},
 	)
+
+	deviceClass1 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class1", UID: "class1-uid"}}
+	deviceClass2 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class2", UID: "class2-uid"}}
 )
 
 func TestSnapshotResourceClaims(t *testing.T) {
@@ -79,7 +82,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 
 		claims map[ResourceClaimId]*resourceapi.ResourceClaim
 
-		claimsModFun        func(snapshot Snapshot) error
+		claimsModFun        func(snapshot *Snapshot) error
 		wantClaimsModFunErr error
 
 		pod              *apiv1.Pod
@@ -141,7 +144,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim2.DeepCopy(), sharedClaim1.DeepCopy()})
 			},
 			wantClaimsModFunErr: cmpopts.AnyError,
@@ -153,7 +156,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{sharedClaim2.DeepCopy(), pod2OwnClaim1.DeepCopy()}); err != nil {
 					return err
 				}
@@ -171,7 +174,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				snapshot.RemovePodOwnedClaims(pod1)
 				return nil
 			},
@@ -189,7 +192,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				snapshot.RemovePodOwnedClaims(pod1)
 				return nil
 			},
@@ -211,7 +214,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing, so this should be an error.
 				return snapshot.ReservePodClaims(pod1)
 			},
@@ -234,7 +237,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing in claims above, so this should be an error.
 				return snapshot.ReservePodClaims(pod1)
 			},
@@ -258,7 +261,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.ReservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -286,7 +289,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				// sharedClaim2 is missing in claims above, so this should be an error.
 				return snapshot.UnreservePodClaims(pod1)
 			},
@@ -309,7 +312,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
 				GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.UnreservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -338,7 +341,7 @@ func TestSnapshotResourceClaims(t *testing.T) {
 				GetClaimId(pod1OwnClaim1): drautils.TestClaimWithAllocation(drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1), nil),
 				GetClaimId(pod1OwnClaim2): drautils.TestClaimWithAllocation(pod1OwnClaim2.DeepCopy(), nil),
 			},
-			claimsModFun: func(snapshot Snapshot) error {
+			claimsModFun: func(snapshot *Snapshot) error {
 				return snapshot.UnreservePodClaims(pod1)
 			},
 			pod: pod1,
@@ -404,7 +407,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 	for _, tc := range []struct {
 		testName string
 
-		slicesModFun        func(snapshot Snapshot) error
+		slicesModFun        func(snapshot *Snapshot) error
 		wantSlicesModFunErr error
 
 		nodeName            string
@@ -426,7 +429,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "AddNodeResourceSlices(): adding slices for a Node that already has slices tracked is an error",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddNodeResourceSlices("node1", []*resourceapi.ResourceSlice{node1Slice1})
 			},
 			wantSlicesModFunErr: cmpopts.AnyError,
@@ -434,7 +437,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "AddNodeResourceSlices(): adding slices for a new Node works correctly",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				return snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{extraNode3Slice1, extraNode3Slice2})
 			},
 			nodeName:            "node3",
@@ -444,7 +447,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "RemoveNodeResourceSlices(): removing slices for a non-existing Node is a no-op",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				snapshot.RemoveNodeResourceSlices("node3")
 				return nil
 			},
@@ -452,7 +455,7 @@ func TestSnapshotResourceSlices(t *testing.T) {
 		},
 		{
 			testName: "RemoveNodeResourceSlices(): removing slices for an existing Node works correctly",
-			slicesModFun: func(snapshot Snapshot) error {
+			slicesModFun: func(snapshot *Snapshot) error {
 				snapshot.RemoveNodeResourceSlices("node2")
 				return nil
 			},
@@ -574,6 +577,7 @@ func TestSnapshotWrapSchedulerNodeInfo(t *testing.T) {
 			cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmp.AllowUnexported(framework.NodeInfo{}, schedulerframework.NodeInfo{}),
 				cmpopts.IgnoreUnexported(schedulerframework.PodInfo{}),
 				test.IgnoreObjectOrder[*resourceapi.ResourceClaim](), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()}
+
 			if diff := cmp.Diff(tc.wantNodeInfo, nodeInfo, cmpOpts...); diff != "" {
 				t.Errorf("Snapshot.WrapSchedulerNodeInfo(): unexpected output (-want +got): %s", diff)
 			}
@@ -581,121 +585,6 @@ func TestSnapshotWrapSchedulerNodeInfo(t *testing.T) {
 	}
 }
 
-func TestSnapshotClone(t *testing.T) {
-	for _, tc := range []struct {
-		testName           string
-		snapshot           Snapshot
-		cloneModFun        func(snapshot Snapshot) error
-		wantModifiedClaims []*resourceapi.ResourceClaim
-		wantModifiedSlices []*resourceapi.ResourceSlice
-	}{
-		{
-			testName: "empty snapshot",
-			snapshot: Snapshot{},
-			cloneModFun: func(snapshot Snapshot) error {
-				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{pod1OwnClaim1.DeepCopy(), pod1OwnClaim2.DeepCopy()}); err != nil {
-					return err
-				}
-				return snapshot.AddNodeResourceSlices("node1", []*resourceapi.ResourceSlice{node1Slice1, node1Slice2})
-			},
-			wantModifiedClaims: []*resourceapi.ResourceClaim{pod1OwnClaim1, pod1OwnClaim2},
-			wantModifiedSlices: []*resourceapi.ResourceSlice{node1Slice1, node1Slice2},
-		},
-		{
-			testName: "non-empty snapshot",
-			snapshot: NewSnapshot(
-				map[ResourceClaimId]*resourceapi.ResourceClaim{
-					GetClaimId(sharedClaim1):  drautils.TestClaimWithPodReservations(sharedClaim1, pod2),
-					GetClaimId(sharedClaim2):  sharedClaim2.DeepCopy(),
-					GetClaimId(sharedClaim3):  drautils.TestClaimWithPodReservations(sharedClaim3, pod2),
-					GetClaimId(pod2OwnClaim1): drautils.TestClaimWithPodOwnership(pod2, drautils.TestClaimWithPodReservations(pod2OwnClaim1, pod2)),
-				},
-				map[string][]*resourceapi.ResourceSlice{
-					"node1": {node1Slice1, node1Slice2},
-					"node2": {node2Slice1, node2Slice2},
-				},
-				[]*resourceapi.ResourceSlice{globalSlice1, globalSlice2}, nil),
-			cloneModFun: func(snapshot Snapshot) error {
-				if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{node3Slice1, node3Slice2}); err != nil {
-					return err
-				}
-				if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{pod1OwnClaim1.DeepCopy(), pod1OwnClaim2.DeepCopy()}); err != nil {
-					return err
-				}
-				if err := snapshot.ReservePodClaims(pod1); err != nil {
-					return err
-				}
-				snapshot.RemovePodOwnedClaims(pod2)
-				snapshot.RemoveNodeResourceSlices("node1")
-				return nil
-			},
-			wantModifiedSlices: []*resourceapi.ResourceSlice{node2Slice1, node2Slice2, node3Slice1, node3Slice2, globalSlice1, globalSlice2},
-			wantModifiedClaims: []*resourceapi.ResourceClaim{
-				drautils.TestClaimWithPodReservations(pod1OwnClaim1, pod1),
-				drautils.TestClaimWithPodReservations(pod1OwnClaim2, pod1),
-				drautils.TestClaimWithPodReservations(sharedClaim1, pod1),
-				drautils.TestClaimWithPodReservations(sharedClaim2, pod1),
-				sharedClaim3,
-			},
-		},
-	} {
-		t.Run(tc.testName, func(t *testing.T) {
-			// Grab the initial state of the snapshot to verify that it doesn't change when the clone is modified.
-			initialClaims, err := tc.snapshot.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			initialSlices, err := tc.snapshot.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-
-			// Clone and verify that the clone is identical to the original.
-			snapshotClone := tc.snapshot.Clone()
-			if diff := cmp.Diff(tc.snapshot, snapshotClone, cmpopts.EquateEmpty(), cmp.AllowUnexported(Snapshot{}, framework.NodeInfo{}, schedulerframework.NodeInfo{})); diff != "" {
-				t.Fatalf("Snapshot.Clone(): snapshot not identical after cloning (-want +got): %s", diff)
-			}
-
-			// Modify the clone.
-			if err := tc.cloneModFun(snapshotClone); err != nil {
-				t.Fatalf("Snapshot: unexpected error during snapshot modification: %v", err)
-			}
-
-			// Verify that the clone changed as expected.
-			modifiedClaims, err := snapshotClone.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			modifiedSlices, err := snapshotClone.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-			if diff := cmp.Diff(tc.wantModifiedClaims, modifiedClaims, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
-				t.Errorf("Snapshot: unexpected ResourceClaim state after modifications (-want +got): %s", diff)
-			}
-			if diff := cmp.Diff(tc.wantModifiedSlices, modifiedSlices, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
-				t.Errorf("Snapshot: unexpected ResourceSlice state after modifications (-want +got): %s", diff)
-			}
-
-			// Verify that the original hasn't changed during clone modifications.
-			initialClaimsAfterCloneMod, err := tc.snapshot.ResourceClaims().List()
-			if err != nil {
-				t.Fatalf("ResourceClaims().List(): unexpected error: %v", err)
-			}
-			initialSlicesAfterCloneMod, err := tc.snapshot.ResourceSlices().List()
-			if err != nil {
-				t.Fatalf("ResourceSlices().List(): unexpected error: %v", err)
-			}
-			if diff := cmp.Diff(initialClaims, initialClaimsAfterCloneMod, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
-				t.Errorf("Snapshot: ResourceClaim state changed in original snapshot during modifications on Clone (-want +got): %s", diff)
-			}
-			if diff := cmp.Diff(initialSlices, initialSlicesAfterCloneMod, cmpopts.EquateEmpty(), test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
-				t.Errorf("Snapshot: ResourceSlice state changed in original snapshot during modifications on Clone (-want +got): %s", diff)
-			}
-		})
-	}
-}
-
 func testPods(count int) []*apiv1.Pod {
 	var result []*apiv1.Pod
 	for i := range count {
@@ -703,3 +592,179 @@ func testPods(count int) []*apiv1.Pod {
 	}
 	return result
 }
+
+func TestSnapshotForkCommitRevert(t *testing.T) {
+	initialClaims := map[ResourceClaimId]*resourceapi.ResourceClaim{
+		GetClaimId(sharedClaim1):  sharedClaim1.DeepCopy(),
+		GetClaimId(pod1OwnClaim1): pod1OwnClaim1.DeepCopy(),
+		GetClaimId(pod1OwnClaim2): pod1OwnClaim2.DeepCopy(),
+	}
+	initialDeviceClasses := map[string]*resourceapi.DeviceClass{deviceClass1.Name: deviceClass1.DeepCopy(), deviceClass2.Name: deviceClass2.DeepCopy()}
+	initialLocalSlices := map[string][]*resourceapi.ResourceSlice{node1Slice1.Spec.NodeName: {node1Slice1.DeepCopy()}}
+	initialGlobalSlices := []*resourceapi.ResourceSlice{globalSlice1.DeepCopy(), globalSlice2.DeepCopy()}
+	initialState := NewSnapshot(initialClaims, initialLocalSlices, initialGlobalSlices, initialDeviceClasses)
+
+	addedClaim := sharedClaim2.DeepCopy()
+	addedNodeSlice := node2Slice1.DeepCopy()
+	podToReserve := pod1.DeepCopy()
+
+	modifiedClaims := map[ResourceClaimId]*resourceapi.ResourceClaim{
+		GetClaimId(sharedClaim1):  drautils.TestClaimWithPodReservations(sharedClaim1, podToReserve),
+		GetClaimId(sharedClaim2):  drautils.TestClaimWithPodReservations(addedClaim, podToReserve),
+		GetClaimId(pod1OwnClaim1): drautils.TestClaimWithPodReservations(pod1OwnClaim1, podToReserve),
+		GetClaimId(pod1OwnClaim2): drautils.TestClaimWithPodReservations(pod1OwnClaim2, podToReserve),
+	}
+	modifiedLocalSlices := map[string][]*resourceapi.ResourceSlice{addedNodeSlice.Spec.NodeName: {addedNodeSlice.DeepCopy()}}
+	// Expected state after applyModifications: node1's local slices are replaced by node2's,
+	// sharedClaim2 is added, and every claim is reserved for pod1; global slices and device classes are unchanged.
+	modifiedState := NewSnapshot(
+		modifiedClaims,
+		modifiedLocalSlices,
+		initialGlobalSlices,
+		initialDeviceClasses,
+	)
+
+	applyModifications := func(t *testing.T, s *Snapshot) {
+		t.Helper()
+
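+		// Add node2's slices and a new shared claim, reserve all of pod1's claims, then drop node1's slices.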
+		addedSlices := []*resourceapi.ResourceSlice{addedNodeSlice.DeepCopy()}
+		if err := s.AddNodeResourceSlices(addedNodeSlice.Spec.NodeName, addedSlices); err != nil {
+			t.Fatalf("failed to add %s resource slices: %v", addedNodeSlice.Spec.NodeName, err)
+		}
+		if err := s.AddClaims([]*resourceapi.ResourceClaim{addedClaim}); err != nil {
+			t.Fatalf("failed to add %s claim: %v", addedClaim.Name, err)
+		}
+		if err := s.ReservePodClaims(podToReserve); err != nil {
+			t.Fatalf("failed to reserve claims for pod %s: %v", podToReserve.Name, err)
+		}
+
+		s.RemoveNodeResourceSlices(node1Slice1.Spec.NodeName)
+	}
+
+	compareSnapshots := func(t *testing.T, want, got *Snapshot, msg string) {
+		t.Helper()
+		if diff := cmp.Diff(want, got, SnapshotFlattenedComparer(), cmpopts.EquateEmpty()); diff != "" {
+			t.Errorf("%s: Snapshot state mismatch (-want +got):\n%s", msg, diff)
+		}
+	}
+
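+	// The subtests below exercise the layered semantics: Fork() starts a new layer of changes,
+	// Revert() discards the most recent layer, and Commit() folds it into the layer beneath it.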
+	t.Run("Fork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		compareSnapshots(t, modifiedState, snapshot, "After Fork and Modify")
+	})
+
+	t.Run("ForkRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Revert()
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Revert")
+	})
+
+	t.Run("ForkCommit", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit()
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Commit")
+	})
+
+	t.Run("ForkForkRevertRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Fork()
+
+		// Apply further modifications in second fork (add claim3, slice3)
+		furtherModifiedClaim3 := sharedClaim3.DeepCopy()
+		furtherModifiedSlice3 := node3Slice1.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{furtherModifiedClaim3}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+		if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{furtherModifiedSlice3}); err != nil {
+			t.Fatalf("AddNodeResourceSlices failed in second fork: %v", err)
+		}
+
+		snapshot.Revert() // Revert second fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert")
+
+		snapshot.Revert() // Revert first fork
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Fork, Modify, Revert, Revert")
+	})
+
+	t.Run("ForkForkCommitRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit() // Commit second fork into first fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Fork, Modify, Commit")
+
+		snapshot.Revert() // Revert first fork (which now contains committed changes)
+		compareSnapshots(t, initialState, snapshot, "After Fork, Fork, Modify, Commit, Revert")
+	})
+
+	t.Run("ForkForkRevertCommit", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Fork()
+		// Apply further modifications in second fork (add claim3, slice3)
+		furtherModifiedClaim3 := sharedClaim3.DeepCopy()
+		furtherModifiedSlice3 := node3Slice1.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{furtherModifiedClaim3}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+		if err := snapshot.AddNodeResourceSlices("node3", []*resourceapi.ResourceSlice{furtherModifiedSlice3}); err != nil {
+			t.Fatalf("AddNodeResourceSlices failed in second fork: %v", err)
+		}
+
+		snapshot.Revert() // Revert second fork
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert")
+
+		snapshot.Commit() // Commit first fork (with original modifications)
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Fork, Modify, Revert, Commit")
+	})
+
+	t.Run("CommitNoFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Commit() // Should be a no-op
+		compareSnapshots(t, initialState, snapshot, "After Commit with no Fork")
+	})
+
+	t.Run("RevertNoFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Revert() // Should be a no-op
+		compareSnapshots(t, initialState, snapshot, "After Revert with no Fork")
+	})
+
+	t.Run("ForkCommitRevert", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Commit()
+		// Now try to revert the committed changes (should be no-op as only base layer exists)
+		snapshot.Revert()
+		compareSnapshots(t, modifiedState, snapshot, "After Fork, Modify, Commit, Revert")
+	})
+
+	t.Run("ForkRevertFork", func(t *testing.T) {
+		snapshot := CloneTestSnapshot(initialState)
+		snapshot.Fork()
+		applyModifications(t, snapshot)
+		snapshot.Revert()
+		compareSnapshots(t, initialState, snapshot, "After Fork, Modify, Revert")
+
+		snapshot.Fork() // Fork again from the reverted (initial) state
+		differentClaim := sharedClaim3.DeepCopy()
+		if err := snapshot.AddClaims([]*resourceapi.ResourceClaim{differentClaim}); err != nil {
+			t.Fatalf("AddClaims failed in second fork: %v", err)
+		}
+
+		expectedState := CloneTestSnapshot(initialState)
+		// Apply the same change to the expected state.
+		if err := expectedState.AddClaims([]*resourceapi.ResourceClaim{differentClaim.DeepCopy()}); err != nil {
+			t.Fatalf("AddClaims failed on expected state: %v", err)
+		}
+
+		compareSnapshots(t, expectedState, snapshot, "After Fork, Modify, Revert, Fork, Modify")
+	})
+}
diff --git a/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go b/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a8f2cc32bfca80258856d4c5dbef15f96883939
--- /dev/null
+++ b/cluster-autoscaler/simulator/dynamicresources/snapshot/test_utils.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package snapshot
+
+import (
+	"github.com/google/go-cmp/cmp"
+	resourceapi "k8s.io/api/resource/v1beta1"
+	"k8s.io/autoscaler/cluster-autoscaler/simulator/common"
+)
+
+// CloneTestSnapshot creates a deep copy of the provided Snapshot.
+// This function is intended for testing purposes only.
+func CloneTestSnapshot(snapshot *Snapshot) *Snapshot {
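+	// Per-type clone helpers passed to ClonePatchSet: keys are plain value types and are copied
+	// as-is, while API objects are deep-copied so the clone is fully independent of the original.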
+	cloneString := func(s string) string { return s }
+	cloneResourceClaimId := func(rc ResourceClaimId) ResourceClaimId { return rc }
+	cloneDeviceClass := func(dc *resourceapi.DeviceClass) *resourceapi.DeviceClass { return dc.DeepCopy() }
+	cloneResourceClaim := func(rc *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { return rc.DeepCopy() }
+	cloneResourceSlices := func(rcs []*resourceapi.ResourceSlice) []*resourceapi.ResourceSlice {
+		clone := make([]*resourceapi.ResourceSlice, len(rcs))
+		for i := range rcs {
+			clone[i] = rcs[i].DeepCopy()
+		}
+		return clone
+	}
+
+	deviceClasses := common.ClonePatchSet(snapshot.deviceClasses, cloneString, cloneDeviceClass)
+	resourceSlices := common.ClonePatchSet(snapshot.resourceSlices, cloneString, cloneResourceSlices)
+	resourceClaims := common.ClonePatchSet(snapshot.resourceClaims, cloneResourceClaimId, cloneResourceClaim)
+
+	return &Snapshot{
+		deviceClasses:  deviceClasses,
+		resourceSlices: resourceSlices,
+		resourceClaims: resourceClaims,
+	}
+}
+
+// SnapshotFlattenedComparer returns a cmp.Option that provides a custom comparer function
+// for comparing two *Snapshot objects based on their underlying data maps. It doesn't
+// compare the underlying patchsets; instead, the flattened objects are compared.
+// This function is intended for testing purposes only.
+func SnapshotFlattenedComparer() cmp.Option {
+	return cmp.Comparer(func(a, b *Snapshot) bool {
+		if a == nil || b == nil {
+			return a == b
+		}
+
+		devicesEqual := cmp.Equal(a.deviceClasses.AsMap(), b.deviceClasses.AsMap())
+		slicesEqual := cmp.Equal(a.resourceSlices.AsMap(), b.resourceSlices.AsMap())
+		claimsEqual := cmp.Equal(a.resourceClaims.AsMap(), b.resourceClaims.AsMap())
+
+		return devicesEqual && slicesEqual && claimsEqual
+	})
+}
diff --git a/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go b/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
index 09cffb6d88f2b69887dc63ab78531169682dce4b..68d029b5b160b59f41be0d9bfc3bfac1979f8c91 100644
--- a/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
+++ b/cluster-autoscaler/simulator/dynamicresources/utils/resource_claims.go
@@ -23,7 +23,7 @@ import (
 	resourceapi "k8s.io/api/resource/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/component-helpers/scheduling/corev1"
-	resourceclaim "k8s.io/dynamic-resource-allocation/resourceclaim"
+	"k8s.io/dynamic-resource-allocation/resourceclaim"
 )
 
 // ClaimAllocated returns whether the provided claim is allocated.