From 0002157b3a6d3484c05da7e59d79b02de708b3af Mon Sep 17 00:00:00 2001
From: MenD32 <amit.mendelevitch@gmail.com>
Date: Wed, 11 Jun 2025 09:38:49 +0300
Subject: [PATCH] nit: when scheduling fails on topology constraints, skip the
 last node that failed scheduling

Signed-off-by: MenD32 <amit.mendelevitch@gmail.com>
---
 cluster-autoscaler/estimator/binpacking_estimator.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cluster-autoscaler/estimator/binpacking_estimator.go b/cluster-autoscaler/estimator/binpacking_estimator.go
index eb19aed304..10ed1dfa3d 100644
--- a/cluster-autoscaler/estimator/binpacking_estimator.go
+++ b/cluster-autoscaler/estimator/binpacking_estimator.go
@@ -190,7 +190,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
 				// If the pod can't be scheduled on the last node because of topology constraints, we can stop binpacking.
 				// The pod can't be scheduled on any new node either, because it has the same topology constraints.
 				nodeName, err := e.clusterSnapshot.SchedulePodOnAnyNodeMatching(pod, func(nodeInfo *framework.NodeInfo) bool {
-					return true // Node scale-up can cause old nodes to become schedulable, so we check all nodes.
+					return nodeInfo.Node().Name != estimationState.lastNodeName // Skip only the last node, which we already know fails; scale-up can still make other old nodes schedulable.
 				})
 				if err != nil && err.Type() == clustersnapshot.SchedulingInternalError {
 					// Unexpected error.
-- 
GitLab