diff --git a/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go b/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go
index 0a681fd5a903f3ac84dfdc63612ded5d759693e3..92e6a5235abc8f99e668250c2f35780c30821ff1 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go
@@ -50,7 +50,8 @@ type AsyncNodeGroupInitializer struct {
 	atomicScaleUp          bool
 }
 
-func newAsyncNodeGroupInitializer(
+// NewAsyncNodeGroupInitializer creates a new AsyncNodeGroupInitializer instance.
+func NewAsyncNodeGroupInitializer(
 	nodeGroup cloudprovider.NodeGroup,
 	nodeInfo *framework.NodeInfo,
 	scaleUpExecutor *scaleUpExecutor,
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/executor.go b/cluster-autoscaler/core/scaleup/orchestrator/executor.go
index 5c835e5d384d8e8353c3b3ae29938e5b54577445..15851b99594b8a999e4fece32d8b3389b0a8686e 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/executor.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/executor.go
@@ -18,8 +18,6 @@ package orchestrator
 
 import (
 	"fmt"
-	"sort"
-	"strings"
 	"sync"
 	"time"
 
@@ -138,7 +136,7 @@ func (e *scaleUpExecutor) executeScaleUpsParallel(
 			failedNodeGroups[i] = result.info.Group
 			scaleUpErrors[i] = result.err
 		}
-		return combineConcurrentScaleUpErrors(scaleUpErrors), failedNodeGroups
+		return errors.Combine(scaleUpErrors), failedNodeGroups
 	}
 	return nil, nil
 }
@@ -188,65 +186,6 @@ func (e *scaleUpExecutor) executeScaleUp(
 	return nil
 }
 
-func combineConcurrentScaleUpErrors(errs []errors.AutoscalerError) errors.AutoscalerError {
-	if len(errs) == 0 {
-		return nil
-	}
-	if len(errs) == 1 {
-		return errs[0]
-	}
-	uniqueMessages := make(map[string]bool)
-	uniqueTypes := make(map[errors.AutoscalerErrorType]bool)
-	for _, err := range errs {
-		uniqueTypes[err.Type()] = true
-		uniqueMessages[err.Error()] = true
-	}
-	if len(uniqueTypes) == 1 && len(uniqueMessages) == 1 {
-		return errs[0]
-	}
-	// sort to stabilize the results and easier log aggregation
-	sort.Slice(errs, func(i, j int) bool {
-		errA := errs[i]
-		errB := errs[j]
-		if errA.Type() == errB.Type() {
-			return errs[i].Error() < errs[j].Error()
-		}
-		return errA.Type() < errB.Type()
-	})
-	firstErr := errs[0]
-	printErrorTypes := len(uniqueTypes) > 1
-	message := formatMessageFromConcurrentErrors(errs, printErrorTypes)
-	return errors.NewAutoscalerError(firstErr.Type(), message)
-}
-
-func formatMessageFromConcurrentErrors(errs []errors.AutoscalerError, printErrorTypes bool) string {
-	firstErr := errs[0]
-	var builder strings.Builder
-	builder.WriteString(firstErr.Error())
-	builder.WriteString(" ...and other concurrent errors: [")
-	formattedErrs := map[errors.AutoscalerError]bool{
-		firstErr: true,
-	}
-	for _, err := range errs {
-		if _, has := formattedErrs[err]; has {
-			continue
-		}
-		formattedErrs[err] = true
-		var message string
-		if printErrorTypes {
-			message = fmt.Sprintf("[%s] %s", err.Type(), err.Error())
-		} else {
-			message = err.Error()
-		}
-		if len(formattedErrs) > 2 {
-			builder.WriteString(", ")
-		}
-		builder.WriteString(fmt.Sprintf("%q", message))
-	}
-	builder.WriteString("]")
-	return builder.String()
-}
-
 // Checks if all groups are scaled only once.
 // Scaling one group multiple times concurrently may cause problems.
 func checkUniqueNodeGroups(scaleUpInfos []nodegroupset.ScaleUpInfo) errors.AutoscalerError {
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/executor_test.go b/cluster-autoscaler/core/scaleup/orchestrator/executor_test.go
deleted file mode 100644
index a7ef5d60f575a408d4727c54c12836b6dbff5858..0000000000000000000000000000000000000000
--- a/cluster-autoscaler/core/scaleup/orchestrator/executor_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package orchestrator
-
-import (
-	"testing"
-
-	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestCombinedConcurrentScaleUpErrors(t *testing.T) {
-	cloudProviderErr := errors.NewAutoscalerError(errors.CloudProviderError, "provider error")
-	internalErr := errors.NewAutoscalerError(errors.InternalError, "internal error")
-	testCases := []struct {
-		desc        string
-		errors      []errors.AutoscalerError
-		expectedErr errors.AutoscalerError
-	}{
-		{
-			desc:        "no errors",
-			errors:      []errors.AutoscalerError{},
-			expectedErr: nil,
-		},
-		{
-			desc:        "single error",
-			errors:      []errors.AutoscalerError{internalErr},
-			expectedErr: internalErr,
-		},
-		{
-			desc: "two duplicated errors",
-			errors: []errors.AutoscalerError{
-				internalErr,
-				internalErr,
-			},
-			expectedErr: internalErr,
-		},
-		{
-			desc: "two different errors",
-			errors: []errors.AutoscalerError{
-				cloudProviderErr,
-				internalErr,
-			},
-			expectedErr: errors.NewAutoscalerError(
-				errors.CloudProviderError,
-				"provider error ...and other concurrent errors: [\"[internalError] internal error\"]",
-			),
-		},
-		{
-			desc: "two different errors - reverse alphabetical order",
-			errors: []errors.AutoscalerError{
-				internalErr,
-				cloudProviderErr,
-			},
-			expectedErr: errors.NewAutoscalerError(
-				errors.CloudProviderError,
-				"provider error ...and other concurrent errors: [\"[internalError] internal error\"]",
-			),
-		},
-		{
-			desc: "errors with the same type and different messages",
-			errors: []errors.AutoscalerError{
-				errors.NewAutoscalerError(errors.InternalError, "A"),
-				errors.NewAutoscalerError(errors.InternalError, "B"),
-				errors.NewAutoscalerError(errors.InternalError, "C"),
-			},
-			expectedErr: errors.NewAutoscalerError(
-				errors.InternalError,
-				"A ...and other concurrent errors: [\"B\", \"C\"]"),
-		},
-		{
-			desc: "errors with the same type and some duplicated messages",
-			errors: []errors.AutoscalerError{
-				errors.NewAutoscalerError(errors.InternalError, "A"),
-				errors.NewAutoscalerError(errors.InternalError, "B"),
-				errors.NewAutoscalerError(errors.InternalError, "A"),
-			},
-			expectedErr: errors.NewAutoscalerError(
-				errors.InternalError,
-				"A ...and other concurrent errors: [\"B\"]"),
-		},
-		{
-			desc: "some duplicated errors",
-			errors: []errors.AutoscalerError{
-				errors.NewAutoscalerError(errors.CloudProviderError, "A"),
-				errors.NewAutoscalerError(errors.CloudProviderError, "A"),
-				errors.NewAutoscalerError(errors.CloudProviderError, "B"),
-				errors.NewAutoscalerError(errors.InternalError, "A"),
-			},
-			expectedErr: errors.NewAutoscalerError(
-				errors.CloudProviderError,
-				"A ...and other concurrent errors: [\"[cloudProviderError] B\", \"[internalError] A\"]"),
-		},
-		{
-			desc: "different errors with quotes in messages",
-			errors: []errors.AutoscalerError{
-				errors.NewAutoscalerError(errors.InternalError, "\"first\""),
-				errors.NewAutoscalerError(errors.InternalError, "\"second\""),
-			},
-			expectedErr: errors.NewAutoscalerError(
-				errors.InternalError,
-				"\"first\" ...and other concurrent errors: [\"\\\"second\\\"\"]"),
-		},
-	}
-
-	for _, testCase := range testCases {
-		t.Run(testCase.desc, func(t *testing.T) {
-			combinedErr := combineConcurrentScaleUpErrors(testCase.errors)
-			assert.Equal(t, testCase.expectedErr, combinedErr)
-		})
-	}
-}
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go
index d57c2a6299b3f7af7ebb5616de98a1028c6df1a5..58ae1ecd0babcf70a57d26a29b526965b846cb11 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go
@@ -222,7 +222,13 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 			return buildNoOptionsAvailableStatus(markedEquivalenceGroups, skippedNodeGroups, nodeGroups), nil
 		}
 		var scaleUpStatus *status.ScaleUpStatus
-		createNodeGroupResults, scaleUpStatus, aErr = o.CreateNodeGroup(bestOption, nodeInfos, schedulablePodGroups, podEquivalenceGroups, daemonSets, allOrNothing)
+		oldId := bestOption.NodeGroup.Id()
+		if o.autoscalingContext.AsyncNodeGroupsEnabled {
+			initializer := NewAsyncNodeGroupInitializer(bestOption.NodeGroup, nodeInfos[oldId], o.scaleUpExecutor, o.taintConfig, daemonSets, o.processors.ScaleUpStatusProcessor, o.autoscalingContext, allOrNothing)
+			createNodeGroupResults, scaleUpStatus, aErr = o.CreateNodeGroupAsync(bestOption, nodeInfos, schedulablePodGroups, podEquivalenceGroups, daemonSets, initializer)
+		} else {
+			createNodeGroupResults, scaleUpStatus, aErr = o.CreateNodeGroup(bestOption, nodeInfos, schedulablePodGroups, podEquivalenceGroups, daemonSets)
+		}
 		if aErr != nil {
 			return scaleUpStatus, aErr
 		}
@@ -501,46 +507,62 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup(
 	schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
 	podEquivalenceGroups []*equivalence.PodGroup,
 	daemonSets []*appsv1.DaemonSet,
-	allOrNothing bool,
 ) ([]nodegroups.CreateNodeGroupResult, *status.ScaleUpStatus, errors.AutoscalerError) {
-	createNodeGroupResults := make([]nodegroups.CreateNodeGroupResult, 0)
+	oldId := initialOption.NodeGroup.Id()
+	res, aErr := o.processors.NodeGroupManager.CreateNodeGroup(o.autoscalingContext, initialOption.NodeGroup)
+	return o.processCreateNodeGroupResult(initialOption, oldId, nodeInfos, schedulablePodGroups, podEquivalenceGroups, daemonSets, res, aErr)
+}
 
+// CreateNodeGroupAsync tries to create a new node group asynchronously, based on the initialOption.
+func (o *ScaleUpOrchestrator) CreateNodeGroupAsync(
+	initialOption *expander.Option,
+	nodeInfos map[string]*framework.NodeInfo,
+	schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
+	podEquivalenceGroups []*equivalence.PodGroup,
+	daemonSets []*appsv1.DaemonSet,
+	initializer nodegroups.AsyncNodeGroupInitializer,
+) ([]nodegroups.CreateNodeGroupResult, *status.ScaleUpStatus, errors.AutoscalerError) {
 	oldId := initialOption.NodeGroup.Id()
-	var createNodeGroupResult nodegroups.CreateNodeGroupResult
-	var aErr errors.AutoscalerError
-	if o.autoscalingContext.AsyncNodeGroupsEnabled {
-		initializer := newAsyncNodeGroupInitializer(initialOption.NodeGroup, nodeInfos[oldId], o.scaleUpExecutor, o.taintConfig, daemonSets, o.processors.ScaleUpStatusProcessor, o.autoscalingContext, allOrNothing)
-		createNodeGroupResult, aErr = o.processors.NodeGroupManager.CreateNodeGroupAsync(o.autoscalingContext, initialOption.NodeGroup, initializer)
-	} else {
-		createNodeGroupResult, aErr = o.processors.NodeGroupManager.CreateNodeGroup(o.autoscalingContext, initialOption.NodeGroup)
-	}
+	res, aErr := o.processors.NodeGroupManager.CreateNodeGroupAsync(o.autoscalingContext, initialOption.NodeGroup, initializer)
+	return o.processCreateNodeGroupResult(initialOption, oldId, nodeInfos, schedulablePodGroups, podEquivalenceGroups, daemonSets, res, aErr)
+}
+
+func (o *ScaleUpOrchestrator) processCreateNodeGroupResult(
+	initialOption *expander.Option,
+	initialOptionId string,
+	nodeInfos map[string]*framework.NodeInfo,
+	schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
+	podEquivalenceGroups []*equivalence.PodGroup,
+	daemonSets []*appsv1.DaemonSet,
+	result nodegroups.CreateNodeGroupResult,
+	aErr errors.AutoscalerError,
+) ([]nodegroups.CreateNodeGroupResult, *status.ScaleUpStatus, errors.AutoscalerError) {
 	if aErr != nil {
 		status, err := status.UpdateScaleUpError(
 			&status.ScaleUpStatus{FailedCreationNodeGroups: []cloudprovider.NodeGroup{initialOption.NodeGroup}, PodsTriggeredScaleUp: initialOption.Pods},
 			aErr)
-		return createNodeGroupResults, status, err
+		return []nodegroups.CreateNodeGroupResult{}, status, err
 	}
 
-	createNodeGroupResults = append(createNodeGroupResults, createNodeGroupResult)
-	initialOption.NodeGroup = createNodeGroupResult.MainCreatedNodeGroup
+	initialOption.NodeGroup = result.MainCreatedNodeGroup
 
 	// If possible replace candidate node-info with node info based on crated node group. The latter
 	// one should be more in line with nodes which will be created by node group.
-	mainCreatedNodeInfo, aErr := simulator.SanitizedTemplateNodeInfoFromNodeGroup(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, o.taintConfig)
+	mainCreatedNodeInfo, aErr := simulator.SanitizedTemplateNodeInfoFromNodeGroup(result.MainCreatedNodeGroup, daemonSets, o.taintConfig)
 	if aErr == nil {
-		nodeInfos[createNodeGroupResult.MainCreatedNodeGroup.Id()] = mainCreatedNodeInfo
-		schedulablePodGroups[createNodeGroupResult.MainCreatedNodeGroup.Id()] = o.SchedulablePodGroups(podEquivalenceGroups, createNodeGroupResult.MainCreatedNodeGroup, mainCreatedNodeInfo)
+		nodeInfos[result.MainCreatedNodeGroup.Id()] = mainCreatedNodeInfo
+		schedulablePodGroups[result.MainCreatedNodeGroup.Id()] = o.SchedulablePodGroups(podEquivalenceGroups, result.MainCreatedNodeGroup, mainCreatedNodeInfo)
 	} else {
-		klog.Warningf("Cannot build node info for newly created main node group %v; balancing similar node groups may not work; err=%v", createNodeGroupResult.MainCreatedNodeGroup.Id(), aErr)
+		klog.Warningf("Cannot build node info for newly created main node group %v; balancing similar node groups may not work; err=%v", result.MainCreatedNodeGroup.Id(), aErr)
 		// Use node info based on expansion candidate but update Id which likely changed when node group was created.
-		nodeInfos[createNodeGroupResult.MainCreatedNodeGroup.Id()] = nodeInfos[oldId]
-		schedulablePodGroups[createNodeGroupResult.MainCreatedNodeGroup.Id()] = schedulablePodGroups[oldId]
+		nodeInfos[result.MainCreatedNodeGroup.Id()] = nodeInfos[initialOptionId]
+		schedulablePodGroups[result.MainCreatedNodeGroup.Id()] = schedulablePodGroups[initialOptionId]
 	}
-	if oldId != createNodeGroupResult.MainCreatedNodeGroup.Id() {
-		delete(nodeInfos, oldId)
-		delete(schedulablePodGroups, oldId)
+	if initialOptionId != result.MainCreatedNodeGroup.Id() {
+		delete(nodeInfos, initialOptionId)
+		delete(schedulablePodGroups, initialOptionId)
 	}
-	for _, nodeGroup := range createNodeGroupResult.ExtraCreatedNodeGroups {
+	for _, nodeGroup := range result.ExtraCreatedNodeGroups {
 		nodeInfo, aErr := simulator.SanitizedTemplateNodeInfoFromNodeGroup(nodeGroup, daemonSets, o.taintConfig)
 		if aErr != nil {
 			klog.Warningf("Cannot build node info for newly created extra node group %v; balancing similar node groups will not work; err=%v", nodeGroup.Id(), aErr)
@@ -554,7 +576,7 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup(
 	// TODO(lukaszos) when pursuing scalability update this call with one which takes list of changed node groups so we do not
 	//                do extra API calls. (the call at the bottom of ScaleUp() could be also changed then)
 	o.clusterStateRegistry.Recalculate()
-	return createNodeGroupResults, nil, nil
+	return []nodegroups.CreateNodeGroupResult{result}, nil, nil
 }
 
 // SchedulablePodGroups returns a list of pods that could be scheduled
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
index a6ea4d92aaa83ecb20a41fd0d41a7609a7337ddf..3d0d2f34229cb2bef17e7221397b10c26cb81f2a 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
@@ -625,7 +625,7 @@ func TestCloudProviderFailingToScaleUpGroups(t *testing.T) {
 			assert.False(t, result.ScaleUpStatus.WasSuccessful())
 			assert.Equal(t, errors.CloudProviderError, result.ScaleUpError.Type())
 			assert.Equal(t, tc.expectedTotalTargetSizes, result.GroupTargetSizes["ng1"]+result.GroupTargetSizes["ng2"])
-			assert.Equal(t, tc.expectConcurrentErrors, strings.Contains(result.ScaleUpError.Error(), "...and other concurrent errors"))
+			assert.Equal(t, tc.expectConcurrentErrors, strings.Contains(result.ScaleUpError.Error(), "...and other errors"))
 		})
 	}
 }
diff --git a/cluster-autoscaler/utils/errors/errors.go b/cluster-autoscaler/utils/errors/errors.go
index 24bc0c8355c6263a006515256316649bae56ac8b..3252effbc131c38c39f4c925520d0de42b37c338 100644
--- a/cluster-autoscaler/utils/errors/errors.go
+++ b/cluster-autoscaler/utils/errors/errors.go
@@ -18,6 +18,8 @@ package errors
 
 import (
 	"fmt"
+	"sort"
+	"strings"
 )
 
 // AutoscalerErrorType describes a high-level category of a given error
@@ -131,3 +133,63 @@ func (e autoscalerErrorImpl) Type() AutoscalerErrorType {
 func (e autoscalerErrorImpl) AddPrefix(msg string, args ...interface{}) AutoscalerError {
 	return autoscalerErrorImpl{errorType: e.errorType, wrappedErr: e, msg: fmt.Sprintf(msg, args...)}
 }
+
+// Combine combines multiple AutoscalerErrors into a single error to report.
+func Combine(errs []AutoscalerError) AutoscalerError {
+	if len(errs) == 0 {
+		return nil
+	}
+	if len(errs) == 1 {
+		return errs[0]
+	}
+	uniqueMessages := make(map[string]bool)
+	uniqueTypes := make(map[AutoscalerErrorType]bool)
+	for _, err := range errs {
+		uniqueTypes[err.Type()] = true
+		uniqueMessages[err.Error()] = true
+	}
+	if len(uniqueTypes) == 1 && len(uniqueMessages) == 1 {
+		return errs[0]
+	}
+	// Sort the errors to stabilize the result and to allow easier log aggregation.
+	sort.Slice(errs, func(i, j int) bool {
+		errA := errs[i]
+		errB := errs[j]
+		if errA.Type() == errB.Type() {
+			return errs[i].Error() < errs[j].Error()
+		}
+		return errA.Type() < errB.Type()
+	})
+	firstErr := errs[0]
+	printErrorTypes := len(uniqueTypes) > 1
+	message := formatMessageFromErrors(errs, printErrorTypes)
+	return NewAutoscalerError(firstErr.Type(), message)
+}
+
+func formatMessageFromErrors(errs []AutoscalerError, printErrorTypes bool) string {
+	firstErr := errs[0]
+	var builder strings.Builder
+	builder.WriteString(firstErr.Error())
+	builder.WriteString(" ...and other errors: [")
+	formattedErrs := map[AutoscalerError]bool{
+		firstErr: true,
+	}
+	for _, err := range errs {
+		if _, has := formattedErrs[err]; has {
+			continue
+		}
+		formattedErrs[err] = true
+		var message string
+		if printErrorTypes {
+			message = fmt.Sprintf("[%s] %s", err.Type(), err.Error())
+		} else {
+			message = err.Error()
+		}
+		if len(formattedErrs) > 2 {
+			builder.WriteString(", ")
+		}
+		builder.WriteString(fmt.Sprintf("%q", message))
+	}
+	builder.WriteString("]")
+	return builder.String()
+}
diff --git a/cluster-autoscaler/utils/errors/errors_test.go b/cluster-autoscaler/utils/errors/errors_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2aa47d12c604d44f3de5e7c51949f2b879af485
--- /dev/null
+++ b/cluster-autoscaler/utils/errors/errors_test.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCombine(t *testing.T) {
+	cloudProviderErr := NewAutoscalerError(CloudProviderError, "provider error")
+	internalErr := NewAutoscalerError(InternalError, "internal error")
+	testCases := []struct {
+		desc        string
+		errors      []AutoscalerError
+		expectedErr AutoscalerError
+	}{
+		{
+			desc:        "no errors",
+			errors:      []AutoscalerError{},
+			expectedErr: nil,
+		},
+		{
+			desc:        "single error",
+			errors:      []AutoscalerError{internalErr},
+			expectedErr: internalErr,
+		},
+		{
+			desc: "two duplicated errors",
+			errors: []AutoscalerError{
+				internalErr,
+				internalErr,
+			},
+			expectedErr: internalErr,
+		},
+		{
+			desc: "two different errors",
+			errors: []AutoscalerError{
+				cloudProviderErr,
+				internalErr,
+			},
+			expectedErr: NewAutoscalerError(
+				CloudProviderError,
+				"provider error ...and other errors: [\"[internalError] internal error\"]",
+			),
+		},
+		{
+			desc: "two different errors - reverse alphabetical order",
+			errors: []AutoscalerError{
+				internalErr,
+				cloudProviderErr,
+			},
+			expectedErr: NewAutoscalerError(
+				CloudProviderError,
+				"provider error ...and other errors: [\"[internalError] internal error\"]",
+			),
+		},
+		{
+			desc: "errors with the same type and different messages",
+			errors: []AutoscalerError{
+				NewAutoscalerError(InternalError, "A"),
+				NewAutoscalerError(InternalError, "B"),
+				NewAutoscalerError(InternalError, "C"),
+			},
+			expectedErr: NewAutoscalerError(
+				InternalError,
+				"A ...and other errors: [\"B\", \"C\"]"),
+		},
+		{
+			desc: "errors with the same type and some duplicated messages",
+			errors: []AutoscalerError{
+				NewAutoscalerError(InternalError, "A"),
+				NewAutoscalerError(InternalError, "B"),
+				NewAutoscalerError(InternalError, "A"),
+			},
+			expectedErr: NewAutoscalerError(
+				InternalError,
+				"A ...and other errors: [\"B\"]"),
+		},
+		{
+			desc: "some duplicated errors",
+			errors: []AutoscalerError{
+				NewAutoscalerError(CloudProviderError, "A"),
+				NewAutoscalerError(CloudProviderError, "A"),
+				NewAutoscalerError(CloudProviderError, "B"),
+				NewAutoscalerError(InternalError, "A"),
+			},
+			expectedErr: NewAutoscalerError(
+				CloudProviderError,
+				"A ...and other errors: [\"[cloudProviderError] B\", \"[internalError] A\"]"),
+		},
+		{
+			desc: "different errors with quotes in messages",
+			errors: []AutoscalerError{
+				NewAutoscalerError(InternalError, "\"first\""),
+				NewAutoscalerError(InternalError, "\"second\""),
+			},
+			expectedErr: NewAutoscalerError(
+				InternalError,
+				"\"first\" ...and other errors: [\"\\\"second\\\"\"]"),
+		},
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.desc, func(t *testing.T) {
+			combinedErr := Combine(testCase.errors)
+			assert.Equal(t, testCase.expectedErr, combinedErr)
+		})
+	}
+}