diff --git a/cluster-autoscaler/config/autoscaling_options.go b/cluster-autoscaler/config/autoscaling_options.go
index 6f00fda037865b54c61664cb96ce9e3ab8800a7b..d9d139763a070d7e6ee361477f1ba8bd3a20bd27 100644
--- a/cluster-autoscaler/config/autoscaling_options.go
+++ b/cluster-autoscaler/config/autoscaling_options.go
@@ -104,8 +104,6 @@ type AutoscalingOptions struct {
 	// NodeGroupDefaults are default values for per NodeGroup options.
 	// They will be used any time a specific value is not provided for a given NodeGroup.
 	NodeGroupDefaults NodeGroupAutoscalingOptions
-	// MaxEmptyBulkDelete is a number of empty nodes that can be removed at the same time.
-	MaxEmptyBulkDelete int
 	// MaxNodesTotal sets the maximum number of nodes in the whole cluster
 	MaxNodesTotal int
 	// MaxCoresTotal sets the maximum number of cores in the whole cluster
@@ -198,10 +196,6 @@ type AutoscalingOptions struct {
 	ConfigNamespace string
 	// ClusterName if available
 	ClusterName string
-	// NodeAutoprovisioningEnabled tells whether the node auto-provisioning is enabled for this cluster.
-	NodeAutoprovisioningEnabled bool
-	// MaxAutoprovisionedNodeGroupCount is the maximum number of autoprovisioned groups in the cluster.
-	MaxAutoprovisionedNodeGroupCount int
 	// UnremovableNodeRecheckTimeout is the timeout before we check again a node that couldn't be removed before
 	UnremovableNodeRecheckTimeout time.Duration
 	// Pods with priority below cutoff are expendable. They can be killed without any consideration during scale down and they don't cause scale-up.
@@ -318,15 +312,15 @@ type AutoscalingOptions struct {
 	// It only refers to check capacity ProvisioningRequests, but if not empty, best-effort atomic ProvisioningRequests processing is disabled in this instance.
 	// Not recommended: Until CA 1.35, ProvisioningRequests with this name as prefix in their class will be also processed.
 	CheckCapacityProcessorInstance string
-	// MaxInactivityTime is the maximum time from last recorded autoscaler activity before automatic restart.
+	// MaxInactivityTime is the maximum duration without recorded autoscaler activity before it is considered unhealthy.
 	MaxInactivityTime time.Duration
-	// MaxFailingTime is the maximum time from last recorded successful autoscaler run before automatic restart.
+	// MaxFailingTime is the maximum duration without a successful autoscaler run before it is considered unhealthy.
 	MaxFailingTime time.Duration
 	// DebuggingSnapshotEnabled is used to enable/disable debugging snapshot creation.
 	DebuggingSnapshotEnabled bool
 	// EnableProfiling is debug/pprof endpoint enabled.
 	EnableProfiling bool
-	// Address is the address to expose prometheus metrics.
+	// Address is the address of an auxiliary endpoint exposing process information such as metrics, health checks, and profiling data.
 	Address string
 	// EmitPerNodeGroupMetrics is used to enable/disable emitting per node group metrics.
 	EmitPerNodeGroupMetrics bool
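Reviewer note on the hunks above: `MaxEmptyBulkDelete` duplicated a limit that `MaxScaleDownParallelism` already carries (the deprecated flag's help text pointed there), so any caller still reading the removed field should switch to the surviving one. A minimal sketch, assuming `MaxScaleDownParallelism` remains a top-level `AutoscalingOptions` field; the call site itself is hypothetical:

```go
package main

import "k8s.io/autoscaler/cluster-autoscaler/config"

func main() {
	// Hypothetical call site: a single knob now bounds how many empty
	// nodes can be deleted per iteration, the role the removed
	// MaxEmptyBulkDelete field used to share.
	opts := config.AutoscalingOptions{MaxScaleDownParallelism: 10}
	_ = opts.MaxScaleDownParallelism
}
```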
diff --git a/cluster-autoscaler/config/flags/flags.go b/cluster-autoscaler/config/flags/flags.go
index b0ee51cb6d8fb8ad3686edaf44c452919cc59d9f..0838a8a365221727d60c6debbec31042e6d622a9 100644
--- a/cluster-autoscaler/config/flags/flags.go
+++ b/cluster-autoscaler/config/flags/flags.go
@@ -28,7 +28,6 @@ import (
 	cloudBuilder "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce/localssdsize"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
-	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
 	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander"
 	scheduler_util "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
@@ -228,11 +227,7 @@ var (
 	checkCapacityProcessorInstance               = flag.String("check-capacity-processor-instance", "", "Name of the processor instance. Only ProvisioningRequests that define this name in their parameters with the key \"processorInstance\" will be processed by this CA instance. It only refers to check capacity ProvisioningRequests, but if not empty, best-effort atomic ProvisioningRequests processing is disabled in this instance. Not recommended: Until CA 1.35, ProvisioningRequests with this name as prefix in their class will be also processed.")
 
 	// Deprecated flags
-	_                                = flag.Bool("gce-expander-ephemeral-storage-support", true, "Whether scale-up takes ephemeral storage resources into account for GCE cloud provider (Deprecated, to be removed in 1.30+)")
-	ignoreTaintsFlag                 = multiStringFlag("ignore-taint", "Specifies a taint to ignore in node templates when considering to scale a node group (Deprecated, use startup-taints instead)")
-	maxAutoprovisionedNodeGroupCount = flag.Int("max-autoprovisioned-node-group-count", 15, "The maximum number of autoprovisioned groups in the cluster.This flag is deprecated and will be removed in future releases.")
-	maxEmptyBulkDeleteFlag           = flag.Int("max-empty-bulk-delete", 10, "Maximum number of empty nodes that can be deleted at the same time. DEPRECATED: Use --max-scale-down-parallelism instead.")
-	nodeAutoprovisioningEnabled      = flag.Bool("node-autoprovisioning-enabled", false, "Should CA autoprovision node groups when needed.This flag is deprecated and will be removed in future releases.")
+	ignoreTaintsFlag = multiStringFlag("ignore-taint", "Specifies a taint to ignore in node templates when considering to scale a node group (Deprecated, use startup-taints instead)")
 )
 
 var autoscalingOptions *config.AutoscalingOptions
@@ -264,24 +259,6 @@ func createAutoscalingOptions() config.AutoscalingOptions {
 		klog.Fatalf("Failed to parse flags: %v", err)
 	}
 
-	// in order to avoid inconsistent deletion thresholds for the legacy planner and the new actuator, the max-empty-bulk-delete,
-	// and max-scale-down-parallelism flags must be set to the same value.
-	if isFlagPassed("max-empty-bulk-delete") && !isFlagPassed("max-scale-down-parallelism") {
-		*maxScaleDownParallelismFlag = *maxEmptyBulkDeleteFlag
-		klog.Warning("The max-empty-bulk-delete flag will be deprecated in k8s version 1.29. Please use max-scale-down-parallelism instead.")
-		klog.Infof("Setting max-scale-down-parallelism to %d, based on the max-empty-bulk-delete value %d", *maxScaleDownParallelismFlag, *maxEmptyBulkDeleteFlag)
-	} else if !isFlagPassed("max-empty-bulk-delete") && isFlagPassed("max-scale-down-parallelism") {
-		*maxEmptyBulkDeleteFlag = *maxScaleDownParallelismFlag
-	}
-
-	if isFlagPassed("node-autoprovisioning-enabled") {
-		klog.Warning("The node-autoprovisioning-enabled flag is deprecated and will be removed in k8s version 1.31.")
-	}
-
-	if isFlagPassed("max-autoprovisioned-node-group-count") {
-		klog.Warning("The max-autoprovisioned-node-group-count flag is deprecated and will be removed in k8s version 1.31.")
-	}
-
 	var parsedSchedConfig *scheduler_config.KubeSchedulerConfiguration
 	// if scheduler config flag was set by the user
 	if pflag.CommandLine.Changed(config.SchedulerConfigFileFlag) {
@@ -297,7 +274,7 @@ func createAutoscalingOptions() config.AutoscalingOptions {
 
 	var drainPriorityConfigMap []kubelet_config.ShutdownGracePeriodByPodPriority
 	if isFlagPassed("drain-priority-config") {
-		drainPriorityConfigMap = actuation.ParseShutdownGracePeriodsAndPriorities(*drainPriorityConfig)
+		drainPriorityConfigMap = parseShutdownGracePeriodsAndPriorities(*drainPriorityConfig)
 		if len(drainPriorityConfigMap) == 0 {
 			klog.Fatalf("Invalid configuration, parsing --drain-priority-config")
 		}
@@ -326,7 +303,6 @@ func createAutoscalingOptions() config.AutoscalingOptions {
 		IgnoreMirrorPodsUtilization:      *ignoreMirrorPodsUtilization,
 		MaxBulkSoftTaintCount:            *maxBulkSoftTaintCount,
 		MaxBulkSoftTaintTime:             *maxBulkSoftTaintTime,
-		MaxEmptyBulkDelete:               *maxEmptyBulkDeleteFlag,
 		MaxGracefulTerminationSec:        *maxGracefulTerminationFlag,
 		MaxPodEvictionTime:               *maxPodEvictionTime,
 		MaxNodesTotal:                    *maxNodesTotal,
@@ -353,8 +329,6 @@ func createAutoscalingOptions() config.AutoscalingOptions {
 		BalanceSimilarNodeGroups:         *balanceSimilarNodeGroupsFlag,
 		ConfigNamespace:                  *namespace,
 		ClusterName:                      *clusterName,
-		NodeAutoprovisioningEnabled:      *nodeAutoprovisioningEnabled,
-		MaxAutoprovisionedNodeGroupCount: *maxAutoprovisionedNodeGroupCount,
 		UnremovableNodeRecheckTimeout:    *unremovableNodeRecheckTimeout,
 		ExpendablePodsPriorityCutoff:     *expendablePodsPriorityCutoff,
 		Regional:                         *regional,
@@ -523,3 +497,36 @@ func parseSingleGpuLimit(limits string) (config.GpuLimits, error) {
 	}
 	return parsedGpuLimits, nil
 }
+
+// parseShutdownGracePeriodsAndPriorities parses priorityGracePeriodStr and returns a slice of
+// ShutdownGracePeriodByPodPriority on success. Otherwise, it returns an empty list.
+func parseShutdownGracePeriodsAndPriorities(priorityGracePeriodStr string) []kubelet_config.ShutdownGracePeriodByPodPriority {
+	var priorityGracePeriodMap, emptyMap []kubelet_config.ShutdownGracePeriodByPodPriority
+
+	if priorityGracePeriodStr == "" {
+		return emptyMap
+	}
+	priorityGracePeriodStrArr := strings.Split(priorityGracePeriodStr, ",")
+	for _, item := range priorityGracePeriodStrArr {
+		priorityAndPeriod := strings.Split(item, ":")
+		if len(priorityAndPeriod) != 2 {
+			klog.Errorf("Parsing shutdown grace periods failed because '%s' is not a priority and grace period couple separated by ':'", item)
+			return emptyMap
+		}
+		priority, err := strconv.Atoi(priorityAndPeriod[0])
+		if err != nil {
+			klog.Errorf("Parsing shutdown grace periods and priorities failed: %v", err)
+			return emptyMap
+		}
+		shutDownGracePeriod, err := strconv.Atoi(priorityAndPeriod[1])
+		if err != nil {
+			klog.Errorf("Parsing shutdown grace periods and priorities failed: %v", err)
+			return emptyMap
+		}
+		priorityGracePeriodMap = append(priorityGracePeriodMap, kubelet_config.ShutdownGracePeriodByPodPriority{
+			Priority:                   int32(priority),
+			ShutdownGracePeriodSeconds: int64(shutDownGracePeriod),
+		})
+	}
+	return priorityGracePeriodMap
+}
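For readers skimming the relocated parser: the input is a comma-separated list of `priority:gracePeriodSeconds` pairs with no surrounding whitespace, each pair becoming one `ShutdownGracePeriodByPodPriority` rule; any malformed element makes the whole parse return nil, which `createAutoscalingOptions` treats as fatal (see the hunk above). A minimal same-package sketch; the function is the one added here, the example values are illustrative:

```go
package flags

import "fmt"

func exampleDrainPriorityParsing() {
	// Mirrors the --drain-priority-config flag format.
	rules := parseShutdownGracePeriodsAndPriorities("1000:20,0:60")
	for _, r := range rules {
		fmt.Printf("priority=%d grace=%ds\n", r.Priority, r.ShutdownGracePeriodSeconds)
	}
	// Malformed input such as "1:2, 3:4," or "3:a" yields nil.
}
```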
diff --git a/cluster-autoscaler/config/flags/flags_test.go b/cluster-autoscaler/config/flags/flags_test.go
index dd0724c03f31f3cc150af4cd94c69e5e651d1e60..a10678426bf608310769f1104311313dcb99092b 100644
--- a/cluster-autoscaler/config/flags/flags_test.go
+++ b/cluster-autoscaler/config/flags/flags_test.go
@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	"k8s.io/autoscaler/cluster-autoscaler/config"
+	kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -91,3 +92,57 @@ func TestParseSingleGpuLimit(t *testing.T) {
 		}
 	}
 }
+
+func TestParseShutdownGracePeriodsAndPriorities(t *testing.T) {
+	testCases := []struct {
+		name  string
+		input string
+		want  []kubelet_config.ShutdownGracePeriodByPodPriority
+	}{
+		{
+			name:  "empty input",
+			input: "",
+			want:  nil,
+		},
+		{
+			name:  "Incorrect string - incorrect priority grace period pairs",
+			input: "1:2,34",
+			want:  nil,
+		},
+		{
+			name:  "Incorrect string - trailing ,",
+			input: "1:2, 3:4,",
+			want:  nil,
+		},
+		{
+			name:  "Incorrect string - trailing space",
+			input: "1:2,3:4 ",
+			want:  nil,
+		},
+		{
+			name:  "Non integers - 1",
+			input: "1:2,3:a",
+			want:  nil,
+		},
+		{
+			name:  "Non integers - 2",
+			input: "1:2,3:23.2",
+			want:  nil,
+		},
+		{
+			name:  "parsable input",
+			input: "1:2,3:4",
+			want: []kubelet_config.ShutdownGracePeriodByPodPriority{
+				{1, 2},
+				{3, 4},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			shutdownGracePeriodByPodPriority := parseShutdownGracePeriodsAndPriorities(tc.input)
+			assert.Equal(t, tc.want, shutdownGracePeriodByPodPriority)
+		})
+	}
+}
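A side note on the relocated test: it keeps the positional struct literals from the original file. Keyed literals, purely a stylistic suggestion rather than anything this diff requires, would make the expectations self-describing:

```go
want: []kubelet_config.ShutdownGracePeriodByPodPriority{
	{Priority: 1, ShutdownGracePeriodSeconds: 2},
	{Priority: 3, ShutdownGracePeriodSeconds: 4},
},
```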
diff --git a/cluster-autoscaler/core/scaledown/actuation/priority.go b/cluster-autoscaler/core/scaledown/actuation/priority.go
index 662a783eda1c2f5aaf210a93913858c8e7b09f72..41de26cb3c99d0d40900e14f97d5999b34f13931 100644
--- a/cluster-autoscaler/core/scaledown/actuation/priority.go
+++ b/cluster-autoscaler/core/scaledown/actuation/priority.go
@@ -19,11 +19,8 @@ package actuation
 import (
 	"math"
 	"sort"
-	"strconv"
-	"strings"
 
 	apiv1 "k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
 	kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
 )
 
@@ -73,39 +70,6 @@ func groupIndex(pod *apiv1.Pod, groups []podEvictionGroup) int {
 	return index
 }
 
-// ParseShutdownGracePeriodsAndPriorities parse priorityGracePeriodStr and returns an array of ShutdownGracePeriodByPodPriority if succeeded.
-// Otherwise, returns an empty list
-func ParseShutdownGracePeriodsAndPriorities(priorityGracePeriodStr string) []kubelet_config.ShutdownGracePeriodByPodPriority {
-	var priorityGracePeriodMap, emptyMap []kubelet_config.ShutdownGracePeriodByPodPriority
-
-	if priorityGracePeriodStr == "" {
-		return emptyMap
-	}
-	priorityGracePeriodStrArr := strings.Split(priorityGracePeriodStr, ",")
-	for _, item := range priorityGracePeriodStrArr {
-		priorityAndPeriod := strings.Split(item, ":")
-		if len(priorityAndPeriod) != 2 {
-			klog.Errorf("Parsing shutdown grace periods failed because '%s' is not a priority and grace period couple separated by ':'", item)
-			return emptyMap
-		}
-		priority, err := strconv.Atoi(priorityAndPeriod[0])
-		if err != nil {
-			klog.Errorf("Parsing shutdown grace periods and priorities failed: %v", err)
-			return emptyMap
-		}
-		shutDownGracePeriod, err := strconv.Atoi(priorityAndPeriod[1])
-		if err != nil {
-			klog.Errorf("Parsing shutdown grace periods and priorities failed: %v", err)
-			return emptyMap
-		}
-		priorityGracePeriodMap = append(priorityGracePeriodMap, kubelet_config.ShutdownGracePeriodByPodPriority{
-			Priority:                   int32(priority),
-			ShutdownGracePeriodSeconds: int64(shutDownGracePeriod),
-		})
-	}
-	return priorityGracePeriodMap
-}
-
 // SingleRuleDrainConfig returns an array of ShutdownGracePeriodByPodPriority with a single ShutdownGracePeriodByPodPriority
 func SingleRuleDrainConfig(shutdownGracePeriodSeconds int) []kubelet_config.ShutdownGracePeriodByPodPriority {
 	return []kubelet_config.ShutdownGracePeriodByPodPriority{
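With the parser gone, `SingleRuleDrainConfig` is the helper this package still exports. A minimal sketch of calling it; the assumption (suggested by the retained `math` import, not stated in this diff) is that the single rule applies one grace period across all pod priorities:

```go
package main

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
)

func main() {
	// One blanket rule: every evicted pod gets the same 60s grace period.
	rules := actuation.SingleRuleDrainConfig(60)
	fmt.Printf("rules=%d grace=%ds\n", len(rules), rules[0].ShutdownGracePeriodSeconds)
}
```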
diff --git a/cluster-autoscaler/core/scaledown/actuation/priority_test.go b/cluster-autoscaler/core/scaledown/actuation/priority_test.go
index de743584560c11469650d15f742d515f58dd65f7..da03b9693c869e79861959aa3e603c8162175b5e 100644
--- a/cluster-autoscaler/core/scaledown/actuation/priority_test.go
+++ b/cluster-autoscaler/core/scaledown/actuation/priority_test.go
@@ -183,57 +183,3 @@ func TestGroupByPriority(t *testing.T) {
 	groups := groupByPriority(shutdownGracePeriodByPodPriority, []*apiv1.Pod{p1, p2, p3, p4, p5}, []*apiv1.Pod{p6, p7, p8, p9, p10})
 	assert.Equal(t, wantGroups, groups)
 }
-
-func TestParseShutdownGracePeriodsAndPriorities(t *testing.T) {
-	testCases := []struct {
-		name  string
-		input string
-		want  []kubelet_config.ShutdownGracePeriodByPodPriority
-	}{
-		{
-			name:  "empty input",
-			input: "",
-			want:  nil,
-		},
-		{
-			name:  "Incorrect string - incorrect priority grace period pairs",
-			input: "1:2,34",
-			want:  nil,
-		},
-		{
-			name:  "Incorrect string - trailing ,",
-			input: "1:2, 3:4,",
-			want:  nil,
-		},
-		{
-			name:  "Incorrect string - trailing space",
-			input: "1:2,3:4 ",
-			want:  nil,
-		},
-		{
-			name:  "Non integers - 1",
-			input: "1:2,3:a",
-			want:  nil,
-		},
-		{
-			name:  "Non integers - 2",
-			input: "1:2,3:23.2",
-			want:  nil,
-		},
-		{
-			name:  "parsable input",
-			input: "1:2,3:4",
-			want: []kubelet_config.ShutdownGracePeriodByPodPriority{
-				{1, 2},
-				{3, 4},
-			},
-		},
-	}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			shutdownGracePeriodByPodPriority := ParseShutdownGracePeriodsAndPriorities(tc.input)
-			assert.Equal(t, tc.want, shutdownGracePeriodByPodPriority)
-		})
-	}
-}
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/async_initializer_test.go b/cluster-autoscaler/core/scaleup/orchestrator/async_initializer_test.go
index d89ba532120d87a8232fd6934a23b9ab5f9d8fea..c566328f9853472157c0fdf857c94f3683bbd6fa 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/async_initializer_test.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/async_initializer_test.go
@@ -67,8 +67,7 @@ func TestNodePoolAsyncInitialization(t *testing.T) {
 					return nil
 				}, nil)
 			options := config.AutoscalingOptions{
-				NodeAutoprovisioningEnabled: true,
-				AsyncNodeGroupsEnabled:      true,
+				AsyncNodeGroupsEnabled: true,
 			}
 			listers := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)
 			context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
index 3d0d2f34229cb2bef17e7221397b10c26cb81f2a..d8a170f083b6e2f9f9837ca9618359f3f18948db 100644
--- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
+++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
@@ -826,7 +826,6 @@ func TestNoCreateNodeGroupMaxCoresLimitHit(t *testing.T) {
 	options := defaultOptions
 	options.MaxCoresTotal = 7
 	options.MaxMemoryTotal = 100000
-	options.NodeAutoprovisioningEnabled = true
 
 	largeNode := BuildTestNode("n", 8000, 8000)
 	SetNodeReadyState(largeNode, true, time.Time{})
@@ -1555,11 +1554,9 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
 		}, nil, []string{"T1"}, map[string]*framework.NodeInfo{"T1": ti1})
 
 	options := config.AutoscalingOptions{
-		EstimatorName:                    estimator.BinpackingEstimatorName,
-		MaxCoresTotal:                    5000 * 64,
-		MaxMemoryTotal:                   5000 * 64 * 20,
-		NodeAutoprovisioningEnabled:      true,
-		MaxAutoprovisionedNodeGroupCount: 10,
+		EstimatorName:  estimator.BinpackingEstimatorName,
+		MaxCoresTotal:  5000 * 64,
+		MaxMemoryTotal: 5000 * 64 * 20,
 	}
 	podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
 	listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
@@ -1608,12 +1605,10 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
 		}, nil, []string{"T1"}, map[string]*framework.NodeInfo{"T1": ti1})
 
 	options := config.AutoscalingOptions{
-		BalanceSimilarNodeGroups:         true,
-		EstimatorName:                    estimator.BinpackingEstimatorName,
-		MaxCoresTotal:                    5000 * 64,
-		MaxMemoryTotal:                   5000 * 64 * 20,
-		NodeAutoprovisioningEnabled:      true,
-		MaxAutoprovisionedNodeGroupCount: 10,
+		BalanceSimilarNodeGroups: true,
+		EstimatorName:            estimator.BinpackingEstimatorName,
+		MaxCoresTotal:            5000 * 64,
+		MaxMemoryTotal:           5000 * 64 * 20,
 	}
 	podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
 	listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
@@ -1762,8 +1757,7 @@ func TestScaleupAsyncNodeGroupsEnabled(t *testing.T) {
 		}
 
 		options := config.AutoscalingOptions{
-			NodeAutoprovisioningEnabled: true,
-			AsyncNodeGroupsEnabled:      true,
+			AsyncNodeGroupsEnabled: true,
 		}
 		podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
 		listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
diff --git a/cluster-autoscaler/core/static_autoscaler_test.go b/cluster-autoscaler/core/static_autoscaler_test.go
index b05c914c113e5bc787b54e556e0f92fee8f4aa25..ec0e549dffdca6b8d41f2dfa30d25d8cfbaae3ff 100644
--- a/cluster-autoscaler/core/static_autoscaler_test.go
+++ b/cluster-autoscaler/core/static_autoscaler_test.go
@@ -807,13 +807,11 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) {
 			ScaleDownUtilizationThreshold: 0.5,
 			MaxNodeProvisionTime:          10 * time.Second,
 		},
-		EstimatorName:                    estimator.BinpackingEstimatorName,
-		ScaleDownEnabled:                 true,
-		MaxNodesTotal:                    100,
-		MaxCoresTotal:                    100,
-		MaxMemoryTotal:                   100000,
-		NodeAutoprovisioningEnabled:      true,
-		MaxAutoprovisionedNodeGroupCount: 10,
+		EstimatorName:    estimator.BinpackingEstimatorName,
+		ScaleDownEnabled: true,
+		MaxNodesTotal:    100,
+		MaxCoresTotal:    100,
+		MaxMemoryTotal:   100000,
 	}
 	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
 
diff --git a/cluster-autoscaler/core/test/common.go b/cluster-autoscaler/core/test/common.go
index 8a210f6dcd49a99593ea0d1727594827789ef13c..01164c00c38dfec16476ef23f21cbd87b75ac15b 100644
--- a/cluster-autoscaler/core/test/common.go
+++ b/cluster-autoscaler/core/test/common.go
@@ -248,9 +248,6 @@ func (p *MockAutoprovisioningNodeGroupManager) createNodeGroup(context *context.
 
 // RemoveUnneededNodeGroups removes uneeded node groups
 func (p *MockAutoprovisioningNodeGroupManager) RemoveUnneededNodeGroups(context *context.AutoscalingContext) (removedNodeGroups []cloudprovider.NodeGroup, err error) {
-	if !context.AutoscalingOptions.NodeAutoprovisioningEnabled {
-		return nil, nil
-	}
 	removedNodeGroups = make([]cloudprovider.NodeGroup, 0)
 	nodeGroups := context.CloudProvider.NodeGroups()
 	for _, nodeGroup := range nodeGroups {
diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go
index d4be3d000d70af79ab8ace9844fe088722a958fb..2c1272525bcaf273631f5d6b37a476ef2e4ca3cd 100644
--- a/cluster-autoscaler/main.go
+++ b/cluster-autoscaler/main.go
@@ -18,6 +18,7 @@ package main
 
 import (
 	ctx "context"
+	"fmt"
 	"net/http"
 	"os"
 	"os/signal"
@@ -230,7 +231,6 @@ func buildAutoscaler(context ctx.Context, debuggingSnapshotter debuggingsnapshot
 	}
 
 	// These metrics should be published only once.
-	metrics.UpdateNapEnabled(autoscalingOptions.NodeAutoprovisioningEnabled)
 	metrics.UpdateCPULimitsCores(autoscalingOptions.MinCoresTotal, autoscalingOptions.MaxCoresTotal)
 	metrics.UpdateMemoryLimitsBytes(autoscalingOptions.MinMemoryTotal, autoscalingOptions.MaxMemoryTotal)
 
diff --git a/cluster-autoscaler/metrics/metrics.go b/cluster-autoscaler/metrics/metrics.go
index fe209087f673e3cd7b41857a8f3ecaf7d4168662..85bb9aae08228e48bc887931ed1379d6366a3228 100644
--- a/cluster-autoscaler/metrics/metrics.go
+++ b/cluster-autoscaler/metrics/metrics.go
@@ -384,15 +384,6 @@ var (
 		[]string{"direction", "reason"},
 	)
 
-	/**** Metrics related to NodeAutoprovisioning ****/
-	napEnabled = k8smetrics.NewGauge(
-		&k8smetrics.GaugeOpts{
-			Namespace: caNamespace,
-			Name:      "nap_enabled",
-			Help:      "Whether or not Node Autoprovisioning is enabled. 1 if it is, 0 otherwise.",
-		},
-	)
-
 	nodeGroupCreationCount = k8smetrics.NewCounterVec(
 		&k8smetrics.CounterOpts{
 			Namespace: caNamespace,
@@ -457,7 +448,6 @@ func RegisterAll(emitPerNodeGroupMetrics bool) {
 	legacyregistry.MustRegister(oldUnregisteredNodesRemovedCount)
 	legacyregistry.MustRegister(overflowingControllersCount)
 	legacyregistry.MustRegister(skippedScaleEventsCount)
-	legacyregistry.MustRegister(napEnabled)
 	legacyregistry.MustRegister(nodeGroupCreationCount)
 	legacyregistry.MustRegister(nodeGroupDeletionCount)
 	legacyregistry.MustRegister(pendingNodeDeletions)
@@ -668,15 +658,6 @@ func UpdateUnremovableNodesCount(unremovableReasonCounts map[simulator.Unremovab
 	}
 }
 
-// UpdateNapEnabled records if NodeAutoprovisioning is enabled
-func UpdateNapEnabled(enabled bool) {
-	if enabled {
-		napEnabled.Set(1)
-	} else {
-		napEnabled.Set(0)
-	}
-}
-
 // RegisterNodeGroupCreation registers node group creation
 func RegisterNodeGroupCreation() {
 	RegisterNodeGroupCreationWithLabelValues("")