diff --git a/cluster-autoscaler/cloudprovider/azure/azure_config.go b/cluster-autoscaler/cloudprovider/azure/azure_config.go
index 0729ee8a2b020616941ea10cfff0831ecbf08c45..f1602e67c3d8a4209e503938fe9be31807b2974c 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_config.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_config.go
@@ -23,6 +23,7 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+
 	"strconv"
 	"strings"
 	"time"
@@ -31,6 +32,7 @@ import (
 	"github.com/Azure/go-autorest/autorest/azure"
 	"k8s.io/klog/v2"
 	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	providerazure "sigs.k8s.io/cloud-provider-azure/pkg/provider"
 	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
 )
 
@@ -38,7 +40,7 @@ const (
 	// The path of deployment parameters for standard vm.
 	deploymentParametersPath = "/var/lib/azure/azuredeploy.parameters.json"
 
-	metadataURL = "http://169.254.169.254/metadata/instance"
+	imdsServerURL = "http://169.254.169.254"
 
 	// backoff
 	backoffRetriesDefault  = 6
@@ -146,7 +148,6 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) {
 		cfg.Cloud = os.Getenv("ARM_CLOUD")
 		cfg.Location = os.Getenv("LOCATION")
 		cfg.ResourceGroup = os.Getenv("ARM_RESOURCE_GROUP")
-		cfg.SubscriptionID = os.Getenv("ARM_SUBSCRIPTION_ID")
 		cfg.TenantID = os.Getenv("ARM_TENANT_ID")
 		cfg.AADClientID = os.Getenv("ARM_CLIENT_ID")
 		cfg.AADClientSecret = os.Getenv("ARM_CLIENT_SECRET")
@@ -157,6 +158,12 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) {
 		cfg.ClusterName = os.Getenv("AZURE_CLUSTER_NAME")
 		cfg.NodeResourceGroup = os.Getenv("AZURE_NODE_RESOURCE_GROUP")
 
+		subscriptionID, err := getSubscriptionIdFromInstanceMetadata()
+		if err != nil {
+			return nil, err
+		}
+		cfg.SubscriptionID = subscriptionID
+
 		useManagedIdentityExtensionFromEnv := os.Getenv("ARM_USE_MANAGED_IDENTITY_EXTENSION")
 		if len(useManagedIdentityExtensionFromEnv) > 0 {
 			cfg.UseManagedIdentityExtension, err = strconv.ParseBool(useManagedIdentityExtensionFromEnv)
@@ -473,3 +480,22 @@ func (cfg *Config) validate() error {
 
 	return nil
 }
+
+// getSubscriptionIdFromInstanceMetadata reads the Subscription ID from the ARM_SUBSCRIPTION_ID environment variable, falling back to the Azure instance metadata service (IMDS).
+func getSubscriptionIdFromInstanceMetadata() (string, error) {
+	subscriptionID, present := os.LookupEnv("ARM_SUBSCRIPTION_ID")
+	if !present {
+		metadataService, err := providerazure.NewInstanceMetadataService(imdsServerURL)
+		if err != nil {
+			return "", err
+		}
+
+		metadata, err := metadataService.GetMetadata(0)
+		if err != nil {
+			return "", err
+		}
+
+		return metadata.Compute.SubscriptionID, nil
+	}
+	return subscriptionID, nil
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..67303c77365bd81b1ec6c3df355211128cedabae
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md
@@ -0,0 +1,9 @@
+# Change History
+
+## Additive Changes
+
+### New Funcs
+
+1. PrivateZoneProperties.MarshalJSON() ([]byte, error)
+1. ProxyResource.MarshalJSON() ([]byte, error)
+1. Resource.MarshalJSON() ([]byte, error)
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json
new file mode 100644
index 0000000000000000000000000000000000000000..af0c1e4a7c9e0518e0ae25b0abe200f8596be094
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json
@@ -0,0 +1,11 @@
+{
+  "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82",
+  "readme": "/_/azure-rest-api-specs/specification/privatedns/resource-manager/readme.md",
+  "tag": "package-2018-09",
+  "use": "@microsoft.azure/autorest.go@2.1.183",
+  "repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
+  "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-2018-09 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/privatedns/resource-manager/readme.md",
+  "additional_properties": {
+    "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION"
+  }
+}
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..48bd23da58c1a288c6e86558d2cdb6848c9d3be5
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go
@@ -0,0 +1,41 @@
+// Package privatedns implements the Azure ARM Privatedns service API version 2018-09-01.
+//
+// The Private DNS Management Client.
+package privatedns
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// DefaultBaseURI is the default URI used for the service Privatedns
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Privatedns.
+type BaseClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint.  Use this when interacting with
+// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+	return BaseClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go
new file mode 100644
index 0000000000000000000000000000000000000000..a4cbf2a65eaa6dbea9467bb0eb9dbfa381c36f2a
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go
@@ -0,0 +1,72 @@
+package privatedns
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+	// Canceled ...
+	Canceled ProvisioningState = "Canceled"
+	// Creating ...
+	Creating ProvisioningState = "Creating"
+	// Deleting ...
+	Deleting ProvisioningState = "Deleting"
+	// Failed ...
+	Failed ProvisioningState = "Failed"
+	// Succeeded ...
+	Succeeded ProvisioningState = "Succeeded"
+	// Updating ...
+	Updating ProvisioningState = "Updating"
+)
+
+// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
+func PossibleProvisioningStateValues() []ProvisioningState {
+	return []ProvisioningState{Canceled, Creating, Deleting, Failed, Succeeded, Updating}
+}
+
+// RecordType enumerates the values for record type.
+type RecordType string
+
+const (
+	// A ...
+	A RecordType = "A"
+	// AAAA ...
+	AAAA RecordType = "AAAA"
+	// CNAME ...
+	CNAME RecordType = "CNAME"
+	// MX ...
+	MX RecordType = "MX"
+	// PTR ...
+	PTR RecordType = "PTR"
+	// SOA ...
+	SOA RecordType = "SOA"
+	// SRV ...
+	SRV RecordType = "SRV"
+	// TXT ...
+	TXT RecordType = "TXT"
+)
+
+// PossibleRecordTypeValues returns an array of possible values for the RecordType const type.
+func PossibleRecordTypeValues() []RecordType {
+	return []RecordType{A, AAAA, CNAME, MX, PTR, SOA, SRV, TXT}
+}
+
+// VirtualNetworkLinkState enumerates the values for virtual network link state.
+type VirtualNetworkLinkState string
+
+const (
+	// Completed ...
+	Completed VirtualNetworkLinkState = "Completed"
+	// InProgress ...
+	InProgress VirtualNetworkLinkState = "InProgress"
+)
+
+// PossibleVirtualNetworkLinkStateValues returns an array of possible values for the VirtualNetworkLinkState const type.
+func PossibleVirtualNetworkLinkStateValues() []VirtualNetworkLinkState {
+	return []VirtualNetworkLinkState{Completed, InProgress}
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c23f9f65fcc8f42079ac17d0445dfba7240d9c0
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go
@@ -0,0 +1,1350 @@
+package privatedns
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"encoding/json"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
+
+// AaaaRecord an AAAA record.
+type AaaaRecord struct {
+	// Ipv6Address - The IPv6 address of this AAAA record.
+	Ipv6Address *string `json:"ipv6Address,omitempty"`
+}
+
+// ARecord an A record.
+type ARecord struct {
+	// Ipv4Address - The IPv4 address of this A record.
+	Ipv4Address *string `json:"ipv4Address,omitempty"`
+}
+
+// CloudError an error response from the service.
+type CloudError struct {
+	// Error - Cloud error body.
+	Error *CloudErrorBody `json:"error,omitempty"`
+}
+
+// CloudErrorBody an error response from the service.
+type CloudErrorBody struct {
+	// Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+	Code *string `json:"code,omitempty"`
+	// Message - A message describing the error, intended to be suitable for display in a user interface.
+	Message *string `json:"message,omitempty"`
+	// Target - The target of the particular error. For example, the name of the property in error.
+	Target *string `json:"target,omitempty"`
+	// Details - A list of additional details about the error.
+	Details *[]CloudErrorBody `json:"details,omitempty"`
+}
+
+// CnameRecord a CNAME record.
+type CnameRecord struct {
+	// Cname - The canonical name for this CNAME record.
+	Cname *string `json:"cname,omitempty"`
+}
+
+// MxRecord an MX record.
+type MxRecord struct {
+	// Preference - The preference value for this MX record.
+	Preference *int32 `json:"preference,omitempty"`
+	// Exchange - The domain name of the mail host for this MX record.
+	Exchange *string `json:"exchange,omitempty"`
+}
+
+// PrivateZone describes a Private DNS zone.
+type PrivateZone struct {
+	autorest.Response `json:"-"`
+	// Etag - The ETag of the zone.
+	Etag *string `json:"etag,omitempty"`
+	// PrivateZoneProperties - Properties of the Private DNS zone.
+	*PrivateZoneProperties `json:"properties,omitempty"`
+	// Tags - Resource tags.
+	Tags map[string]*string `json:"tags"`
+	// Location - The Azure Region where the resource lives
+	Location *string `json:"location,omitempty"`
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'.
+	Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateZone.
+func (pz PrivateZone) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if pz.Etag != nil {
+		objectMap["etag"] = pz.Etag
+	}
+	if pz.PrivateZoneProperties != nil {
+		objectMap["properties"] = pz.PrivateZoneProperties
+	}
+	if pz.Tags != nil {
+		objectMap["tags"] = pz.Tags
+	}
+	if pz.Location != nil {
+		objectMap["location"] = pz.Location
+	}
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for PrivateZone struct.
+func (pz *PrivateZone) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "etag":
+			if v != nil {
+				var etag string
+				err = json.Unmarshal(*v, &etag)
+				if err != nil {
+					return err
+				}
+				pz.Etag = &etag
+			}
+		case "properties":
+			if v != nil {
+				var privateZoneProperties PrivateZoneProperties
+				err = json.Unmarshal(*v, &privateZoneProperties)
+				if err != nil {
+					return err
+				}
+				pz.PrivateZoneProperties = &privateZoneProperties
+			}
+		case "tags":
+			if v != nil {
+				var tags map[string]*string
+				err = json.Unmarshal(*v, &tags)
+				if err != nil {
+					return err
+				}
+				pz.Tags = tags
+			}
+		case "location":
+			if v != nil {
+				var location string
+				err = json.Unmarshal(*v, &location)
+				if err != nil {
+					return err
+				}
+				pz.Location = &location
+			}
+		case "id":
+			if v != nil {
+				var ID string
+				err = json.Unmarshal(*v, &ID)
+				if err != nil {
+					return err
+				}
+				pz.ID = &ID
+			}
+		case "name":
+			if v != nil {
+				var name string
+				err = json.Unmarshal(*v, &name)
+				if err != nil {
+					return err
+				}
+				pz.Name = &name
+			}
+		case "type":
+			if v != nil {
+				var typeVar string
+				err = json.Unmarshal(*v, &typeVar)
+				if err != nil {
+					return err
+				}
+				pz.Type = &typeVar
+			}
+		}
+	}
+
+	return nil
+}
+
+// PrivateZoneListResult the response to a Private DNS zone list operation.
+type PrivateZoneListResult struct {
+	autorest.Response `json:"-"`
+	// Value - Information about the Private DNS zones.
+	Value *[]PrivateZone `json:"value,omitempty"`
+	// NextLink - READ-ONLY; The continuation token for the next page of results.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateZoneListResult.
+func (pzlr PrivateZoneListResult) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if pzlr.Value != nil {
+		objectMap["value"] = pzlr.Value
+	}
+	return json.Marshal(objectMap)
+}
+
+// PrivateZoneListResultIterator provides access to a complete listing of PrivateZone values.
+type PrivateZoneListResultIterator struct {
+	i    int
+	page PrivateZoneListResultPage
+}
+
+// NextWithContext advances to the next value.  If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *PrivateZoneListResultIterator) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZoneListResultIterator.NextWithContext")
+		defer func() {
+			sc := -1
+			if iter.Response().Response.Response != nil {
+				sc = iter.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	iter.i++
+	if iter.i < len(iter.page.Values()) {
+		return nil
+	}
+	err = iter.page.NextWithContext(ctx)
+	if err != nil {
+		iter.i--
+		return err
+	}
+	iter.i = 0
+	return nil
+}
+
+// Next advances to the next value.  If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *PrivateZoneListResultIterator) Next() error {
+	return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter PrivateZoneListResultIterator) NotDone() bool {
+	return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter PrivateZoneListResultIterator) Response() PrivateZoneListResult {
+	return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter PrivateZoneListResultIterator) Value() PrivateZone {
+	if !iter.page.NotDone() {
+		return PrivateZone{}
+	}
+	return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the PrivateZoneListResultIterator type.
+func NewPrivateZoneListResultIterator(page PrivateZoneListResultPage) PrivateZoneListResultIterator {
+	return PrivateZoneListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (pzlr PrivateZoneListResult) IsEmpty() bool {
+	return pzlr.Value == nil || len(*pzlr.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (pzlr PrivateZoneListResult) hasNextLink() bool {
+	return pzlr.NextLink != nil && len(*pzlr.NextLink) != 0
+}
+
+// privateZoneListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (pzlr PrivateZoneListResult) privateZoneListResultPreparer(ctx context.Context) (*http.Request, error) {
+	if !pzlr.hasNextLink() {
+		return nil, nil
+	}
+	return autorest.Prepare((&http.Request{}).WithContext(ctx),
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(pzlr.NextLink)))
+}
+
+// PrivateZoneListResultPage contains a page of PrivateZone values.
+type PrivateZoneListResultPage struct {
+	fn   func(context.Context, PrivateZoneListResult) (PrivateZoneListResult, error)
+	pzlr PrivateZoneListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *PrivateZoneListResultPage) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZoneListResultPage.NextWithContext")
+		defer func() {
+			sc := -1
+			if page.Response().Response.Response != nil {
+				sc = page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	for {
+		next, err := page.fn(ctx, page.pzlr)
+		if err != nil {
+			return err
+		}
+		page.pzlr = next
+		if !next.hasNextLink() || !next.IsEmpty() {
+			break
+		}
+	}
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *PrivateZoneListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page PrivateZoneListResultPage) NotDone() bool {
+	return !page.pzlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page PrivateZoneListResultPage) Response() PrivateZoneListResult {
+	return page.pzlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page PrivateZoneListResultPage) Values() []PrivateZone {
+	if page.pzlr.IsEmpty() {
+		return nil
+	}
+	return *page.pzlr.Value
+}
+
+// Creates a new instance of the PrivateZoneListResultPage type.
+func NewPrivateZoneListResultPage(cur PrivateZoneListResult, getNextPage func(context.Context, PrivateZoneListResult) (PrivateZoneListResult, error)) PrivateZoneListResultPage {
+	return PrivateZoneListResultPage{
+		fn:   getNextPage,
+		pzlr: cur,
+	}
+}
+
+// PrivateZoneProperties represents the properties of the Private DNS zone.
+type PrivateZoneProperties struct {
+	// MaxNumberOfRecordSets - READ-ONLY; The maximum number of record sets that can be created in this Private DNS zone. This is a read-only property and any attempt to set this value will be ignored.
+	MaxNumberOfRecordSets *int64 `json:"maxNumberOfRecordSets,omitempty"`
+	// NumberOfRecordSets - READ-ONLY; The current number of record sets in this Private DNS zone. This is a read-only property and any attempt to set this value will be ignored.
+	NumberOfRecordSets *int64 `json:"numberOfRecordSets,omitempty"`
+	// MaxNumberOfVirtualNetworkLinks - READ-ONLY; The maximum number of virtual networks that can be linked to this Private DNS zone. This is a read-only property and any attempt to set this value will be ignored.
+	MaxNumberOfVirtualNetworkLinks *int64 `json:"maxNumberOfVirtualNetworkLinks,omitempty"`
+	// NumberOfVirtualNetworkLinks - READ-ONLY; The current number of virtual networks that are linked to this Private DNS zone. This is a read-only property and any attempt to set this value will be ignored.
+	NumberOfVirtualNetworkLinks *int64 `json:"numberOfVirtualNetworkLinks,omitempty"`
+	// MaxNumberOfVirtualNetworkLinksWithRegistration - READ-ONLY; The maximum number of virtual networks that can be linked to this Private DNS zone with registration enabled. This is a read-only property and any attempt to set this value will be ignored.
+	MaxNumberOfVirtualNetworkLinksWithRegistration *int64 `json:"maxNumberOfVirtualNetworkLinksWithRegistration,omitempty"`
+	// NumberOfVirtualNetworkLinksWithRegistration - READ-ONLY; The current number of virtual networks that are linked to this Private DNS zone with registration enabled. This is a read-only property and any attempt to set this value will be ignored.
+	NumberOfVirtualNetworkLinksWithRegistration *int64 `json:"numberOfVirtualNetworkLinksWithRegistration,omitempty"`
+	// ProvisioningState - READ-ONLY; The provisioning state of the resource. This is a read-only property and any attempt to set this value will be ignored. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+	ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PrivateZoneProperties.
+func (pzp PrivateZoneProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	return json.Marshal(objectMap)
+}
+
+// PrivateZonesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type PrivateZonesCreateOrUpdateFuture struct {
+	azure.FutureAPI
+	// Result returns the result of the asynchronous operation.
+	// If the operation has not completed it will return an error.
+	Result func(PrivateZonesClient) (PrivateZone, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *PrivateZonesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+	var azFuture azure.Future
+	if err := json.Unmarshal(body, &azFuture); err != nil {
+		return err
+	}
+	future.FutureAPI = &azFuture
+	future.Result = future.result
+	return nil
+}
+
+// result is the default implementation for PrivateZonesCreateOrUpdateFuture.Result.
+func (future *PrivateZonesCreateOrUpdateFuture) result(client PrivateZonesClient) (pz PrivateZone, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		pz.Response.Response = future.Response()
+		err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesCreateOrUpdateFuture")
+		return
+	}
+	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+	if pz.Response.Response, err = future.GetResult(sender); err == nil && pz.Response.Response.StatusCode != http.StatusNoContent {
+		pz, err = client.CreateOrUpdateResponder(pz.Response.Response)
+		if err != nil {
+			err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", pz.Response.Response, "Failure responding to request")
+		}
+	}
+	return
+}
+
+// PrivateZonesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type PrivateZonesDeleteFuture struct {
+	azure.FutureAPI
+	// Result returns the result of the asynchronous operation.
+	// If the operation has not completed it will return an error.
+	Result func(PrivateZonesClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *PrivateZonesDeleteFuture) UnmarshalJSON(body []byte) error {
+	var azFuture azure.Future
+	if err := json.Unmarshal(body, &azFuture); err != nil {
+		return err
+	}
+	future.FutureAPI = &azFuture
+	future.Result = future.result
+	return nil
+}
+
+// result is the default implementation for PrivateZonesDeleteFuture.Result.
+func (future *PrivateZonesDeleteFuture) result(client PrivateZonesClient) (ar autorest.Response, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesDeleteFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		ar.Response = future.Response()
+		err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesDeleteFuture")
+		return
+	}
+	ar.Response = future.Response()
+	return
+}
+
+// PrivateZonesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type PrivateZonesUpdateFuture struct {
+	azure.FutureAPI
+	// Result returns the result of the asynchronous operation.
+	// If the operation has not completed it will return an error.
+	Result func(PrivateZonesClient) (PrivateZone, error)
+}
+
+// UnmarshalJSON is the custom unmarshaller for CreateFuture.
+func (future *PrivateZonesUpdateFuture) UnmarshalJSON(body []byte) error {
+	var azFuture azure.Future
+	if err := json.Unmarshal(body, &azFuture); err != nil {
+		return err
+	}
+	future.FutureAPI = &azFuture
+	future.Result = future.result
+	return nil
+}
+
+// result is the default implementation for PrivateZonesUpdateFuture.Result.
+func (future *PrivateZonesUpdateFuture) result(client PrivateZonesClient) (pz PrivateZone, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		pz.Response.Response = future.Response()
+		err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesUpdateFuture")
+		return
+	}
+	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+	if pz.Response.Response, err = future.GetResult(sender); err == nil && pz.Response.Response.StatusCode != http.StatusNoContent {
+		pz, err = client.UpdateResponder(pz.Response.Response)
+		if err != nil {
+			err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", pz.Response.Response, "Failure responding to request")
+		}
+	}
+	return
+}
+
+// ProxyResource the resource model definition for an ARM proxy resource.
+type ProxyResource struct {
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'.
+	Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ProxyResource.
+func (pr ProxyResource) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	return json.Marshal(objectMap)
+}
+
+// PtrRecord a PTR record.
+type PtrRecord struct {
+	// Ptrdname - The PTR target domain name for this PTR record.
+	Ptrdname *string `json:"ptrdname,omitempty"`
+}
+
+// RecordSet describes a DNS record set (a collection of DNS records with the same name and type) in a
+// Private DNS zone.
+type RecordSet struct {
+	autorest.Response `json:"-"`
+	// Etag - The ETag of the record set.
+	Etag *string `json:"etag,omitempty"`
+	// RecordSetProperties - The properties of the record set.
+	*RecordSetProperties `json:"properties,omitempty"`
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'.
+	Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RecordSet.
+func (rs RecordSet) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if rs.Etag != nil {
+		objectMap["etag"] = rs.Etag
+	}
+	if rs.RecordSetProperties != nil {
+		objectMap["properties"] = rs.RecordSetProperties
+	}
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for RecordSet struct.
+func (rs *RecordSet) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "etag":
+			if v != nil {
+				var etag string
+				err = json.Unmarshal(*v, &etag)
+				if err != nil {
+					return err
+				}
+				rs.Etag = &etag
+			}
+		case "properties":
+			if v != nil {
+				var recordSetProperties RecordSetProperties
+				err = json.Unmarshal(*v, &recordSetProperties)
+				if err != nil {
+					return err
+				}
+				rs.RecordSetProperties = &recordSetProperties
+			}
+		case "id":
+			if v != nil {
+				var ID string
+				err = json.Unmarshal(*v, &ID)
+				if err != nil {
+					return err
+				}
+				rs.ID = &ID
+			}
+		case "name":
+			if v != nil {
+				var name string
+				err = json.Unmarshal(*v, &name)
+				if err != nil {
+					return err
+				}
+				rs.Name = &name
+			}
+		case "type":
+			if v != nil {
+				var typeVar string
+				err = json.Unmarshal(*v, &typeVar)
+				if err != nil {
+					return err
+				}
+				rs.Type = &typeVar
+			}
+		}
+	}
+
+	return nil
+}
+
+// RecordSetListResult the response to a record set list operation.
+type RecordSetListResult struct {
+	autorest.Response `json:"-"`
+	// Value - Information about the record sets in the response.
+	Value *[]RecordSet `json:"value,omitempty"`
+	// NextLink - READ-ONLY; The continuation token for the next page of results.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RecordSetListResult.
+func (rslr RecordSetListResult) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if rslr.Value != nil {
+		objectMap["value"] = rslr.Value
+	}
+	return json.Marshal(objectMap)
+}
+
+// RecordSetListResultIterator provides access to a complete listing of RecordSet values.
+type RecordSetListResultIterator struct {
+	i    int
+	page RecordSetListResultPage
+}
+
+// NextWithContext advances to the next value.  If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *RecordSetListResultIterator) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultIterator.NextWithContext")
+		defer func() {
+			sc := -1
+			if iter.Response().Response.Response != nil {
+				sc = iter.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	iter.i++
+	if iter.i < len(iter.page.Values()) {
+		return nil
+	}
+	err = iter.page.NextWithContext(ctx)
+	if err != nil {
+		iter.i--
+		return err
+	}
+	iter.i = 0
+	return nil
+}
+
+// Next advances to the next value.  If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *RecordSetListResultIterator) Next() error {
+	return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter RecordSetListResultIterator) NotDone() bool {
+	return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter RecordSetListResultIterator) Response() RecordSetListResult {
+	return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter RecordSetListResultIterator) Value() RecordSet {
+	if !iter.page.NotDone() {
+		return RecordSet{}
+	}
+	return iter.page.Values()[iter.i]
+}
+
+// NewRecordSetListResultIterator creates a new instance of the RecordSetListResultIterator type.
+func NewRecordSetListResultIterator(page RecordSetListResultPage) RecordSetListResultIterator {
+	return RecordSetListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (rslr RecordSetListResult) IsEmpty() bool {
+	return rslr.Value == nil || len(*rslr.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (rslr RecordSetListResult) hasNextLink() bool {
+	return rslr.NextLink != nil && len(*rslr.NextLink) != 0
+}
+
+// recordSetListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (rslr RecordSetListResult) recordSetListResultPreparer(ctx context.Context) (*http.Request, error) {
+	if !rslr.hasNextLink() {
+		return nil, nil
+	}
+	return autorest.Prepare((&http.Request{}).WithContext(ctx),
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(rslr.NextLink)))
+}
+
+// RecordSetListResultPage contains a page of RecordSet values.
+type RecordSetListResultPage struct {
+	fn   func(context.Context, RecordSetListResult) (RecordSetListResult, error)
+	rslr RecordSetListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *RecordSetListResultPage) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultPage.NextWithContext")
+		defer func() {
+			sc := -1
+			if page.Response().Response.Response != nil {
+				sc = page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	for {
+		next, err := page.fn(ctx, page.rslr)
+		if err != nil {
+			return err
+		}
+		page.rslr = next
+		if !next.hasNextLink() || !next.IsEmpty() {
+			break
+		}
+	}
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *RecordSetListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page RecordSetListResultPage) NotDone() bool {
+	return !page.rslr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page RecordSetListResultPage) Response() RecordSetListResult {
+	return page.rslr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page RecordSetListResultPage) Values() []RecordSet {
+	if page.rslr.IsEmpty() {
+		return nil
+	}
+	return *page.rslr.Value
+}
+
+// NewRecordSetListResultPage creates a new instance of the RecordSetListResultPage type.
+func NewRecordSetListResultPage(cur RecordSetListResult, getNextPage func(context.Context, RecordSetListResult) (RecordSetListResult, error)) RecordSetListResultPage {
+	return RecordSetListResultPage{
+		fn:   getNextPage,
+		rslr: cur,
+	}
+}
+
+// RecordSetProperties represents the properties of the records in the record set.
+type RecordSetProperties struct {
+	// Metadata - The metadata attached to the record set.
+	Metadata map[string]*string `json:"metadata"`
+	// TTL - The TTL (time-to-live) of the records in the record set.
+	TTL *int64 `json:"ttl,omitempty"`
+	// Fqdn - READ-ONLY; Fully qualified domain name of the record set.
+	Fqdn *string `json:"fqdn,omitempty"`
+	// IsAutoRegistered - READ-ONLY; Is the record set auto-registered in the Private DNS zone through a virtual network link?
+	IsAutoRegistered *bool `json:"isAutoRegistered,omitempty"`
+	// ARecords - The list of A records in the record set.
+	ARecords *[]ARecord `json:"aRecords,omitempty"`
+	// AaaaRecords - The list of AAAA records in the record set.
+	AaaaRecords *[]AaaaRecord `json:"aaaaRecords,omitempty"`
+	// CnameRecord - The CNAME record in the record set.
+	CnameRecord *CnameRecord `json:"cnameRecord,omitempty"`
+	// MxRecords - The list of MX records in the record set.
+	MxRecords *[]MxRecord `json:"mxRecords,omitempty"`
+	// PtrRecords - The list of PTR records in the record set.
+	PtrRecords *[]PtrRecord `json:"ptrRecords,omitempty"`
+	// SoaRecord - The SOA record in the record set.
+	SoaRecord *SoaRecord `json:"soaRecord,omitempty"`
+	// SrvRecords - The list of SRV records in the record set.
+	SrvRecords *[]SrvRecord `json:"srvRecords,omitempty"`
+	// TxtRecords - The list of TXT records in the record set.
+	TxtRecords *[]TxtRecord `json:"txtRecords,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RecordSetProperties.
+func (rsp RecordSetProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if rsp.Metadata != nil {
+		objectMap["metadata"] = rsp.Metadata
+	}
+	if rsp.TTL != nil {
+		objectMap["ttl"] = rsp.TTL
+	}
+	if rsp.ARecords != nil {
+		objectMap["aRecords"] = rsp.ARecords
+	}
+	if rsp.AaaaRecords != nil {
+		objectMap["aaaaRecords"] = rsp.AaaaRecords
+	}
+	if rsp.CnameRecord != nil {
+		objectMap["cnameRecord"] = rsp.CnameRecord
+	}
+	if rsp.MxRecords != nil {
+		objectMap["mxRecords"] = rsp.MxRecords
+	}
+	if rsp.PtrRecords != nil {
+		objectMap["ptrRecords"] = rsp.PtrRecords
+	}
+	if rsp.SoaRecord != nil {
+		objectMap["soaRecord"] = rsp.SoaRecord
+	}
+	if rsp.SrvRecords != nil {
+		objectMap["srvRecords"] = rsp.SrvRecords
+	}
+	if rsp.TxtRecords != nil {
+		objectMap["txtRecords"] = rsp.TxtRecords
+	}
+	return json.Marshal(objectMap)
+}
+
+// Resource the core properties of ARM resources
+type Resource struct {
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'.
+	Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Resource.
+func (r Resource) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	return json.Marshal(objectMap)
+}
+
+// SoaRecord an SOA record.
+type SoaRecord struct {
+	// Host - The domain name of the authoritative name server for this SOA record.
+	Host *string `json:"host,omitempty"`
+	// Email - The email contact for this SOA record.
+	Email *string `json:"email,omitempty"`
+	// SerialNumber - The serial number for this SOA record.
+	SerialNumber *int64 `json:"serialNumber,omitempty"`
+	// RefreshTime - The refresh value for this SOA record.
+	RefreshTime *int64 `json:"refreshTime,omitempty"`
+	// RetryTime - The retry time for this SOA record.
+	RetryTime *int64 `json:"retryTime,omitempty"`
+	// ExpireTime - The expire time for this SOA record.
+	ExpireTime *int64 `json:"expireTime,omitempty"`
+	// MinimumTTL - The minimum value for this SOA record. By convention this is used to determine the negative caching duration.
+	MinimumTTL *int64 `json:"minimumTtl,omitempty"`
+}
+
+// SrvRecord an SRV record.
+type SrvRecord struct {
+	// Priority - The priority value for this SRV record.
+	Priority *int32 `json:"priority,omitempty"`
+	// Weight - The weight value for this SRV record.
+	Weight *int32 `json:"weight,omitempty"`
+	// Port - The port value for this SRV record.
+	Port *int32 `json:"port,omitempty"`
+	// Target - The target domain name for this SRV record.
+	Target *string `json:"target,omitempty"`
+}
+
+// SubResource reference to another subresource.
+type SubResource struct {
+	// ID - Resource ID.
+	ID *string `json:"id,omitempty"`
+}
+
+// TrackedResource the resource model definition for a ARM tracked top level resource
+type TrackedResource struct {
+	// Tags - Resource tags.
+	Tags map[string]*string `json:"tags"`
+	// Location - The Azure Region where the resource lives
+	Location *string `json:"location,omitempty"`
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'.
+	Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for TrackedResource.
+func (tr TrackedResource) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if tr.Tags != nil {
+		objectMap["tags"] = tr.Tags
+	}
+	if tr.Location != nil {
+		objectMap["location"] = tr.Location
+	}
+	return json.Marshal(objectMap)
+}
+
+// TxtRecord a TXT record.
+type TxtRecord struct {
+	// Value - The text value of this TXT record.
+	Value *[]string `json:"value,omitempty"`
+}
+
+// VirtualNetworkLink describes a link to virtual network for a Private DNS zone.
+type VirtualNetworkLink struct {
+	autorest.Response `json:"-"`
+	// Etag - The ETag of the virtual network link.
+	Etag *string `json:"etag,omitempty"`
+	// VirtualNetworkLinkProperties - Properties of the virtual network link to the Private DNS zone.
+	*VirtualNetworkLinkProperties `json:"properties,omitempty"`
+	// Tags - Resource tags.
+	Tags map[string]*string `json:"tags"`
+	// Location - The Azure Region where the resource lives
+	Location *string `json:"location,omitempty"`
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'.
+	Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for VirtualNetworkLink.
+func (vnl VirtualNetworkLink) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if vnl.Etag != nil {
+		objectMap["etag"] = vnl.Etag
+	}
+	if vnl.VirtualNetworkLinkProperties != nil {
+		objectMap["properties"] = vnl.VirtualNetworkLinkProperties
+	}
+	if vnl.Tags != nil {
+		objectMap["tags"] = vnl.Tags
+	}
+	if vnl.Location != nil {
+		objectMap["location"] = vnl.Location
+	}
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for VirtualNetworkLink struct.
+func (vnl *VirtualNetworkLink) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "etag":
+			if v != nil {
+				var etag string
+				err = json.Unmarshal(*v, &etag)
+				if err != nil {
+					return err
+				}
+				vnl.Etag = &etag
+			}
+		case "properties":
+			if v != nil {
+				var virtualNetworkLinkProperties VirtualNetworkLinkProperties
+				err = json.Unmarshal(*v, &virtualNetworkLinkProperties)
+				if err != nil {
+					return err
+				}
+				vnl.VirtualNetworkLinkProperties = &virtualNetworkLinkProperties
+			}
+		case "tags":
+			if v != nil {
+				var tags map[string]*string
+				err = json.Unmarshal(*v, &tags)
+				if err != nil {
+					return err
+				}
+				vnl.Tags = tags
+			}
+		case "location":
+			if v != nil {
+				var location string
+				err = json.Unmarshal(*v, &location)
+				if err != nil {
+					return err
+				}
+				vnl.Location = &location
+			}
+		case "id":
+			if v != nil {
+				var ID string
+				err = json.Unmarshal(*v, &ID)
+				if err != nil {
+					return err
+				}
+				vnl.ID = &ID
+			}
+		case "name":
+			if v != nil {
+				var name string
+				err = json.Unmarshal(*v, &name)
+				if err != nil {
+					return err
+				}
+				vnl.Name = &name
+			}
+		case "type":
+			if v != nil {
+				var typeVar string
+				err = json.Unmarshal(*v, &typeVar)
+				if err != nil {
+					return err
+				}
+				vnl.Type = &typeVar
+			}
+		}
+	}
+
+	return nil
+}
+
+// VirtualNetworkLinkListResult the response to a list virtual network link to Private DNS zone operation.
+type VirtualNetworkLinkListResult struct {
+	autorest.Response `json:"-"`
+	// Value - Information about the virtual network links to the Private DNS zones.
+	Value *[]VirtualNetworkLink `json:"value,omitempty"`
+	// NextLink - READ-ONLY; The continuation token for the next page of results.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for VirtualNetworkLinkListResult.
+func (vnllr VirtualNetworkLinkListResult) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if vnllr.Value != nil {
+		objectMap["value"] = vnllr.Value
+	}
+	return json.Marshal(objectMap)
+}
+
+// VirtualNetworkLinkListResultIterator provides access to a complete listing of VirtualNetworkLink values.
+type VirtualNetworkLinkListResultIterator struct {
+	i    int
+	page VirtualNetworkLinkListResultPage
+}
+
+// NextWithContext advances to the next value.  If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *VirtualNetworkLinkListResultIterator) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinkListResultIterator.NextWithContext")
+		defer func() {
+			sc := -1
+			if iter.Response().Response.Response != nil {
+				sc = iter.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	iter.i++
+	if iter.i < len(iter.page.Values()) {
+		return nil
+	}
+	err = iter.page.NextWithContext(ctx)
+	if err != nil {
+		iter.i--
+		return err
+	}
+	iter.i = 0
+	return nil
+}
+
+// Next advances to the next value.  If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *VirtualNetworkLinkListResultIterator) Next() error {
+	return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter VirtualNetworkLinkListResultIterator) NotDone() bool {
+	return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter VirtualNetworkLinkListResultIterator) Response() VirtualNetworkLinkListResult {
+	return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter VirtualNetworkLinkListResultIterator) Value() VirtualNetworkLink {
+	if !iter.page.NotDone() {
+		return VirtualNetworkLink{}
+	}
+	return iter.page.Values()[iter.i]
+}
+
+// NewVirtualNetworkLinkListResultIterator creates a new instance of the VirtualNetworkLinkListResultIterator type.
+func NewVirtualNetworkLinkListResultIterator(page VirtualNetworkLinkListResultPage) VirtualNetworkLinkListResultIterator {
+	return VirtualNetworkLinkListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (vnllr VirtualNetworkLinkListResult) IsEmpty() bool {
+	return vnllr.Value == nil || len(*vnllr.Value) == 0
+}
+
+// hasNextLink returns true if the NextLink is not empty.
+func (vnllr VirtualNetworkLinkListResult) hasNextLink() bool {
+	return vnllr.NextLink != nil && len(*vnllr.NextLink) != 0
+}
+
+// virtualNetworkLinkListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (vnllr VirtualNetworkLinkListResult) virtualNetworkLinkListResultPreparer(ctx context.Context) (*http.Request, error) {
+	if !vnllr.hasNextLink() {
+		return nil, nil
+	}
+	return autorest.Prepare((&http.Request{}).WithContext(ctx),
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(vnllr.NextLink)))
+}
+
+// VirtualNetworkLinkListResultPage contains a page of VirtualNetworkLink values.
+type VirtualNetworkLinkListResultPage struct {
+	fn    func(context.Context, VirtualNetworkLinkListResult) (VirtualNetworkLinkListResult, error)
+	vnllr VirtualNetworkLinkListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *VirtualNetworkLinkListResultPage) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinkListResultPage.NextWithContext")
+		defer func() {
+			sc := -1
+			if page.Response().Response.Response != nil {
+				sc = page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	for {
+		next, err := page.fn(ctx, page.vnllr)
+		if err != nil {
+			return err
+		}
+		page.vnllr = next
+		if !next.hasNextLink() || !next.IsEmpty() {
+			break
+		}
+	}
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *VirtualNetworkLinkListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page VirtualNetworkLinkListResultPage) NotDone() bool {
+	return !page.vnllr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page VirtualNetworkLinkListResultPage) Response() VirtualNetworkLinkListResult {
+	return page.vnllr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page VirtualNetworkLinkListResultPage) Values() []VirtualNetworkLink {
+	if page.vnllr.IsEmpty() {
+		return nil
+	}
+	return *page.vnllr.Value
+}
+
+// NewVirtualNetworkLinkListResultPage creates a new instance of the VirtualNetworkLinkListResultPage type.
+func NewVirtualNetworkLinkListResultPage(cur VirtualNetworkLinkListResult, getNextPage func(context.Context, VirtualNetworkLinkListResult) (VirtualNetworkLinkListResult, error)) VirtualNetworkLinkListResultPage {
+	return VirtualNetworkLinkListResultPage{
+		fn:    getNextPage,
+		vnllr: cur,
+	}
+}
+
+// VirtualNetworkLinkProperties represents the properties of the Private DNS zone.
+type VirtualNetworkLinkProperties struct {
+	// VirtualNetwork - The reference of the virtual network.
+	VirtualNetwork *SubResource `json:"virtualNetwork,omitempty"`
+	// RegistrationEnabled - Is auto-registration of virtual machine records in the virtual network in the Private DNS zone enabled?
+	RegistrationEnabled *bool `json:"registrationEnabled,omitempty"`
+	// VirtualNetworkLinkState - READ-ONLY; The status of the virtual network link to the Private DNS zone. Possible values are 'InProgress' and 'Done'. This is a read-only property and any attempt to set this value will be ignored. Possible values include: 'InProgress', 'Completed'
+	VirtualNetworkLinkState VirtualNetworkLinkState `json:"virtualNetworkLinkState,omitempty"`
+	// ProvisioningState - READ-ONLY; The provisioning state of the resource. This is a read-only property and any attempt to set this value will be ignored. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+	ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for VirtualNetworkLinkProperties.
+func (vnlp VirtualNetworkLinkProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if vnlp.VirtualNetwork != nil {
+		objectMap["virtualNetwork"] = vnlp.VirtualNetwork
+	}
+	if vnlp.RegistrationEnabled != nil {
+		objectMap["registrationEnabled"] = vnlp.RegistrationEnabled
+	}
+	return json.Marshal(objectMap)
+}
+
+// VirtualNetworkLinksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type VirtualNetworkLinksCreateOrUpdateFuture struct {
+	azure.FutureAPI
+	// Result returns the result of the asynchronous operation.
+	// If the operation has not completed it will return an error.
+	Result func(VirtualNetworkLinksClient) (VirtualNetworkLink, error)
+}
+
+// UnmarshalJSON is the custom unmarshaler for VirtualNetworkLinksCreateOrUpdateFuture.
+func (future *VirtualNetworkLinksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
+	var azFuture azure.Future
+	if err := json.Unmarshal(body, &azFuture); err != nil {
+		return err
+	}
+	future.FutureAPI = &azFuture
+	future.Result = future.result
+	return nil
+}
+
+// result is the default implementation for VirtualNetworkLinksCreateOrUpdateFuture.Result.
+func (future *VirtualNetworkLinksCreateOrUpdateFuture) result(client VirtualNetworkLinksClient) (vnl VirtualNetworkLink, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		vnl.Response.Response = future.Response()
+		err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksCreateOrUpdateFuture")
+		return
+	}
+	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+	if vnl.Response.Response, err = future.GetResult(sender); err == nil && vnl.Response.Response.StatusCode != http.StatusNoContent {
+		vnl, err = client.CreateOrUpdateResponder(vnl.Response.Response)
+		if err != nil {
+			err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", vnl.Response.Response, "Failure responding to request")
+		}
+	}
+	return
+}
+
+// VirtualNetworkLinksDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type VirtualNetworkLinksDeleteFuture struct {
+	azure.FutureAPI
+	// Result returns the result of the asynchronous operation.
+	// If the operation has not completed it will return an error.
+	Result func(VirtualNetworkLinksClient) (autorest.Response, error)
+}
+
+// UnmarshalJSON is the custom unmarshaler for VirtualNetworkLinksDeleteFuture.
+func (future *VirtualNetworkLinksDeleteFuture) UnmarshalJSON(body []byte) error {
+	var azFuture azure.Future
+	if err := json.Unmarshal(body, &azFuture); err != nil {
+		return err
+	}
+	future.FutureAPI = &azFuture
+	future.Result = future.result
+	return nil
+}
+
+// result is the default implementation for VirtualNetworkLinksDeleteFuture.Result.
+func (future *VirtualNetworkLinksDeleteFuture) result(client VirtualNetworkLinksClient) (ar autorest.Response, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksDeleteFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		ar.Response = future.Response()
+		err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksDeleteFuture")
+		return
+	}
+	ar.Response = future.Response()
+	return
+}
+
+// VirtualNetworkLinksUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type VirtualNetworkLinksUpdateFuture struct {
+	azure.FutureAPI
+	// Result returns the result of the asynchronous operation.
+	// If the operation has not completed it will return an error.
+	Result func(VirtualNetworkLinksClient) (VirtualNetworkLink, error)
+}
+
+// UnmarshalJSON is the custom unmarshaler for VirtualNetworkLinksUpdateFuture.
+func (future *VirtualNetworkLinksUpdateFuture) UnmarshalJSON(body []byte) error {
+	var azFuture azure.Future
+	if err := json.Unmarshal(body, &azFuture); err != nil {
+		return err
+	}
+	future.FutureAPI = &azFuture
+	future.Result = future.result
+	return nil
+}
+
+// result is the default implementation for VirtualNetworkLinksUpdateFuture.Result.
+func (future *VirtualNetworkLinksUpdateFuture) result(client VirtualNetworkLinksClient) (vnl VirtualNetworkLink, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		vnl.Response.Response = future.Response()
+		err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksUpdateFuture")
+		return
+	}
+	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+	if vnl.Response.Response, err = future.GetResult(sender); err == nil && vnl.Response.Response.StatusCode != http.StatusNoContent {
+		vnl, err = client.UpdateResponder(vnl.Response.Response)
+		if err != nil {
+			err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", vnl.Response.Response, "Failure responding to request")
+		}
+	}
+	return
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go
new file mode 100644
index 0000000000000000000000000000000000000000..2e10b275cd2d66ac00f5b7e77731ba3ae7adaddd
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go
@@ -0,0 +1,611 @@
+package privatedns
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// PrivateZonesClient is the the Private DNS Management Client.
+type PrivateZonesClient struct {
+	BaseClient
+}
+
+// NewPrivateZonesClient creates an instance of the PrivateZonesClient client.
+func NewPrivateZonesClient(subscriptionID string) PrivateZonesClient {
+	return NewPrivateZonesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPrivateZonesClientWithBaseURI creates an instance of the PrivateZonesClient client using a custom endpoint.  Use
+// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewPrivateZonesClientWithBaseURI(baseURI string, subscriptionID string) PrivateZonesClient {
+	return PrivateZonesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a Private DNS zone. Does not modify Links to virtual networks or DNS records
+// within the zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// parameters - parameters supplied to the CreateOrUpdate operation.
+// ifMatch - the ETag of the Private DNS zone. Omit this value to always overwrite the current zone. Specify
+// the last-seen ETag value to prevent accidentally overwriting any concurrent changes.
+// ifNoneMatch - set to '*' to allow a new Private DNS zone to be created, but to prevent updating an existing
+// zone. Other values will be ignored.
+func (client PrivateZonesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, parameters PrivateZone, ifMatch string, ifNoneMatch string) (result PrivateZonesCreateOrUpdateFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.CreateOrUpdate")
+		defer func() {
+			sc := -1
+			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+				sc = result.FutureAPI.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, privateZoneName, parameters, ifMatch, ifNoneMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "CreateOrUpdate", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.CreateOrUpdateSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "CreateOrUpdate", nil, "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client PrivateZonesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, parameters PrivateZone, ifMatch string, ifNoneMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":   autorest.Encode("path", privateZoneName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	if len(ifNoneMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateZonesClient) CreateOrUpdateSender(req *http.Request) (future PrivateZonesCreateOrUpdateFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	var azf azure.Future
+	azf, err = azure.NewFutureFromResponse(resp)
+	future.FutureAPI = &azf
+	future.Result = future.result
+	return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PrivateZonesClient) CreateOrUpdateResponder(resp *http.Response) (result PrivateZone, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete deletes a Private DNS zone. WARNING: All DNS records in the zone will also be deleted. This operation cannot
+// be undone. Private DNS zone cannot be deleted unless all virtual network links to it are removed.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// ifMatch - the ETag of the Private DNS zone. Omit this value to always delete the current zone. Specify the
+// last-seen ETag value to prevent accidentally deleting any concurrent changes.
+func (client PrivateZonesClient) Delete(ctx context.Context, resourceGroupName string, privateZoneName string, ifMatch string) (result PrivateZonesDeleteFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.Delete")
+		defer func() {
+			sc := -1
+			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+				sc = result.FutureAPI.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.DeletePreparer(ctx, resourceGroupName, privateZoneName, ifMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Delete", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.DeleteSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Delete", nil, "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PrivateZonesClient) DeletePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, ifMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":   autorest.Encode("path", privateZoneName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateZonesClient) DeleteSender(req *http.Request) (future PrivateZonesDeleteFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	var azf azure.Future
+	azf, err = azure.NewFutureFromResponse(resp)
+	future.FutureAPI = &azf
+	future.Result = future.result
+	return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PrivateZonesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get gets a Private DNS zone. Retrieves the zone properties, but not the virtual networks links or the record sets
+// within the zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+func (client PrivateZonesClient) Get(ctx context.Context, resourceGroupName string, privateZoneName string) (result PrivateZone, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.Get")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.GetPreparer(ctx, resourceGroupName, privateZoneName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Get", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Get", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Get", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client PrivateZonesClient) GetPreparer(ctx context.Context, resourceGroupName string, privateZoneName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":   autorest.Encode("path", privateZoneName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateZonesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PrivateZonesClient) GetResponder(resp *http.Response) (result PrivateZone, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List lists the Private DNS zones in all resource groups in a subscription.
+// Parameters:
+// top - the maximum number of Private DNS zones to return. If not specified, returns up to 100 zones.
+func (client PrivateZonesClient) List(ctx context.Context, top *int32) (result PrivateZoneListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.List")
+		defer func() {
+			sc := -1
+			if result.pzlr.Response.Response != nil {
+				sc = result.pzlr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listNextResults
+	req, err := client.ListPreparer(ctx, top)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "List", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.pzlr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "List", resp, "Failure sending request")
+		return
+	}
+
+	result.pzlr, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "List", resp, "Failure responding to request")
+		return
+	}
+	if result.pzlr.hasNextLink() && result.pzlr.IsEmpty() {
+		err = result.NextWithContext(ctx)
+		return
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client PrivateZonesClient) ListPreparer(ctx context.Context, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateDnsZones", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateZonesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client PrivateZonesClient) ListResponder(resp *http.Response) (result PrivateZoneListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client PrivateZonesClient) listNextResults(ctx context.Context, lastResults PrivateZoneListResult) (result PrivateZoneListResult, err error) {
+	req, err := lastResults.privateZoneListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PrivateZonesClient) ListComplete(ctx context.Context, top *int32) (result PrivateZoneListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.List")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.List(ctx, top)
+	return
+}
+
+// ListByResourceGroup lists the Private DNS zones within a resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
+func (client PrivateZonesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, top *int32) (result PrivateZoneListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.ListByResourceGroup")
+		defer func() {
+			sc := -1
+			if result.pzlr.Response.Response != nil {
+				sc = result.pzlr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listByResourceGroupNextResults
+	req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, top)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "ListByResourceGroup", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListByResourceGroupSender(req)
+	if err != nil {
+		result.pzlr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "ListByResourceGroup", resp, "Failure sending request")
+		return
+	}
+
+	result.pzlr, err = client.ListByResourceGroupResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "ListByResourceGroup", resp, "Failure responding to request")
+		return
+	}
+	if result.pzlr.hasNextLink() && result.pzlr.IsEmpty() {
+		err = result.NextWithContext(ctx)
+		return
+	}
+
+	return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client PrivateZonesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateZonesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client PrivateZonesClient) ListByResourceGroupResponder(resp *http.Response) (result PrivateZoneListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client PrivateZonesClient) listByResourceGroupNextResults(ctx context.Context, lastResults PrivateZoneListResult) (result PrivateZoneListResult, err error) {
+	req, err := lastResults.privateZoneListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListByResourceGroupSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListByResourceGroupResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PrivateZonesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, top *int32) (result PrivateZoneListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.ListByResourceGroup")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, top)
+	return
+}
+
+// Update updates a Private DNS zone. Does not modify virtual network links or DNS records within the zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// parameters - parameters supplied to the Update operation.
+// ifMatch - the ETag of the Private DNS zone. Omit this value to always overwrite the current zone. Specify
+// the last-seen ETag value to prevent accidentally overwriting any concurrent changes.
+func (client PrivateZonesClient) Update(ctx context.Context, resourceGroupName string, privateZoneName string, parameters PrivateZone, ifMatch string) (result PrivateZonesUpdateFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.Update")
+		defer func() {
+			sc := -1
+			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+				sc = result.FutureAPI.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.UpdatePreparer(ctx, resourceGroupName, privateZoneName, parameters, ifMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Update", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.UpdateSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Update", nil, "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client PrivateZonesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, parameters PrivateZone, ifMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":   autorest.Encode("path", privateZoneName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPatch(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrivateZonesClient) UpdateSender(req *http.Request) (future PrivateZonesUpdateFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	var azf azure.Future
+	azf, err = azure.NewFutureFromResponse(resp)
+	future.FutureAPI = &azf
+	future.Result = future.result
+	return
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client PrivateZonesClient) UpdateResponder(resp *http.Response) (result PrivateZone, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c863316f27cc710be0283675a5443a2ac2fcb1c
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go
@@ -0,0 +1,640 @@
+package privatedns
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// RecordSetsClient is the the Private DNS Management Client.
+type RecordSetsClient struct {
+	BaseClient
+}
+
+// NewRecordSetsClient creates an instance of the RecordSetsClient client.
+func NewRecordSetsClient(subscriptionID string) RecordSetsClient {
+	return NewRecordSetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewRecordSetsClientWithBaseURI creates an instance of the RecordSetsClient client using a custom endpoint.  Use this
+// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewRecordSetsClientWithBaseURI(baseURI string, subscriptionID string) RecordSetsClient {
+	return RecordSetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a record set within a Private DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// recordType - the type of DNS record in this record set. Record sets of type SOA can be updated but not
+// created (they are created when the Private DNS zone is created).
+// relativeRecordSetName - the name of the record set, relative to the name of the zone.
+// parameters - parameters supplied to the CreateOrUpdate operation.
+// ifMatch - the ETag of the record set. Omit this value to always overwrite the current record set. Specify
+// the last-seen ETag value to prevent accidentally overwriting any concurrent changes.
+// ifNoneMatch - set to '*' to allow a new record set to be created, but to prevent updating an existing record
+// set. Other values will be ignored.
+func (client RecordSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, ifMatch string, ifNoneMatch string) (result RecordSet, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.CreateOrUpdate")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, privateZoneName, recordType, relativeRecordSetName, parameters, ifMatch, ifNoneMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "CreateOrUpdate", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "CreateOrUpdate", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "CreateOrUpdate", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client RecordSetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, ifMatch string, ifNoneMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":       autorest.Encode("path", privateZoneName),
+		"recordType":            autorest.Encode("path", recordType),
+		"relativeRecordSetName": relativeRecordSetName,
+		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
+		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	if len(ifNoneMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) CreateOrUpdateResponder(resp *http.Response) (result RecordSet, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete deletes a record set from a Private DNS zone. This operation cannot be undone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// recordType - the type of DNS record in this record set. Record sets of type SOA cannot be deleted (they are
+// deleted when the Private DNS zone is deleted).
+// relativeRecordSetName - the name of the record set, relative to the name of the zone.
+// ifMatch - the ETag of the record set. Omit this value to always delete the current record set. Specify the
+// last-seen ETag value to prevent accidentally deleting any concurrent changes.
+func (client RecordSetsClient) Delete(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, ifMatch string) (result autorest.Response, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Delete")
+		defer func() {
+			sc := -1
+			if result.Response != nil {
+				sc = result.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.DeletePreparer(ctx, resourceGroupName, privateZoneName, recordType, relativeRecordSetName, ifMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Delete", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Delete", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Delete", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client RecordSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, ifMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":       autorest.Encode("path", privateZoneName),
+		"recordType":            autorest.Encode("path", recordType),
+		"relativeRecordSetName": relativeRecordSetName,
+		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
+		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get gets a record set.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// recordType - the type of DNS record in this record set.
+// relativeRecordSetName - the name of the record set, relative to the name of the zone.
+func (client RecordSetsClient) Get(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string) (result RecordSet, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Get")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.GetPreparer(ctx, resourceGroupName, privateZoneName, recordType, relativeRecordSetName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Get", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Get", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Get", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client RecordSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":       autorest.Encode("path", privateZoneName),
+		"recordType":            autorest.Encode("path", recordType),
+		"relativeRecordSetName": relativeRecordSetName,
+		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
+		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) GetResponder(resp *http.Response) (result RecordSet, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List lists all record sets in a Private DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
+// recordsetnamesuffix - the suffix label of the record set name to be used to filter the record set
+// enumeration. If this parameter is specified, the returned enumeration will only contain records that end
+// with ".<recordsetnamesuffix>".
+func (client RecordSetsClient) List(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.List")
+		defer func() {
+			sc := -1
+			if result.rslr.Response.Response != nil {
+				sc = result.rslr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listNextResults
+	req, err := client.ListPreparer(ctx, resourceGroupName, privateZoneName, top, recordsetnamesuffix)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "List", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.rslr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "List", resp, "Failure sending request")
+		return
+	}
+
+	result.rslr, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "List", resp, "Failure responding to request")
+		return
+	}
+	if result.rslr.hasNextLink() && result.rslr.IsEmpty() {
+		err = result.NextWithContext(ctx)
+		return
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client RecordSetsClient) ListPreparer(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32, recordsetnamesuffix string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":   autorest.Encode("path", privateZoneName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+	if len(recordsetnamesuffix) > 0 {
+		queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/ALL", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) ListResponder(resp *http.Response) (result RecordSetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client RecordSetsClient) listNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) {
+	req, err := lastResults.recordSetListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client RecordSetsClient) ListComplete(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.List")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.List(ctx, resourceGroupName, privateZoneName, top, recordsetnamesuffix)
+	return
+}
+
+// ListByType lists the record sets of a specified type in a Private DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// recordType - the type of record sets to enumerate.
+// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
+// recordsetnamesuffix - the suffix label of the record set name to be used to filter the record set
+// enumeration. If this parameter is specified, the returned enumeration will only contain records that end
+// with ".<recordsetnamesuffix>".
+func (client RecordSetsClient) ListByType(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType")
+		defer func() {
+			sc := -1
+			if result.rslr.Response.Response != nil {
+				sc = result.rslr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listByTypeNextResults
+	req, err := client.ListByTypePreparer(ctx, resourceGroupName, privateZoneName, recordType, top, recordsetnamesuffix)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "ListByType", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListByTypeSender(req)
+	if err != nil {
+		result.rslr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "ListByType", resp, "Failure sending request")
+		return
+	}
+
+	result.rslr, err = client.ListByTypeResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "ListByType", resp, "Failure responding to request")
+		return
+	}
+	if result.rslr.hasNextLink() && result.rslr.IsEmpty() {
+		err = result.NextWithContext(ctx)
+		return
+	}
+
+	return
+}
+
+// ListByTypePreparer prepares the ListByType request.
+func (client RecordSetsClient) ListByTypePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":   autorest.Encode("path", privateZoneName),
+		"recordType":        autorest.Encode("path", recordType),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+	if len(recordsetnamesuffix) > 0 {
+		queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByTypeSender sends the ListByType request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) ListByTypeSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListByTypeResponder handles the response to the ListByType request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) ListByTypeResponder(resp *http.Response) (result RecordSetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listByTypeNextResults retrieves the next set of results, if any.
+func (client RecordSetsClient) listByTypeNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) {
+	req, err := lastResults.recordSetListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listByTypeNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListByTypeSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listByTypeNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListByTypeResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listByTypeNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListByTypeComplete enumerates all values, automatically crossing page boundaries as required.
+func (client RecordSetsClient) ListByTypeComplete(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.ListByType(ctx, resourceGroupName, privateZoneName, recordType, top, recordsetnamesuffix)
+	return
+}
+
+// Update updates a record set within a Private DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// recordType - the type of DNS record in this record set.
+// relativeRecordSetName - the name of the record set, relative to the name of the zone.
+// parameters - parameters supplied to the Update operation.
+// ifMatch - the ETag of the record set. Omit this value to always overwrite the current record set. Specify
+// the last-seen ETag value to prevent accidentally overwriting concurrent changes.
+func (client RecordSetsClient) Update(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, ifMatch string) (result RecordSet, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Update")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.UpdatePreparer(ctx, resourceGroupName, privateZoneName, recordType, relativeRecordSetName, parameters, ifMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Update", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.UpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Update", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.UpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Update", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client RecordSetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, ifMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":       autorest.Encode("path", privateZoneName),
+		"recordType":            autorest.Encode("path", recordType),
+		"relativeRecordSetName": relativeRecordSetName,
+		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
+		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPatch(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) UpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) UpdateResponder(resp *http.Response) (result RecordSet, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..32bd8ba132eb5af30920f739d44b9ec7a6ff2281
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go
@@ -0,0 +1,19 @@
+package privatedns
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+	return "Azure-SDK-For-Go/" + Version() + " privatedns/2018-09-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+	return version.Number
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go
new file mode 100644
index 0000000000000000000000000000000000000000..e85b36982e0eaae9a9f721fbb16179d9edec2e1d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go
@@ -0,0 +1,506 @@
+package privatedns
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// VirtualNetworkLinksClient is the the Private DNS Management Client.
+type VirtualNetworkLinksClient struct {
+	BaseClient
+}
+
+// NewVirtualNetworkLinksClient creates an instance of the VirtualNetworkLinksClient client.
+func NewVirtualNetworkLinksClient(subscriptionID string) VirtualNetworkLinksClient {
+	return NewVirtualNetworkLinksClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualNetworkLinksClientWithBaseURI creates an instance of the VirtualNetworkLinksClient client using a custom
+// endpoint.  Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
+// stack).
+func NewVirtualNetworkLinksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkLinksClient {
+	return VirtualNetworkLinksClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a virtual network link to the specified Private DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// virtualNetworkLinkName - the name of the virtual network link.
+// parameters - parameters supplied to the CreateOrUpdate operation.
+// ifMatch - the ETag of the virtual network link to the Private DNS zone. Omit this value to always overwrite
+// the current virtual network link. Specify the last-seen ETag value to prevent accidentally overwriting any
+// concurrent changes.
+// ifNoneMatch - set to '*' to allow a new virtual network link to the Private DNS zone to be created, but to
+// prevent updating an existing link. Other values will be ignored.
+func (client VirtualNetworkLinksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, ifMatch string, ifNoneMatch string) (result VirtualNetworkLinksCreateOrUpdateFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.CreateOrUpdate")
+		defer func() {
+			sc := -1
+			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+				sc = result.FutureAPI.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName, parameters, ifMatch, ifNoneMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "CreateOrUpdate", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.CreateOrUpdateSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "CreateOrUpdate", nil, "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client VirtualNetworkLinksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, ifMatch string, ifNoneMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":        autorest.Encode("path", privateZoneName),
+		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
+		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkLinkName": autorest.Encode("path", virtualNetworkLinkName),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	if len(ifNoneMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkLinksClient) CreateOrUpdateSender(req *http.Request) (future VirtualNetworkLinksCreateOrUpdateFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	var azf azure.Future
+	azf, err = azure.NewFutureFromResponse(resp)
+	future.FutureAPI = &azf
+	future.Result = future.result
+	return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkLinksClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkLink, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete deletes a virtual network link to the specified Private DNS zone. WARNING: In case of a registration virtual
+// network, all auto-registered DNS records in the zone for the virtual network will also be deleted. This operation
+// cannot be undone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// virtualNetworkLinkName - the name of the virtual network link.
+// ifMatch - the ETag of the virtual network link to the Private DNS zone. Omit this value to always delete the
+// current zone. Specify the last-seen ETag value to prevent accidentally deleting any concurrent changes.
+func (client VirtualNetworkLinksClient) Delete(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, ifMatch string) (result VirtualNetworkLinksDeleteFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.Delete")
+		defer func() {
+			sc := -1
+			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+				sc = result.FutureAPI.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.DeletePreparer(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName, ifMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Delete", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.DeleteSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Delete", nil, "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client VirtualNetworkLinksClient) DeletePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, ifMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":        autorest.Encode("path", privateZoneName),
+		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
+		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkLinkName": autorest.Encode("path", virtualNetworkLinkName),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkLinksClient) DeleteSender(req *http.Request) (future VirtualNetworkLinksDeleteFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	var azf azure.Future
+	azf, err = azure.NewFutureFromResponse(resp)
+	future.FutureAPI = &azf
+	future.Result = future.result
+	return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkLinksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get gets a virtual network link to the specified Private DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// virtualNetworkLinkName - the name of the virtual network link.
+func (client VirtualNetworkLinksClient) Get(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (result VirtualNetworkLink, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.Get")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.GetPreparer(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Get", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Get", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Get", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualNetworkLinksClient) GetPreparer(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":        autorest.Encode("path", privateZoneName),
+		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
+		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkLinkName": autorest.Encode("path", virtualNetworkLinkName),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkLinksClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkLinksClient) GetResponder(resp *http.Response) (result VirtualNetworkLink, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List lists the virtual network links to the specified Private DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// top - the maximum number of virtual network links to return. If not specified, returns up to 100 virtual
+// network links.
+func (client VirtualNetworkLinksClient) List(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32) (result VirtualNetworkLinkListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.List")
+		defer func() {
+			sc := -1
+			if result.vnllr.Response.Response != nil {
+				sc = result.vnllr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listNextResults
+	req, err := client.ListPreparer(ctx, resourceGroupName, privateZoneName, top)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "List", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.vnllr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "List", resp, "Failure sending request")
+		return
+	}
+
+	result.vnllr, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "List", resp, "Failure responding to request")
+		return
+	}
+	if result.vnllr.hasNextLink() && result.vnllr.IsEmpty() {
+		err = result.NextWithContext(ctx)
+		return
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualNetworkLinksClient) ListPreparer(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":   autorest.Encode("path", privateZoneName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkLinksClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkLinksClient) ListResponder(resp *http.Response) (result VirtualNetworkLinkListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client VirtualNetworkLinksClient) listNextResults(ctx context.Context, lastResults VirtualNetworkLinkListResult) (result VirtualNetworkLinkListResult, err error) {
+	req, err := lastResults.virtualNetworkLinkListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "listNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "listNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client VirtualNetworkLinksClient) ListComplete(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32) (result VirtualNetworkLinkListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.List")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.List(ctx, resourceGroupName, privateZoneName, top)
+	return
+}
+
+// Update updates a virtual network link to the specified Private DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// privateZoneName - the name of the Private DNS zone (without a terminating dot).
+// virtualNetworkLinkName - the name of the virtual network link.
+// parameters - parameters supplied to the Update operation.
+// ifMatch - the ETag of the virtual network link to the Private DNS zone. Omit this value to always overwrite
+// the current virtual network link. Specify the last-seen ETag value to prevent accidentally overwriting any
+// concurrent changes.
+func (client VirtualNetworkLinksClient) Update(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, ifMatch string) (result VirtualNetworkLinksUpdateFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.Update")
+		defer func() {
+			sc := -1
+			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
+				sc = result.FutureAPI.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.UpdatePreparer(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName, parameters, ifMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Update", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.UpdateSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Update", nil, "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client VirtualNetworkLinksClient) UpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, ifMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"privateZoneName":        autorest.Encode("path", privateZoneName),
+		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
+		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkLinkName": autorest.Encode("path", virtualNetworkLinkName),
+	}
+
+	const APIVersion = "2018-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPatch(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkLinksClient) UpdateSender(req *http.Request) (future VirtualNetworkLinksUpdateFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	var azf azure.Future
+	azf, err = azure.NewFutureFromResponse(resp)
+	future.FutureAPI = &azf
+	future.Result = future.result
+	return
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkLinksClient) UpdateResponder(resp *http.Response) (result VirtualNetworkLink, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt
index 4ad2ed8c3cfb0c523ea6a72df3c6bcb5049fb198..cce0e8f3ab2489ce294c30019a82b075ffc12e58 100644
--- a/cluster-autoscaler/vendor/modules.txt
+++ b/cluster-autoscaler/vendor/modules.txt
@@ -9,6 +9,7 @@ github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/con
 github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice
 github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network
 github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network
+github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns
 github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources
 github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage
 github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage
@@ -173,8 +174,6 @@ github.com/containerd/containerd/api/types
 github.com/containerd/containerd/api/types/task
 # github.com/containerd/ttrpc v1.0.2
 github.com/containerd/ttrpc
-# github.com/containernetworking/cni v0.8.1
-## explicit
 # github.com/coreos/go-semver v0.3.0
 github.com/coreos/go-semver/semver
 # github.com/coreos/go-systemd/v22 v22.3.2
@@ -1803,23 +1802,50 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
 # sigs.k8s.io/cloud-provider-azure v1.23.2
 ## explicit
+sigs.k8s.io/cloud-provider-azure/pkg/auth
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient/mockcontainerserviceclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient
 sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient
+sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient
+sigs.k8s.io/cloud-provider-azure/pkg/cache
 sigs.k8s.io/cloud-provider-azure/pkg/consts
 sigs.k8s.io/cloud-provider-azure/pkg/metrics
+sigs.k8s.io/cloud-provider-azure/pkg/provider
 sigs.k8s.io/cloud-provider-azure/pkg/retry
 sigs.k8s.io/cloud-provider-azure/pkg/version
 # sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/azure_auth.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/azure_auth.go
new file mode 100644
index 0000000000000000000000000000000000000000..730a0a2c5b33c666e92bc93f0a1c16a7b54fd76d
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/azure_auth.go
@@ -0,0 +1,290 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package auth
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/autorest/azure"
+
+	"golang.org/x/crypto/pkcs12"
+
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+var (
+	// ErrorNoAuth indicates that no credentials are provided.
+	ErrorNoAuth = fmt.Errorf("no credentials provided for Azure cloud provider")
+)
+
+// AzureAuthConfig holds auth related part of cloud config
+type AzureAuthConfig struct {
+	// The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
+	Cloud string `json:"cloud,omitempty" yaml:"cloud,omitempty"`
+	// The AAD Tenant ID for the Subscription that the cluster is deployed in
+	TenantID string `json:"tenantId,omitempty" yaml:"tenantId,omitempty"`
+	// The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
+	AADClientID string `json:"aadClientId,omitempty" yaml:"aadClientId,omitempty"`
+	// The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
+	AADClientSecret string `json:"aadClientSecret,omitempty" yaml:"aadClientSecret,omitempty" datapolicy:"token"`
+	// The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
+	AADClientCertPath string `json:"aadClientCertPath,omitempty" yaml:"aadClientCertPath,omitempty"`
+	// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
+	AADClientCertPassword string `json:"aadClientCertPassword,omitempty" yaml:"aadClientCertPassword,omitempty" datapolicy:"password"`
+	// Use managed service identity for the virtual machine to access Azure ARM APIs
+	UseManagedIdentityExtension bool `json:"useManagedIdentityExtension,omitempty" yaml:"useManagedIdentityExtension,omitempty"`
+	// UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used.
+	// More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview
+	// For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true.
+	UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty" yaml:"userAssignedIdentityID,omitempty"`
+	// The ID of the Azure Subscription that the cluster is deployed in
+	SubscriptionID string `json:"subscriptionId,omitempty" yaml:"subscriptionId,omitempty"`
+	// IdentitySystem indicates the identity provider. Relevant only to hybrid clouds (Azure Stack).
+	// Allowed values are 'azure_ad' (default), 'adfs'.
+	IdentitySystem string `json:"identitySystem,omitempty" yaml:"identitySystem,omitempty"`
+	// ResourceManagerEndpoint is the cloud's resource manager endpoint. If set, cloud provider queries this endpoint
+	// in order to generate an autorest.Environment instance instead of using one of the pre-defined Environments.
+	ResourceManagerEndpoint string `json:"resourceManagerEndpoint,omitempty" yaml:"resourceManagerEndpoint,omitempty"`
+	// The AAD Tenant ID for the Subscription that the network resources are deployed in
+	NetworkResourceTenantID string `json:"networkResourceTenantID,omitempty" yaml:"networkResourceTenantID,omitempty"`
+	// The ID of the Azure Subscription that the network resources are deployed in
+	NetworkResourceSubscriptionID string `json:"networkResourceSubscriptionID,omitempty" yaml:"networkResourceSubscriptionID,omitempty"`
+}
+
+// GetServicePrincipalToken creates a new service principal token based on the configuration.
+//
+// By default, the cluster and its network resources are deployed in the same AAD Tenant and Subscription,
+// and all azure clients use this method to fetch Service Principal Token.
+//
+// If NetworkResourceTenantID and NetworkResourceSubscriptionID are specified to have different values than TenantID and SubscriptionID, network resources are deployed in different AAD Tenant and Subscription than those for the cluster,
+// then only azure clients except VM/VMSS and network resource ones use this method to fetch Token.
+// For tokens for VM/VMSS and network resource ones, please check GetMultiTenantServicePrincipalToken and GetNetworkResourceServicePrincipalToken.
+func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment, resource string) (*adal.ServicePrincipalToken, error) {
+	var tenantID string
+	if strings.EqualFold(config.IdentitySystem, consts.ADFSIdentitySystem) {
+		tenantID = consts.ADFSIdentitySystem
+	} else {
+		tenantID = config.TenantID
+	}
+
+	if resource == "" {
+		resource = env.ServiceManagementEndpoint
+	}
+
+	if config.UseManagedIdentityExtension {
+		klog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
+		msiEndpoint, err := adal.GetMSIVMEndpoint()
+		if err != nil {
+			return nil, fmt.Errorf("error getting the managed service identity endpoint: %w", err)
+		}
+		if len(config.UserAssignedIdentityID) > 0 {
+			klog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token")
+			return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint,
+				resource,
+				config.UserAssignedIdentityID)
+		}
+		klog.V(4).Info("azure: using System Assigned MSI to retrieve access token")
+		return adal.NewServicePrincipalTokenFromMSI(
+			msiEndpoint,
+			resource)
+	}
+
+	oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, tenantID, nil)
+	if err != nil {
+		return nil, fmt.Errorf("error creating the OAuth config: %w", err)
+	}
+
+	if len(config.AADClientSecret) > 0 {
+		klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
+		return adal.NewServicePrincipalToken(
+			*oauthConfig,
+			config.AADClientID,
+			config.AADClientSecret,
+			resource)
+	}
+
+	if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
+		klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
+		certData, err := ioutil.ReadFile(config.AADClientCertPath)
+		if err != nil {
+			return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err)
+		}
+		certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
+		if err != nil {
+			return nil, fmt.Errorf("decoding the client certificate: %w", err)
+		}
+		return adal.NewServicePrincipalTokenFromCertificate(
+			*oauthConfig,
+			config.AADClientID,
+			certificate,
+			privateKey,
+			resource)
+	}
+
+	return nil, ErrorNoAuth
+}
+
+// GetMultiTenantServicePrincipalToken is used when (and only when) NetworkResourceTenantID and NetworkResourceSubscriptionID are specified to have different values than TenantID and SubscriptionID.
+//
+// In that scenario, network resources are deployed in different AAD Tenant and Subscription than those for the cluster,
+// and this method creates a new multi-tenant service principal token based on the configuration.
+//
+// PrimaryToken of the returned multi-tenant token is for the AAD Tenant specified by TenantID, and AuxiliaryToken of the returned multi-tenant token is for the AAD Tenant specified by NetworkResourceTenantID.
+//
+// Azure VM/VMSS clients use this multi-tenant token, in order to operate those VM/VMSS in AAD Tenant specified by TenantID, and meanwhile in their payload they are referencing network resources (e.g. Load Balancer, Network Security Group, etc.) in AAD Tenant specified by NetworkResourceTenantID.
+func GetMultiTenantServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.MultiTenantServicePrincipalToken, error) {
+	err := config.checkConfigWhenNetworkResourceInDifferentTenant()
+	if err != nil {
+		return nil, fmt.Errorf("got error getting multi-tenant service principal token: %w", err)
+	}
+
+	multiTenantOAuthConfig, err := adal.NewMultiTenantOAuthConfig(
+		env.ActiveDirectoryEndpoint, config.TenantID, []string{config.NetworkResourceTenantID}, adal.OAuthOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("creating the multi-tenant OAuth config: %w", err)
+	}
+
+	if len(config.AADClientSecret) > 0 {
+		klog.V(2).Infoln("azure: using client_id+client_secret to retrieve multi-tenant access token")
+		return adal.NewMultiTenantServicePrincipalToken(
+			multiTenantOAuthConfig,
+			config.AADClientID,
+			config.AADClientSecret,
+			env.ServiceManagementEndpoint)
+	}
+
+	if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
+		return nil, fmt.Errorf("AAD Application client certificate authentication is not supported in getting multi-tenant service principal token")
+	}
+
+	return nil, ErrorNoAuth
+}
+
+// GetNetworkResourceServicePrincipalToken is used when (and only when) NetworkResourceTenantID and NetworkResourceSubscriptionID are specified to have different values than TenantID and SubscriptionID.
+//
+// In that scenario, network resources are deployed in different AAD Tenant and Subscription than those for the cluster,
+// and this method creates a new service principal token for network resources tenant based on the configuration.
+//
+// Azure network resource (Load Balancer, Public IP, Route Table, Network Security Group and their sub level resources) clients use this multi-tenant token, in order to operate resources in AAD Tenant specified by NetworkResourceTenantID.
+func GetNetworkResourceServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
+	err := config.checkConfigWhenNetworkResourceInDifferentTenant()
+	if err != nil {
+		return nil, fmt.Errorf("got error(%w) in getting network resources service principal token", err)
+	}
+
+	oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, config.NetworkResourceTenantID, nil)
+	if err != nil {
+		return nil, fmt.Errorf("creating the OAuth config for network resources tenant: %w", err)
+	}
+
+	if len(config.AADClientSecret) > 0 {
+		klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token for network resources tenant")
+		return adal.NewServicePrincipalToken(
+			*oauthConfig,
+			config.AADClientID,
+			config.AADClientSecret,
+			env.ServiceManagementEndpoint)
+	}
+
+	if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
+		return nil, fmt.Errorf("AAD Application client certificate authentication is not supported in getting network resources service principal token")
+	}
+
+	return nil, ErrorNoAuth
+}
+
+// ParseAzureEnvironment returns the azure environment.
+// If 'resourceManagerEndpoint' is set, the environment is computed by querying the cloud's resource manager endpoint.
+// Otherwise, a pre-defined Environment is looked up by name.
+func ParseAzureEnvironment(cloudName, resourceManagerEndpoint, identitySystem string) (*azure.Environment, error) {
+	var env azure.Environment
+	var err error
+	if resourceManagerEndpoint != "" {
+		klog.V(4).Infof("Loading environment from resource manager endpoint: %s", resourceManagerEndpoint)
+		nameOverride := azure.OverrideProperty{Key: azure.EnvironmentName, Value: cloudName}
+		env, err = azure.EnvironmentFromURL(resourceManagerEndpoint, nameOverride)
+		if err == nil {
+			azureStackOverrides(&env, resourceManagerEndpoint, identitySystem)
+		}
+	} else if cloudName == "" {
+		klog.V(4).Info("Using public cloud environment")
+		env = azure.PublicCloud
+	} else {
+		klog.V(4).Infof("Using %s environment", cloudName)
+		env, err = azure.EnvironmentFromName(cloudName)
+	}
+	return &env, err
+}
+
+// UsesNetworkResourceInDifferentTenantOrSubscription determines whether the AzureAuthConfig indicates to use network resources in different AAD Tenant and Subscription than those for the cluster
+// Return true when either NetworkResourceTenantID or NetworkResourceSubscriptionID is specified
+// and differs from the corresponding value defined in the global configs
+func (config *AzureAuthConfig) UsesNetworkResourceInDifferentTenantOrSubscription() bool {
+	return (len(config.NetworkResourceTenantID) > 0 && !strings.EqualFold(config.NetworkResourceTenantID, config.TenantID)) ||
+		(len(config.NetworkResourceSubscriptionID) > 0 && !strings.EqualFold(config.NetworkResourceSubscriptionID, config.SubscriptionID))
+}
+
+// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
+// the private RSA key
+func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+	privateKey, certificate, err := pkcs12.Decode(pkcs, password)
+	if err != nil {
+		return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %w", err)
+	}
+	rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
+	if !isRsaKey {
+		return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
+	}
+
+	return certificate, rsaPrivateKey, nil
+}
+
+// azureStackOverrides ensures that the Environment matches what AKSe currently generates for Azure Stack
+func azureStackOverrides(env *azure.Environment, resourceManagerEndpoint, identitySystem string) {
+	env.ManagementPortalURL = strings.Replace(resourceManagerEndpoint, "https://management.", "https://portal.", -1)
+	env.ServiceManagementEndpoint = env.TokenAudience
+	env.ResourceManagerVMDNSSuffix = strings.Replace(resourceManagerEndpoint, "https://management.", "cloudapp.", -1)
+	env.ResourceManagerVMDNSSuffix = strings.TrimSuffix(env.ResourceManagerVMDNSSuffix, "/")
+	if strings.EqualFold(identitySystem, consts.ADFSIdentitySystem) {
+		env.ActiveDirectoryEndpoint = strings.TrimSuffix(env.ActiveDirectoryEndpoint, "/")
+		env.ActiveDirectoryEndpoint = strings.TrimSuffix(env.ActiveDirectoryEndpoint, "adfs")
+	}
+}
+
+// checkConfigWhenNetworkResourceInDifferentTenant checks configuration for the scenario of using network resource in different tenant
+func (config *AzureAuthConfig) checkConfigWhenNetworkResourceInDifferentTenant() error {
+	if !config.UsesNetworkResourceInDifferentTenantOrSubscription() {
+		return fmt.Errorf("NetworkResourceTenantID and NetworkResourceSubscriptionID must be configured")
+	}
+
+	if strings.EqualFold(config.IdentitySystem, consts.ADFSIdentitySystem) {
+		return fmt.Errorf("ADFS identity system is not supported")
+	}
+
+	if config.UseManagedIdentityExtension {
+		return fmt.Errorf("managed identity is not supported")
+	}
+
+	return nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..52e1d5da9aa116f0b73fae50085e15d5880f1ebe
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/auth/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package auth provides a general library to authorize Azure ARM clients.
+package auth // import "sigs.k8s.io/cloud-provider-azure/pkg/auth"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/azure_deploymentclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/azure_deploymentclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..9067d288ecd4863c8e0b616aa05949bc61bbcd7d
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/azure_deploymentclient.go
@@ -0,0 +1,459 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deploymentclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements the deployment client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+
+	// Rate limiting configures.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling configures.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new deployment client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	armClient := armclient.New(authorizer, *config, baseURI, APIVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure DeploymentClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure DeploymentClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+	}
+
+	return client
+}
+
+// Get gets a deployment
+func (c *Client) Get(ctx context.Context, resourceGroupName string, deploymentName string) (resources.DeploymentExtended, *retry.Error) {
+	mc := metrics.NewMetricContext("deployments", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return resources.DeploymentExtended{}, retry.GetRateLimitError(false, "GetDeployment")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("GetDeployment", "client throttled", c.RetryAfterReader)
+		return resources.DeploymentExtended{}, rerr
+	}
+
+	result, rerr := c.getDeployment(ctx, resourceGroupName, deploymentName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getDeployment gets a deployment.
+func (c *Client) getDeployment(ctx context.Context, resourceGroupName string, deploymentName string) (resources.DeploymentExtended, *retry.Error) {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Resources/deployments",
+		deploymentName,
+	)
+	result := resources.DeploymentExtended{}
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// List gets a list of deployments in the resource group.
+func (c *Client) List(ctx context.Context, resourceGroupName string) ([]resources.DeploymentExtended, *retry.Error) {
+	mc := metrics.NewMetricContext("deployments", "list", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(false, "ListDeployment")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("ListDeployment", "client throttled", c.RetryAfterReader)
+		return nil, rerr
+	}
+
+	result, rerr := c.listDeployment(ctx, resourceGroupName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// listDeployment gets a list of deployments in the resource group.
+func (c *Client) listDeployment(ctx context.Context, resourceGroupName string) ([]resources.DeploymentExtended, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Resources/deployments",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", resourceGroupName))
+	result := make([]resources.DeploymentExtended, 0)
+	page := &DeploymentResultPage{}
+	page.fn = c.listNextResults
+
+	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.list.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	var err error
+	page.dplr, err = c.listResponder(resp)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.list.respond", resourceID, err)
+		return result, retry.GetError(resp, err)
+	}
+
+	for {
+		result = append(result, page.Values()...)
+
+		// Abort the loop when there's no nextLink in the response.
+		if to.String(page.Response().NextLink) == "" {
+			break
+		}
+
+		if err = page.NextWithContext(ctx); err != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.list.next", resourceID, err)
+			return result, retry.GetError(page.Response().Response.Response, err)
+		}
+	}
+
+	return result, nil
+}
+
+func (c *Client) listResponder(resp *http.Response) (result resources.DeploymentListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		autorest.ByIgnoring(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// deploymentListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (c *Client) deploymentListResultPreparer(ctx context.Context, dplr resources.DeploymentListResult) (*http.Request, error) {
+	if dplr.NextLink == nil || len(to.String(dplr.NextLink)) < 1 {
+		return nil, nil
+	}
+
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithBaseURL(to.String(dplr.NextLink)),
+	}
+	return c.armClient.PrepareGetRequest(ctx, decorators...)
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults resources.DeploymentListResult) (result resources.DeploymentListResult, err error) {
+	req, err := c.deploymentListResultPreparer(ctx, lastResults)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "deploymentclient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, rerr := c.armClient.Send(ctx, req)
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(rerr.Error(), "deploymentclient", "listNextResults", resp, "Failure sending next results request")
+	}
+
+	result, err = c.listResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "deploymentclient", "listNextResults", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// DeploymentResultPage contains a page of deployments values.
+type DeploymentResultPage struct {
+	fn   func(context.Context, resources.DeploymentListResult) (resources.DeploymentListResult, error)
+	dplr resources.DeploymentListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DeploymentResultPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.dplr)
+	if err != nil {
+		return err
+	}
+	page.dplr = next
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *DeploymentResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page DeploymentResultPage) NotDone() bool {
+	return !page.dplr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page DeploymentResultPage) Response() resources.DeploymentListResult {
+	return page.dplr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page DeploymentResultPage) Values() []resources.DeploymentExtended {
+	if page.dplr.IsEmpty() {
+		return nil
+	}
+	return *page.dplr.Value
+}
+
+// CreateOrUpdate creates or updates a deployment.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment, etag string) *retry.Error {
+	mc := metrics.NewMetricContext("deployments", "create_or_update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "CreateOrUpdateDeployment")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("CreateOrUpdateDeployment", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdateDeployment(ctx, resourceGroupName, deploymentName, parameters, etag)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateDeployment(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment, etag string) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Resources/deployments",
+		deploymentName,
+	)
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
+		autorest.WithJSON(parameters),
+	}
+	if etag != "" {
+		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag)))
+	}
+
+	response, rerr := c.armClient.PutResourceWithDecorators(ctx, resourceID, parameters, decorators)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.createOrUpdateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateResponder(resp *http.Response) (*resources.DeploymentExtended, *retry.Error) {
+	result := &resources.DeploymentExtended{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
+
+// Delete deletes a deployment by name.
+func (c *Client) Delete(ctx context.Context, resourceGroupName string, deploymentName string) *retry.Error {
+	mc := metrics.NewMetricContext("deployments", "delete", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "DeleteDeployment")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("DeleteDeployment", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.deleteDeployment(ctx, resourceGroupName, deploymentName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// deleteDeployment deletes a deployment by name.
+func (c *Client) deleteDeployment(ctx context.Context, resourceGroupName string, deploymentName string) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Resources/deployments",
+		deploymentName,
+	)
+
+	return c.armClient.DeleteResource(ctx, resourceID, "")
+}
+
+// ExportTemplate exports the template used for specified deployment
+func (c *Client) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, rerr *retry.Error) {
+	mc := metrics.NewMetricContext("deployments", "export_template", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return resources.DeploymentExportResult{}, retry.GetRateLimitError(true, "ExportTemplateDeployment")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("CreateOrUpdateDeployment", "client throttled", c.RetryAfterWriter)
+		return resources.DeploymentExportResult{}, rerr
+	}
+
+	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Resources/deployments/%s/exportTemplate",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", resourceGroupName),
+		autorest.Encode("path", deploymentName))
+	response, rerr := c.armClient.PostResource(ctx, resourceID, "exportTemplate", struct{}{}, map[string]interface{}{})
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.exportTemplate.request", resourceID, rerr.Error())
+		return
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.exportTemplate.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, rerr
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..7aa04817672245f7e3ce84675741966e557fce6f
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package deploymentclient implements the client for azure deployments.
+package deploymentclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..3d381ef6487a39d287aafe8f2d4c0a8f9ea3db80
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/interface.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deploymentclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for resources.
+	APIVersion = "2017-05-10"
+)
+
+// Interface is the client interface for Deployments.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	Get(ctx context.Context, resourceGroupName string, deploymentName string) (resources.DeploymentExtended, *retry.Error)
+	List(ctx context.Context, resourceGroupName string) ([]resources.DeploymentExtended, *retry.Error)
+	ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, rerr *retry.Error)
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, managedClusterName string, parameters resources.Deployment, etag string) *retry.Error
+	Delete(ctx context.Context, resourceGroupName string, deploymentName string) *retry.Error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..bc3c2e09b3bb0435bc041e56ecfc5d06c27ca69a
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mockdiskclient implements the mock client for Disks.
+package mockdiskclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..bb34edf4cbe7d28f235c2dcf5a2ad9ecfbb7cc3a
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go
@@ -0,0 +1,126 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
+
+// Package mockdiskclient is a generated GoMock package.
+package mockdiskclient
+
+import (
+	context "context"
+	reflect "reflect"
+
+	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	gomock "github.com/golang/mock/gomock"
+	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
// MockInterface is a mock of Interface interface.
// NOTE: this file is MockGen output (see the generated-code header above);
// regenerate with hack/update-mock-clients.sh rather than hand-editing.
type MockInterface struct {
	ctrl     *gomock.Controller // drives call matching and failure reporting
	recorder *MockInterfaceMockRecorder
}

// MockInterfaceMockRecorder is the mock recorder for MockInterface.
type MockInterfaceMockRecorder struct {
	mock *MockInterface
}

// NewMockInterface creates a new mock instance.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
	mock := &MockInterface{ctrl: ctrl}
	mock.recorder = &MockInterfaceMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
	return m.recorder
}

// Get mocks base method.
func (m *MockInterface) Get(ctx context.Context, resourceGroupName, diskName string) (compute.Disk, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, diskName)
	// Type assertions intentionally ignore failure: an unset return yields the zero value.
	ret0, _ := ret[0].(compute.Disk)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// Get indicates an expected call of Get.
func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, diskName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, diskName)
}

// CreateOrUpdate mocks base method.
func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.Disk) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, diskName, diskParameter)
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}

// CreateOrUpdate indicates an expected call of CreateOrUpdate.
func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, diskName, diskParameter)
}

// Update mocks base method.
func (m *MockInterface) Update(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, diskName, diskParameter)
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}

// Update indicates an expected call of Update.
func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, diskName, diskParameter)
}

// Delete mocks base method.
func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, diskName string) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, diskName)
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}

// Delete indicates an expected call of Delete.
func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, diskName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, diskName)
}

// ListByResourceGroup mocks base method.
func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName)
	ret0, _ := ret[0].([]compute.Disk)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// ListByResourceGroup indicates an expected call of ListByResourceGroup.
func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName)
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b6a4d34c53c13cd200baa812ed4ed99848a0bb7
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fileclient
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
// Client implements the azure file client interface.
// It wraps the storage SDK's file-share and file-service clients and carries
// the subscription ID used for metrics labeling.
type Client struct {
	fileSharesClient   storage.FileSharesClient
	fileServicesClient storage.FileServicesClient

	subscriptionID string
}

// ShareOptions contains the fields which are used to create file share.
type ShareOptions struct {
	Name       string
	Protocol   storage.EnabledProtocols
	RequestGiB int
	// supported values: ""(by default), "TransactionOptimized", "Cool", "Hot", "Premium"
	AccessTier string
}
+
+// New creates a azure file client
+func New(config *azclients.ClientConfig) *Client {
+	fileSharesClient := storage.NewFileSharesClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
+	fileSharesClient.Authorizer = config.Authorizer
+
+	fileServicesClient := storage.NewFileServicesClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
+	fileServicesClient.Authorizer = config.Authorizer
+	return &Client{
+		fileSharesClient:   fileSharesClient,
+		fileServicesClient: fileServicesClient,
+		subscriptionID:     config.SubscriptionID,
+	}
+}
+
+// CreateFileShare creates a file share
+func (c *Client) CreateFileShare(resourceGroupName, accountName string, shareOptions *ShareOptions) error {
+	mc := metrics.NewMetricContext("file_shares", "create", resourceGroupName, c.subscriptionID, "")
+
+	if shareOptions == nil {
+		return fmt.Errorf("share options is nil")
+	}
+	quota := int32(shareOptions.RequestGiB)
+	fileShareProperties := &storage.FileShareProperties{
+		ShareQuota: &quota,
+	}
+	if shareOptions.Protocol == storage.EnabledProtocolsNFS {
+		fileShareProperties.EnabledProtocols = shareOptions.Protocol
+	}
+	if shareOptions.AccessTier != "" {
+		fileShareProperties.AccessTier = storage.ShareAccessTier(shareOptions.AccessTier)
+	}
+	fileShare := storage.FileShare{
+		Name:                &shareOptions.Name,
+		FileShareProperties: fileShareProperties,
+	}
+	_, err := c.fileSharesClient.Create(context.Background(), resourceGroupName, accountName, shareOptions.Name, fileShare, "")
+	var rerr *retry.Error
+	if err != nil {
+		rerr = &retry.Error{
+			RawError: err,
+		}
+	}
+	mc.Observe(rerr)
+
+	return err
+}
+
+// DeleteFileShare deletes a file share
+func (c *Client) DeleteFileShare(resourceGroupName, accountName, name string) error {
+	mc := metrics.NewMetricContext("file_shares", "delete", resourceGroupName, c.subscriptionID, "")
+
+	_, err := c.fileSharesClient.Delete(context.Background(), resourceGroupName, accountName, name, "")
+	var rerr *retry.Error
+	if err != nil {
+		rerr = &retry.Error{
+			RawError: err,
+		}
+	}
+	mc.Observe(rerr)
+
+	return err
+}
+
+// ResizeFileShare resizes a file share
+func (c *Client) ResizeFileShare(resourceGroupName, accountName, name string, sizeGiB int) error {
+	mc := metrics.NewMetricContext("file_shares", "resize", resourceGroupName, c.subscriptionID, "")
+	var rerr *retry.Error
+
+	quota := int32(sizeGiB)
+
+	share, err := c.fileSharesClient.Get(context.Background(), resourceGroupName, accountName, name, storage.GetShareExpandStats, "")
+	if err != nil {
+		rerr = &retry.Error{
+			RawError: err,
+		}
+		mc.Observe(rerr)
+		return fmt.Errorf("failed to get file share (%s): %w", name, err)
+	}
+	if *share.FileShareProperties.ShareQuota >= quota {
+		klog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
+			share.FileShareProperties.ShareQuota, sizeGiB, accountName, name)
+		return nil
+	}
+
+	share.FileShareProperties.ShareQuota = &quota
+	_, err = c.fileSharesClient.Update(context.Background(), resourceGroupName, accountName, name, share)
+	if err != nil {
+		rerr = &retry.Error{
+			RawError: err,
+		}
+		mc.Observe(rerr)
+		return fmt.Errorf("failed to update quota on file share(%s), err: %w", name, err)
+	}
+
+	mc.Observe(rerr)
+	klog.V(4).Infof("resize file share completed, resourceGroupName(%s), accountName: %s, shareName: %s, sizeGiB: %d", resourceGroupName, accountName, name, sizeGiB)
+
+	return nil
+}
+
+// GetFileShare gets a file share
+func (c *Client) GetFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error) {
+	mc := metrics.NewMetricContext("file_shares", "get", resourceGroupName, c.subscriptionID, "")
+
+	result, err := c.fileSharesClient.Get(context.Background(), resourceGroupName, accountName, name, storage.GetShareExpandStats, "")
+	var rerr *retry.Error
+	if err != nil {
+		rerr = &retry.Error{
+			RawError: err,
+		}
+	}
+	mc.Observe(rerr)
+
+	return result, err
+}
+
// GetServiceProperties get service properties.
// Thin pass-through to the storage SDK; no metrics are recorded here.
func (c *Client) GetServiceProperties(resourceGroupName, accountName string) (storage.FileServiceProperties, error) {
	return c.fileServicesClient.GetServiceProperties(context.Background(), resourceGroupName, accountName)
}

// SetServiceProperties set service properties.
// Thin pass-through to the storage SDK; no metrics are recorded here.
func (c *Client) SetServiceProperties(resourceGroupName, accountName string, parameters storage.FileServiceProperties) (storage.FileServiceProperties, error) {
	return c.fileServicesClient.SetServiceProperties(context.Background(), resourceGroupName, accountName, parameters)
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..b038d46dbfe2841a8c82c0bc1d5d0b809cc3064f
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fileclient implements the client for azure file.
+package fileclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..e1247ae6b6db557322fe251adaf65c1417d04411
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fileclient
+
+import (
+	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+)
+
// Interface is the client interface for creating file shares, interface for test injection.
// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
type Interface interface {
	// CreateFileShare creates a file share described by shareOptions on the account.
	CreateFileShare(resourceGroupName, accountName string, shareOptions *ShareOptions) error
	// DeleteFileShare deletes the named file share.
	DeleteFileShare(resourceGroupName, accountName, name string) error
	// ResizeFileShare grows the named file share to sizeGiB.
	ResizeFileShare(resourceGroupName, accountName, name string, sizeGiB int) error
	// GetFileShare fetches the named file share.
	GetFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error)
	// GetServiceProperties gets the account's file service properties.
	GetServiceProperties(resourceGroupName, accountName string) (storage.FileServiceProperties, error)
	// SetServiceProperties sets the account's file service properties.
	SetServiceProperties(resourceGroupName, accountName string, parameters storage.FileServiceProperties) (storage.FileServiceProperties, error)
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/azure_loadbalancerclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/azure_loadbalancerclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca17e982f0c49136789009943c23b6bbfc44e1fd
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/azure_loadbalancerclient.go
@@ -0,0 +1,503 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loadbalancerclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
// Compile-time check that Client satisfies the package's Interface.
var _ Interface = &Client{}

// Client implements LoadBalancer client Interface.
type Client struct {
	armClient      armclient.Interface
	subscriptionID string
	cloudName      string

	// Rate limiting configures.
	rateLimiterReader flowcontrol.RateLimiter
	rateLimiterWriter flowcontrol.RateLimiter

	// ARM throttling configures.
	// When the server throttles a call, the retry-after deadline is stored
	// here and later requests fail fast until it passes.
	RetryAfterReader time.Time
	RetryAfterWriter time.Time
}
+
+// New creates a new LoadBalancer client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure LoadBalancersClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure LoadBalancersClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+		cloudName:         config.CloudName,
+	}
+
+	return client
+}
+
+// Get gets a LoadBalancer.
+func (c *Client) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (network.LoadBalancer, *retry.Error) {
+	mc := metrics.NewMetricContext("load_balancers", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return network.LoadBalancer{}, retry.GetRateLimitError(false, "LBGet")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("LBGet", "client throttled", c.RetryAfterReader)
+		return network.LoadBalancer{}, rerr
+	}
+
+	result, rerr := c.getLB(ctx, resourceGroupName, loadBalancerName, expand)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getLB gets a LoadBalancer.
+func (c *Client) getLB(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (network.LoadBalancer, *retry.Error) {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/loadBalancers",
+		loadBalancerName,
+	)
+	result := network.LoadBalancer{}
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, expand)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancer.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancer.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// List gets a list of LoadBalancer in the resource group.
+func (c *Client) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, *retry.Error) {
+	mc := metrics.NewMetricContext("load_balancers", "list", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(false, "LBList")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("LBList", "client throttled", c.RetryAfterReader)
+		return nil, rerr
+	}
+
+	result, rerr := c.listLB(ctx, resourceGroupName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
// listLB gets a list of LoadBalancers in the resource group.
// It performs the first GET itself, then walks nextLink pages via
// LoadBalancerListResultPage until the server stops returning one,
// accumulating every LoadBalancer into a single slice.
func (c *Client) listLB(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, *retry.Error) {
	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers",
		autorest.Encode("path", c.subscriptionID),
		autorest.Encode("path", resourceGroupName))
	result := make([]network.LoadBalancer, 0)
	page := &LoadBalancerListResultPage{}
	// listNextResults is invoked by page.NextWithContext to fetch each subsequent page.
	page.fn = c.listNextResults

	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
	defer c.armClient.CloseResponse(ctx, resp)
	if rerr != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancer.list.request", resourceID, rerr.Error())
		return result, rerr
	}

	var err error
	page.lblr, err = c.listResponder(resp)
	if err != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancer.list.respond", resourceID, err)
		return result, retry.GetError(resp, err)
	}

	for {
		result = append(result, page.Values()...)

		// Abort the loop when there's no nextLink in the response.
		if to.String(page.Response().NextLink) == "" {
			break
		}

		// On a paging failure, return what was collected so far with the error.
		if err = page.NextWithContext(ctx); err != nil {
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancer.list.next", resourceID, err)
			return result, retry.GetError(page.Response().Response.Response, err)
		}
	}

	return result, nil
}
+
+// CreateOrUpdate creates or updates a LoadBalancer.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) *retry.Error {
+	mc := metrics.NewMetricContext("load_balancers", "create_or_update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "LBCreateOrUpdate")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("LBCreateOrUpdate", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdateLB(ctx, resourceGroupName, loadBalancerName, parameters, etag)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
// createOrUpdateLB creates or updates a LoadBalancer.
// When etag is non-empty it is sent as an If-Match header so the PUT only
// succeeds against the expected resource version.
func (c *Client) createOrUpdateLB(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) *retry.Error {
	resourceID := armclient.GetResourceID(
		c.subscriptionID,
		resourceGroupName,
		"Microsoft.Network/loadBalancers",
		loadBalancerName,
	)
	decorators := []autorest.PrepareDecorator{
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
		autorest.WithJSON(parameters),
	}
	if etag != "" {
		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag)))
	}

	response, rerr := c.armClient.PutResourceWithDecorators(ctx, resourceID, parameters, decorators)
	defer c.armClient.CloseResponse(ctx, response)
	if rerr != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancer.put.request", resourceID, rerr.Error())
		return rerr
	}

	// 204 No Content carries no body to validate; anything else goes through the responder.
	if response != nil && response.StatusCode != http.StatusNoContent {
		_, rerr = c.createOrUpdateResponder(response)
		if rerr != nil {
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancer.put.respond", resourceID, rerr.Error())
			return rerr
		}
	}

	return nil
}
+
// createOrUpdateResponder validates a PUT response (200/201 expected) and
// unmarshals its body. retry.GetError returns nil for a successful response
// with a nil err, so the error result is nil on the happy path.
func (c *Client) createOrUpdateResponder(resp *http.Response) (*network.LoadBalancer, *retry.Error) {
	result := &network.LoadBalancer{}
	err := autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result))
	result.Response = autorest.Response{Response: resp}
	return result, retry.GetError(resp, err)
}
+
+// Delete deletes a LoadBalancer by name.
+func (c *Client) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) *retry.Error {
+	mc := metrics.NewMetricContext("load_balancers", "delete", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "LBDelete")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("LBDelete", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.deleteLB(ctx, resourceGroupName, loadBalancerName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// deleteLB deletes a LoadBalancer by name.
+func (c *Client) deleteLB(ctx context.Context, resourceGroupName string, loadBalancerName string) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/loadBalancers",
+		loadBalancerName,
+	)
+
+	return c.armClient.DeleteResource(ctx, resourceID, "")
+}
+
// listResponder validates a list response (200 expected) and unmarshals the
// body into a LoadBalancerListResult, attaching the raw response.
func (c *Client) listResponder(resp *http.Response) (result network.LoadBalancerListResult, err error) {
	err = autorest.Respond(
		resp,
		autorest.ByIgnoring(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result))
	result.Response = autorest.Response{Response: resp}
	return
}
+
+// loadBalancerListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (c *Client) loadBalancerListResultPreparer(ctx context.Context, lblr network.LoadBalancerListResult) (*http.Request, error) {
+	if lblr.NextLink == nil || len(to.String(lblr.NextLink)) < 1 {
+		return nil, nil
+	}
+
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithBaseURL(to.String(lblr.NextLink)),
+	}
+	return c.armClient.PrepareGetRequest(ctx, decorators...)
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) {
+	req, err := c.loadBalancerListResultPreparer(ctx, lastResults)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "loadbalancerclient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, rerr := c.armClient.Send(ctx, req)
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(rerr.Error(), "loadbalancerclient", "listNextResults", resp, "Failure sending next results request")
+	}
+
+	result, err = c.listResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "loadbalancerclient", "listNextResults", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// LoadBalancerListResultPage contains a page of LoadBalancer values.
+type LoadBalancerListResultPage struct {
+	fn   func(context.Context, network.LoadBalancerListResult) (network.LoadBalancerListResult, error)
+	lblr network.LoadBalancerListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *LoadBalancerListResultPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.lblr)
+	if err != nil {
+		return err
+	}
+	page.lblr = next
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *LoadBalancerListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page LoadBalancerListResultPage) NotDone() bool {
+	return !page.lblr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page LoadBalancerListResultPage) Response() network.LoadBalancerListResult {
+	return page.lblr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page LoadBalancerListResultPage) Values() []network.LoadBalancer {
+	if page.lblr.IsEmpty() {
+		return nil
+	}
+	return *page.lblr.Value
+}
+
+// CreateOrUpdateBackendPools creates or updates a LoadBalancer backend pool.
+func (c *Client) CreateOrUpdateBackendPools(ctx context.Context, resourceGroupName string, loadBalancerName string, backendPoolName string, parameters network.BackendAddressPool, etag string) *retry.Error {
+	mc := metrics.NewMetricContext("load_balancers", "create_or_update_backend_pools", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "LBCreateOrUpdateBackendPools")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("LBCreateOrUpdateBackendPools", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdateLBBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolName, parameters, etag)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// createOrUpdateLBBackendPool creates or updates a LoadBalancer backend address pool.
+func (c *Client) createOrUpdateLBBackendPool(ctx context.Context, resourceGroupName string, loadBalancerName string, backendPoolName string, parameters network.BackendAddressPool, etag string) *retry.Error {
+	resourceID := armclient.GetChildResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/loadBalancers",
+		loadBalancerName,
+		"backendAddressPools",
+		backendPoolName,
+	)
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
+		autorest.WithJSON(parameters),
+	}
+	if etag != "" {
+		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag)))
+	}
+
+	response, rerr := c.armClient.PutResourceWithDecorators(ctx, resourceID, parameters, decorators)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancerbackendpool.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.createOrUpdateBackendPoolResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "loadbalancerbackendpool.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateBackendPoolResponder(resp *http.Response) (*network.BackendAddressPool, *retry.Error) {
+	result := &network.BackendAddressPool{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..a77686d3117169c9bd2e95f03d0feba28e7270e6
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package loadbalancerclient implements the client for LoadBalancer.
+package loadbalancerclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..a71b57391428b6b12ed3fd4390c5fa94dfad6d72
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loadbalancerclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for network.
+	APIVersion = "2020-08-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2018-11-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for LoadBalancer.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	// Get gets a LoadBalancer.
+	Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, rerr *retry.Error)
+
+	// List gets a list of LoadBalancer in the resource group.
+	List(ctx context.Context, resourceGroupName string) (result []network.LoadBalancer, rerr *retry.Error)
+
+	// CreateOrUpdate creates or updates a LoadBalancer.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) *retry.Error
+
+	// CreateOrUpdateBackendPools creates or updates a load balancer's backend address pool.
+	CreateOrUpdateBackendPools(ctx context.Context, resourceGroupName string, loadBalancerName string, backendPoolName string, parameters network.BackendAddressPool, etag string) *retry.Error
+
+	// Delete deletes a LoadBalancer by name.
+	Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) *retry.Error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..424e5ffbbfb0f0904d4de295099e7e741a441821
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mockloadbalancerclient implements the mock client for LoadBalancer.
+package mockloadbalancerclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..7e45c8aa9e1bb1f90d8dda81ace7014fc4c6c0e2
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go
@@ -0,0 +1,126 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go
+
+// Package mockloadbalancerclient is a generated GoMock package.
+package mockloadbalancerclient
+
+import (
+	context "context"
+	reflect "reflect"
+
+	network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	gomock "github.com/golang/mock/gomock"
+	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// MockInterface is a mock of Interface interface.
+type MockInterface struct {
+	ctrl     *gomock.Controller
+	recorder *MockInterfaceMockRecorder
+}
+
+// MockInterfaceMockRecorder is the mock recorder for MockInterface.
+type MockInterfaceMockRecorder struct {
+	mock *MockInterface
+}
+
+// NewMockInterface creates a new mock instance.
+func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
+	mock := &MockInterface{ctrl: ctrl}
+	mock.recorder = &MockInterfaceMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
+	return m.recorder
+}
+
+// Get mocks base method.
+func (m *MockInterface) Get(ctx context.Context, resourceGroupName, loadBalancerName, expand string) (network.LoadBalancer, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, loadBalancerName, expand)
+	ret0, _ := ret[0].(network.LoadBalancer)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, loadBalancerName, expand interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, loadBalancerName, expand)
+}
+
+// List mocks base method.
+func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "List", ctx, resourceGroupName)
+	ret0, _ := ret[0].([]network.LoadBalancer)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// List indicates an expected call of List.
+func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName)
+}
+
+// CreateOrUpdate mocks base method.
+func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, loadBalancerName string, parameters network.LoadBalancer, etag string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, loadBalancerName, parameters, etag)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// CreateOrUpdate indicates an expected call of CreateOrUpdate.
+func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, loadBalancerName, parameters, etag interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, loadBalancerName, parameters, etag)
+}
+
+// CreateOrUpdateBackendPools mocks base method.
+func (m *MockInterface) CreateOrUpdateBackendPools(ctx context.Context, resourceGroupName, loadBalancerName, backendPoolName string, parameters network.BackendAddressPool, etag string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateOrUpdateBackendPools", ctx, resourceGroupName, loadBalancerName, backendPoolName, parameters, etag)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// CreateOrUpdateBackendPools indicates an expected call of CreateOrUpdateBackendPools.
+func (mr *MockInterfaceMockRecorder) CreateOrUpdateBackendPools(ctx, resourceGroupName, loadBalancerName, backendPoolName, parameters, etag interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateBackendPools", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdateBackendPools), ctx, resourceGroupName, loadBalancerName, backendPoolName, parameters, etag)
+}
+
+// Delete mocks base method.
+func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, loadBalancerName string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, loadBalancerName)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// Delete indicates an expected call of Delete.
+func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, loadBalancerName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, loadBalancerName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c2369b9b48eb1ccd8f761234b4a2395498486b6
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package privatednsclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
+	"k8s.io/klog/v2"
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+)
+
+var _ Interface = &Client{}
+
+// Client implements privatednsclient Interface.
+type Client struct {
+	privateDNSClient privatedns.PrivateZonesClient
+}
+
+// New creates a new privatedns client.
+func New(config *azclients.ClientConfig) *Client {
+	privateDNSClient := privatedns.NewPrivateZonesClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
+	privateDNSClient.Authorizer = config.Authorizer
+	client := &Client{
+		privateDNSClient: privateDNSClient,
+	}
+	return client
+}
+
+// CreateOrUpdate creates or updates a private dns zone
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, parameters privatedns.PrivateZone, waitForCompletion bool) error {
+	createOrUpdateFuture, err := c.privateDNSClient.CreateOrUpdate(ctx, resourceGroupName, privateZoneName, parameters, "", "*")
+
+	if err != nil {
+		klog.V(5).Infof("Received error for %s, resourceGroup: %s, error: %s", "privatedns.put.request", resourceGroupName, err)
+		return err
+	}
+
+	if waitForCompletion {
+		err := createOrUpdateFuture.WaitForCompletionRef(ctx, c.privateDNSClient.Client)
+		if err != nil {
+			klog.V(5).Infof("Received error while waiting for completion for %s, resourceGroup: %s, error: %s", "privatedns.put.request", resourceGroupName, err)
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *Client) Get(ctx context.Context, resourceGroupName string, privateZoneName string) (result privatedns.PrivateZone, err error) {
+	return c.privateDNSClient.Get(ctx, resourceGroupName, privateZoneName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..628f8dbc488a1f904d77dcd5f22cb10122acb1de
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/interface.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package privatednsclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
+)
+
+// Interface is the client interface for Private DNS Zones
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+
+	// Get gets the PrivateDNSZone.
+	Get(ctx context.Context, resourceGroupName string, privateZoneName string) (result privatedns.PrivateZone, err error)
+
+	// CreateOrUpdate creates or updates a private dns zone.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, parameters privatedns.PrivateZone, waitForCompletion bool) error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/azure_privatednszonegroupclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/azure_privatednszonegroupclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..401d7d0d38d13a2712dba538109662f5b58341c5
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/azure_privatednszonegroupclient.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package privatednszonegroupclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"k8s.io/klog/v2"
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+)
+
+var _ Interface = &Client{}
+
+// Client implements privatednszonegroupclient client Interface.
+type Client struct {
+	privateDNSZoneGroupClient network.PrivateDNSZoneGroupsClient
+}
+
+// New creates a new private dns zone group client.
+func New(config *azclients.ClientConfig) *Client {
+	privateDNSZoneGroupClient := network.NewPrivateDNSZoneGroupsClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
+	privateDNSZoneGroupClient.Authorizer = config.Authorizer
+	client := &Client{
+		privateDNSZoneGroupClient: privateDNSZoneGroupClient,
+	}
+	return client
+}
+
+// CreateOrUpdate creates or updates a private dns zone group
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, parameters network.PrivateDNSZoneGroup, waitForCompletion bool) error {
+	createOrUpdateFuture, err := c.privateDNSZoneGroupClient.CreateOrUpdate(ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName, parameters)
+	if err != nil {
+		klog.V(5).Infof("Received error for %s, resourceGroup: %s, privateEndpointName: %s, error: %s", "privatednszonegroup.put.request", resourceGroupName, privateEndpointName, err)
+		return err
+	}
+	if waitForCompletion {
+		err = createOrUpdateFuture.WaitForCompletionRef(ctx, c.privateDNSZoneGroupClient.Client)
+		if err != nil {
+			klog.V(5).Infof("Received error while waiting for completion for %s, resourceGroup: %s, privateEndpointName: %s, error: %s", "privatednszonegroup.put.request", resourceGroupName, privateEndpointName, err)
+			return err
+		}
+	}
+	return nil
+}
+
+// Get gets the private dns zone group
+func (c *Client) Get(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string) (result network.PrivateDNSZoneGroup, err error) {
+	return c.privateDNSZoneGroupClient.Get(ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..fae7df643f0d74b0a627371ac76ae2cc86f21140
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/interface.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package privatednszonegroupclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+)
+
+// Interface is the client interface for Private DNS Zone Group.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+
+	// Get gets the private dns zone group
+	Get(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string) (result network.PrivateDNSZoneGroup, err error)
+
+	// CreateOrUpdate creates or updates a private dns zone group endpoint.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, parameters network.PrivateDNSZoneGroup, waitForCompletion bool) error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/azure_privateendpointclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/azure_privateendpointclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..f7883321fd0194ff1c871793afd3bb7faeb3b5f6
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/azure_privateendpointclient.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package privateendpointclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"k8s.io/klog/v2"
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+)
+
+var _ Interface = &Client{}
+
+// Client implements privateendpointclient Interface.
+type Client struct {
+	privateEndpointClient network.PrivateEndpointsClient
+}
+
+// New creates a new private endpoint client.
+func New(config *azclients.ClientConfig) *Client {
+	privateEndpointClient := network.NewPrivateEndpointsClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
+	privateEndpointClient.Authorizer = config.Authorizer
+
+	client := &Client{
+		privateEndpointClient: privateEndpointClient,
+	}
+	return client
+}
+
+// CreateOrUpdate creates or updates a private endpoint.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, endpointName string, privateEndpoint network.PrivateEndpoint, waitForCompletion bool) error {
+	createOrUpdateFuture, err := c.privateEndpointClient.CreateOrUpdate(ctx, resourceGroupName, endpointName, privateEndpoint)
+	if err != nil {
+		klog.V(5).Infof("Received error for %s, resourceGroup: %s, error: %s", "privateendpoint.put.request", resourceGroupName, err)
+		return err
+	}
+	if waitForCompletion {
+		err = createOrUpdateFuture.WaitForCompletionRef(ctx, c.privateEndpointClient.Client)
+		if err != nil {
+			klog.V(5).Infof("Received error while waiting for completion for %s, resourceGroup: %s, error: %s", "privateendpoint.put.request", resourceGroupName, err)
+			return err
+		}
+
+	}
+	return nil
+}
+
+// Get gets the private endpoint
+func (c *Client) Get(ctx context.Context, resourceGroupName string, privateEndpointName string, expand string) (result network.PrivateEndpoint, err error) {
+	return c.privateEndpointClient.Get(ctx, resourceGroupName, privateEndpointName, expand)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..8aa1d4cd1ddda087177c2b6fe570f2bf058c263c
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/interface.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package privateendpointclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+)
+
+// Interface is the client interface for Private Endpoints.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+
+	// Get gets the private endpoint
+	Get(ctx context.Context, resourceGroupName string, privateEndpointName string, expand string) (result network.PrivateEndpoint, err error)
+
+	// CreateOrUpdate creates or updates a private endpoint.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, endpointName string, privateEndpoint network.PrivateEndpoint, waitForCompletion bool) error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/azure_publicipclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/azure_publicipclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..be60a4f08a64cf82db41b2c77829857d49c60125
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/azure_publicipclient.go
@@ -0,0 +1,563 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package publicipclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements PublicIPAddress client Interface.
+type Client struct {
+	armClient              armclient.Interface
+	subscriptionID         string
+	cloudName              string
+	disableAzureStackCloud bool
+
+	// Rate limiting configures.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling configures.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new PublicIPAddress client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure PublicIPAddressesClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure PublicIPAddressesClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:              armClient,
+		rateLimiterReader:      rateLimiterReader,
+		rateLimiterWriter:      rateLimiterWriter,
+		subscriptionID:         config.SubscriptionID,
+		cloudName:              config.CloudName,
+		disableAzureStackCloud: config.DisableAzureStackCloud,
+	}
+
+	return client
+}
+
+// Get gets a PublicIPAddress.
+func (c *Client) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (network.PublicIPAddress, *retry.Error) {
+	mc := metrics.NewMetricContext("public_ip_addresses", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return network.PublicIPAddress{}, retry.GetRateLimitError(false, "PublicIPGet")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("PublicIPGet", "client throttled", c.RetryAfterReader)
+		return network.PublicIPAddress{}, rerr
+	}
+
+	result, rerr := c.getPublicIPAddress(ctx, resourceGroupName, publicIPAddressName, expand)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getPublicIPAddress gets a PublicIPAddress.
+func (c *Client) getPublicIPAddress(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (network.PublicIPAddress, *retry.Error) {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/publicIPAddresses",
+		publicIPAddressName,
+	)
+	result := network.PublicIPAddress{}
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, expand)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// GetVirtualMachineScaleSetPublicIPAddress gets a PublicIPAddress for VMSS VM.
+func (c *Client) GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string, expand string) (network.PublicIPAddress, *retry.Error) {
+	mc := metrics.NewMetricContext("vmss_public_ip_addresses", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return network.PublicIPAddress{}, retry.GetRateLimitError(false, "VMSSPublicIPGet")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("VMSSPublicIPGet", "client throttled", c.RetryAfterReader)
+		return network.PublicIPAddress{}, rerr
+	}
+
+	result, rerr := c.getVMSSPublicIPAddress(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getVMSSPublicIPAddress gets a PublicIPAddress for VMSS VM.
+func (c *Client) getVMSSPublicIPAddress(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string, expand string) (network.PublicIPAddress, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s/networkInterfaces/%s/ipconfigurations/%s/publicipaddresses/%s",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", resourceGroupName),
+		autorest.Encode("path", virtualMachineScaleSetName),
+		autorest.Encode("path", virtualmachineIndex),
+		autorest.Encode("path", networkInterfaceName),
+		autorest.Encode("path", IPConfigurationName),
+		autorest.Encode("path", publicIPAddressName),
+	)
+
+	result := network.PublicIPAddress{}
+	computeAPIVersion := ComputeAPIVersion
+	if strings.EqualFold(c.cloudName, AzureStackCloudName) && !c.disableAzureStackCloud {
+		computeAPIVersion = AzureStackComputeAPIVersion
+	}
+	queryParameters := map[string]interface{}{
+		"api-version": computeAPIVersion,
+	}
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithQueryParameters(queryParameters),
+	}
+	response, rerr := c.armClient.GetResourceWithDecorators(ctx, resourceID, decorators)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmsspublicip.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmsspublicip.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// List gets a list of PublicIPAddress in the resource group.
+func (c *Client) List(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, *retry.Error) {
+	mc := metrics.NewMetricContext("public_ip_addresses", "list", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(false, "PublicIPList")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("PublicIPList", "client throttled", c.RetryAfterReader)
+		return nil, rerr
+	}
+
+	result, rerr := c.listPublicIPAddress(ctx, resourceGroupName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// listPublicIPAddress gets a list of PublicIPAddress in the resource group.
+func (c *Client) listPublicIPAddress(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/publicIPAddresses",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", resourceGroupName))
+	result := make([]network.PublicIPAddress, 0)
+	page := &PublicIPAddressListResultPage{}
+	page.fn = c.listNextResults
+
+	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.list.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	var err error
+	page.pialr, err = c.listResponder(resp)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.list.respond", resourceID, err)
+		return result, retry.GetError(resp, err)
+	}
+
+	for {
+		result = append(result, page.Values()...)
+
+		// Abort the loop when there's no nextLink in the response.
+		if to.String(page.Response().NextLink) == "" {
+			break
+		}
+
+		if err = page.NextWithContext(ctx); err != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.list.next", resourceID, err)
+			return result, retry.GetError(page.Response().Response.Response, err)
+		}
+	}
+
+	return result, nil
+}
+
+// CreateOrUpdate creates or updates a PublicIPAddress.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) *retry.Error {
+	mc := metrics.NewMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "PublicIPCreateOrUpdate")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("PublicIPCreateOrUpdate", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdatePublicIP(ctx, resourceGroupName, publicIPAddressName, parameters)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// createOrUpdatePublicIP creates or updates a PublicIPAddress.
+func (c *Client) createOrUpdatePublicIP(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/publicIPAddresses",
+		publicIPAddressName,
+	)
+
+	response, rerr := c.armClient.PutResource(ctx, resourceID, parameters)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.createOrUpdateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateResponder(resp *http.Response) (*network.PublicIPAddress, *retry.Error) {
+	result := &network.PublicIPAddress{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
+
+// Delete deletes a PublicIPAddress by name.
+func (c *Client) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) *retry.Error {
+	mc := metrics.NewMetricContext("public_ip_addresses", "delete", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "PublicIPDelete")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("PublicIPDelete", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.deletePublicIP(ctx, resourceGroupName, publicIPAddressName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// deletePublicIP deletes a PublicIPAddress by name.
+func (c *Client) deletePublicIP(ctx context.Context, resourceGroupName string, publicIPAddressName string) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/publicIPAddresses",
+		publicIPAddressName,
+	)
+
+	return c.armClient.DeleteResource(ctx, resourceID, "")
+}
+
+func (c *Client) listResponder(resp *http.Response) (result network.PublicIPAddressListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		autorest.ByIgnoring(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// publicIPAddressListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (c *Client) publicIPAddressListResultPreparer(ctx context.Context, lr network.PublicIPAddressListResult) (*http.Request, error) {
+	if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
+		return nil, nil
+	}
+
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithBaseURL(to.String(lr.NextLink)),
+	}
+	return c.armClient.PrepareGetRequest(ctx, decorators...)
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) {
+	req, err := c.publicIPAddressListResultPreparer(ctx, lastResults)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "publicipclient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, rerr := c.armClient.Send(ctx, req)
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(rerr.Error(), "publicipclient", "listNextResults", resp, "Failure sending next results request")
+	}
+
+	result, err = c.listResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "publicipclient", "listNextResults", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// PublicIPAddressListResultPage contains a page of PublicIPAddress values.
+type PublicIPAddressListResultPage struct {
+	fn    func(context.Context, network.PublicIPAddressListResult) (network.PublicIPAddressListResult, error)
+	pialr network.PublicIPAddressListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *PublicIPAddressListResultPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.pialr)
+	if err != nil {
+		return err
+	}
+	page.pialr = next
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *PublicIPAddressListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page PublicIPAddressListResultPage) NotDone() bool {
+	return !page.pialr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page PublicIPAddressListResultPage) Response() network.PublicIPAddressListResult {
+	return page.pialr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page PublicIPAddressListResultPage) Values() []network.PublicIPAddress {
+	if page.pialr.IsEmpty() {
+		return nil
+	}
+	return *page.pialr.Value
+}
+
+// ListAll gets all of PublicIPAddress in the subscription.
+func (c *Client) ListAll(ctx context.Context) ([]network.PublicIPAddress, *retry.Error) {
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		return nil, retry.GetRateLimitError(false, "PublicIPListAll")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		rerr := retry.GetThrottlingError("PublicIPListAll", "client throttled", c.RetryAfterReader)
+		return nil, rerr
+	}
+
+	result, rerr := c.listAllPublicIPAddress(ctx)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// listAllPublicIPAddress gets all of PublicIPAddress in the subscription.
+func (c *Client) listAllPublicIPAddress(ctx context.Context) ([]network.PublicIPAddress, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Network/publicIPAddresses",
+		autorest.Encode("path", c.subscriptionID))
+	result := make([]network.PublicIPAddress, 0)
+	page := &PublicIPAddressListResultPage{}
+	page.fn = c.listNextResults
+
+	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.listall.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	var err error
+	page.pialr, err = c.listResponder(resp)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.listall.respond", resourceID, err)
+		return result, retry.GetError(resp, err)
+	}
+
+	for {
+		result = append(result, page.Values()...)
+
+		// Abort the loop when there's no nextLink in the response.
+		if to.String(page.Response().NextLink) == "" {
+			break
+		}
+
+		if err = page.NextWithContext(ctx); err != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "publicip.listall.next", resourceID, err)
+			return result, retry.GetError(page.Response().Response.Response, err)
+		}
+	}
+
+	return result, nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..1576dc7eceb86e40e299eb83f759672b9630b0b8
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package publicipclient implements the client for PublicIPAddress.
+package publicipclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..f53e7caa5df45cd0c87e463621b6371cb23c4036
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/interface.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package publicipclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for network.
+	APIVersion = "2020-08-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2018-11-01"
+
+	// ComputeAPIVersion is the API version for compute. It is required to get VMSS public IP.
+	ComputeAPIVersion = "2017-03-30"
+	// AzureStackComputeAPIVersion is the API version for compute for Azure Stack. It is required to get VMSS public IP.
+	AzureStackComputeAPIVersion = "2018-11-01"
+
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for PublicIPAddress.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	// Get gets a PublicIPAddress.
+	Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, rerr *retry.Error)
+
+	// GetVirtualMachineScaleSetPublicIPAddress gets a PublicIPAddress for VMSS VM.
+	GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, rerr *retry.Error)
+
+	// List gets a list of PublicIPAddress in the resource group.
+	List(ctx context.Context, resourceGroupName string) (result []network.PublicIPAddress, rerr *retry.Error)
+
+	// ListAll gets all of PublicIPAddress in the subscription.
+	ListAll(ctx context.Context) (result []network.PublicIPAddress, rerr *retry.Error)
+
+	// CreateOrUpdate creates or updates a PublicIPAddress.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) *retry.Error
+
+	// Delete deletes a PublicIPAddress by name.
+	Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) *retry.Error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4d30b17fdd074390417b1ff9cb48317f778fe155
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mockpublicipclient implements the mock client for PublicIPAddress.
+package mockpublicipclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..44a8efe866fa01dd82624654e7cf0e7505babd81
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go
@@ -0,0 +1,142 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/interface.go
+
+// Package mockpublicipclient is a generated GoMock package.
+package mockpublicipclient
+
+import (
+	context "context"
+	reflect "reflect"
+
+	network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	gomock "github.com/golang/mock/gomock"
+	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// MockInterface is a mock of Interface interface.
+type MockInterface struct {
+	ctrl     *gomock.Controller
+	recorder *MockInterfaceMockRecorder
+}
+
+// MockInterfaceMockRecorder is the mock recorder for MockInterface.
+type MockInterfaceMockRecorder struct {
+	mock *MockInterface
+}
+
+// NewMockInterface creates a new mock instance.
+func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
+	mock := &MockInterface{ctrl: ctrl}
+	mock.recorder = &MockInterfaceMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
+	return m.recorder
+}
+
+// Get mocks base method.
+func (m *MockInterface) Get(ctx context.Context, resourceGroupName, publicIPAddressName, expand string) (network.PublicIPAddress, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, publicIPAddressName, expand)
+	ret0, _ := ret[0].(network.PublicIPAddress)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, publicIPAddressName, expand interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, publicIPAddressName, expand)
+}
+
+// GetVirtualMachineScaleSetPublicIPAddress mocks base method.
+func (m *MockInterface) GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand string) (network.PublicIPAddress, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetVirtualMachineScaleSetPublicIPAddress", ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand)
+	ret0, _ := ret[0].(network.PublicIPAddress)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// GetVirtualMachineScaleSetPublicIPAddress indicates an expected call of GetVirtualMachineScaleSetPublicIPAddress.
+func (mr *MockInterfaceMockRecorder) GetVirtualMachineScaleSetPublicIPAddress(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualMachineScaleSetPublicIPAddress", reflect.TypeOf((*MockInterface)(nil).GetVirtualMachineScaleSetPublicIPAddress), ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand)
+}
+
+// List mocks base method.
+func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "List", ctx, resourceGroupName)
+	ret0, _ := ret[0].([]network.PublicIPAddress)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// List indicates an expected call of List.
+func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName)
+}
+
+// ListAll mocks base method.
+func (m *MockInterface) ListAll(ctx context.Context) ([]network.PublicIPAddress, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ListAll", ctx)
+	ret0, _ := ret[0].([]network.PublicIPAddress)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// ListAll indicates an expected call of ListAll.
+func (mr *MockInterfaceMockRecorder) ListAll(ctx interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAll", reflect.TypeOf((*MockInterface)(nil).ListAll), ctx)
+}
+
+// CreateOrUpdate mocks base method.
+func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, publicIPAddressName string, parameters network.PublicIPAddress) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, publicIPAddressName, parameters)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// CreateOrUpdate indicates an expected call of CreateOrUpdate.
+func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, publicIPAddressName, parameters)
+}
+
+// Delete mocks base method.
+func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, publicIPAddressName string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, publicIPAddressName)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// Delete indicates an expected call of Delete.
+func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, publicIPAddressName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, publicIPAddressName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/azure_routeclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/azure_routeclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..e70801760764d3994e6e9fdfefecccc2f8a65f48
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/azure_routeclient.go
@@ -0,0 +1,206 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package routeclient
+
+import (
+	"context"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements Route client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+	cloudName      string
+
+	// Client-side rate limiters for read and write operations.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling deadlines: no requests are sent until these times pass.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new Route client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure RoutesClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure RoutesClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+		cloudName:         config.CloudName,
+	}
+
+	return client
+}
+
+// CreateOrUpdate creates or updates a Route.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) *retry.Error {
+	mc := metrics.NewMetricContext("routes", "create_or_update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "RouteCreateOrUpdate")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("RouteCreateOrUpdate", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdateRoute(ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// createOrUpdateRoute creates or updates a Route.
+func (c *Client) createOrUpdateRoute(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) *retry.Error {
+	resourceID := armclient.GetChildResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/routeTables",
+		routeTableName,
+		"routes",
+		routeName,
+	)
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
+		autorest.WithJSON(routeParameters),
+	}
+	if etag != "" {
+		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag)))
+	}
+
+	response, rerr := c.armClient.PutResourceWithDecorators(ctx, resourceID, routeParameters, decorators)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "route.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.createOrUpdateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "route.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateResponder(resp *http.Response) (*network.Route, *retry.Error) {
+	result := &network.Route{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
+
+// Delete deletes a Route by name.
+func (c *Client) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) *retry.Error {
+	mc := metrics.NewMetricContext("routes", "delete", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "RouteDelete")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("RouteDelete", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.deleteRoute(ctx, resourceGroupName, routeTableName, routeName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// deleteRoute deletes a Route by name.
+func (c *Client) deleteRoute(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) *retry.Error {
+	resourceID := armclient.GetChildResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/routeTables",
+		routeTableName,
+		"routes",
+		routeName,
+	)
+
+	return c.armClient.DeleteResource(ctx, resourceID, "")
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..18e39cd10168e8505fcdb2a005616666d31a3a8d
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package routeclient implements the client for Route.
+package routeclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..2261616948636a36b1b4d02343c23b6a68cb53dd
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package routeclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for network.
+	APIVersion = "2020-08-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2018-11-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for Route.
+// Don't forget to run "hack/update-mock-clients.sh" to regenerate the mock
+// client after changing this interface.
+type Interface interface {
+	// CreateOrUpdate creates or updates a Route.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) *retry.Error
+
+	// Delete deletes a Route by name.
+	Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) *retry.Error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c78c86aeaf6e616e7991cd174b95db7786a97d0
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mockrouteclient implements the mock client for Route.
+package mockrouteclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..bdec03adde0bc3ba6003f3ded9234503cf8c8388
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go
@@ -0,0 +1,82 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/interface.go
+
+// Package mockrouteclient is a generated GoMock package.
+package mockrouteclient
+
+import (
+	context "context"
+	reflect "reflect"
+
+	network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	gomock "github.com/golang/mock/gomock"
+	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// MockInterface is a mock of Interface interface.
+type MockInterface struct {
+	ctrl     *gomock.Controller
+	recorder *MockInterfaceMockRecorder
+}
+
+// MockInterfaceMockRecorder is the mock recorder for MockInterface.
+type MockInterfaceMockRecorder struct {
+	mock *MockInterface
+}
+
+// NewMockInterface creates a new mock instance.
+func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
+	mock := &MockInterface{ctrl: ctrl}
+	mock.recorder = &MockInterfaceMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
+	return m.recorder
+}
+
+// CreateOrUpdate mocks base method.
+func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, routeTableName, routeName string, routeParameters network.Route, etag string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// CreateOrUpdate indicates an expected call of CreateOrUpdate.
+func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag)
+}
+
+// Delete mocks base method.
+func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, routeTableName, routeName string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, routeTableName, routeName)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// Delete indicates an expected call of Delete.
+func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, routeTableName, routeName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, routeTableName, routeName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/azure_routetableclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/azure_routetableclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..bb81cd8ac58ceeb7bffa878b7a26dd196422580a
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/azure_routetableclient.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package routetableclient
+
+import (
+	"context"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements RouteTable client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+	cloudName      string
+
+	// Client-side rate limiters for read and write operations.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling deadlines: no requests are sent until these times pass.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new RouteTable client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure RouteTablesClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure RouteTablesClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+		cloudName:         config.CloudName,
+	}
+
+	return client
+}
+
+// Get gets a RouteTable.
+func (c *Client) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (network.RouteTable, *retry.Error) {
+	mc := metrics.NewMetricContext("route_tables", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return network.RouteTable{}, retry.GetRateLimitError(false, "RouteTableGet")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("RouteTableGet", "client throttled", c.RetryAfterReader)
+		return network.RouteTable{}, rerr
+	}
+
+	result, rerr := c.getRouteTable(ctx, resourceGroupName, routeTableName, expand)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getRouteTable gets a RouteTable.
+func (c *Client) getRouteTable(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (network.RouteTable, *retry.Error) {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/routeTables",
+		routeTableName,
+	)
+	result := network.RouteTable{}
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, expand)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "routetable.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "routetable.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// CreateOrUpdate creates or updates a RouteTable.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) *retry.Error {
+	mc := metrics.NewMetricContext("route_tables", "create_or_update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "RouteTableCreateOrUpdate")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("RouteTableCreateOrUpdate", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdateRouteTable(ctx, resourceGroupName, routeTableName, parameters, etag)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// createOrUpdateRouteTable creates or updates a RouteTable.
+func (c *Client) createOrUpdateRouteTable(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/routeTables",
+		routeTableName,
+	)
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
+		autorest.WithJSON(parameters),
+	}
+	if etag != "" {
+		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag)))
+	}
+
+	response, rerr := c.armClient.PutResourceWithDecorators(ctx, resourceID, parameters, decorators)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "routetable.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.createOrUpdateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "routetable.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateResponder(resp *http.Response) (*network.RouteTable, *retry.Error) {
+	result := &network.RouteTable{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..49d8ed616f59582c23d31947471fa0d5bb110b27
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package routetableclient implements the client for RouteTable.
+package routetableclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..12582ecbf09687e7ef219f8a1a403ba3e786c032
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/interface.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package routetableclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for network.
+	APIVersion = "2020-08-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2018-11-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for RouteTable.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	// Get gets a RouteTable.
+	Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, rerr *retry.Error)
+
+	// CreateOrUpdate creates or updates a RouteTable.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) *retry.Error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..c2aa4cc3990ea6493340c59251f83f5c3880c933
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mockroutetableclient implements the mock client for RouteTable.
+package mockroutetableclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..70aa3351e389bebd2cd13956e3837f6f790c6f89
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go
@@ -0,0 +1,83 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/interface.go
+
+// Package mockroutetableclient is a generated GoMock package.
+package mockroutetableclient
+
+import (
+	context "context"
+	reflect "reflect"
+
+	network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	gomock "github.com/golang/mock/gomock"
+	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// MockInterface is a mock of Interface interface.
+type MockInterface struct {
+	ctrl     *gomock.Controller
+	recorder *MockInterfaceMockRecorder
+}
+
+// MockInterfaceMockRecorder is the mock recorder for MockInterface.
+type MockInterfaceMockRecorder struct {
+	mock *MockInterface
+}
+
+// NewMockInterface creates a new mock instance.
+func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
+	mock := &MockInterface{ctrl: ctrl}
+	mock.recorder = &MockInterfaceMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
+	return m.recorder
+}
+
+// Get mocks base method.
+func (m *MockInterface) Get(ctx context.Context, resourceGroupName, routeTableName, expand string) (network.RouteTable, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, routeTableName, expand)
+	ret0, _ := ret[0].(network.RouteTable)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, routeTableName, expand interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, routeTableName, expand)
+}
+
+// CreateOrUpdate mocks base method.
+func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, routeTableName string, parameters network.RouteTable, etag string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, routeTableName, parameters, etag)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// CreateOrUpdate indicates an expected call of CreateOrUpdate.
+func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, routeTableName, parameters, etag interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, routeTableName, parameters, etag)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/azure_securitygroupclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/azure_securitygroupclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..9bc7a54fbff8553920a0a5ed0db7900c648c4909
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/azure_securitygroupclient.go
@@ -0,0 +1,426 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package securitygroupclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements SecurityGroup client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+	cloudName      string
+
+	// Rate limiting configures.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling configures.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new SecurityGroup client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure SecurityGroupsClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure SecurityGroupsClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+		cloudName:         config.CloudName,
+	}
+
+	return client
+}
+
+// Get gets a SecurityGroup.
+func (c *Client) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (network.SecurityGroup, *retry.Error) {
+	mc := metrics.NewMetricContext("security_groups", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return network.SecurityGroup{}, retry.GetRateLimitError(false, "NSGGet")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("NSGGet", "client throttled", c.RetryAfterReader)
+		return network.SecurityGroup{}, rerr
+	}
+
+	result, rerr := c.getSecurityGroup(ctx, resourceGroupName, networkSecurityGroupName, expand)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getSecurityGroup gets a SecurityGroup.
+func (c *Client) getSecurityGroup(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (network.SecurityGroup, *retry.Error) {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/networkSecurityGroups",
+		networkSecurityGroupName,
+	)
+	result := network.SecurityGroup{}
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, expand)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "securitygroup.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "securitygroup.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// List gets a list of SecurityGroups in the resource group.
+func (c *Client) List(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, *retry.Error) {
+	mc := metrics.NewMetricContext("security_groups", "list", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(false, "NSGList")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("NSGList", "client throttled", c.RetryAfterReader)
+		return nil, rerr
+	}
+
+	result, rerr := c.listSecurityGroup(ctx, resourceGroupName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// listSecurityGroup gets a list of SecurityGroups in the resource group.
+func (c *Client) listSecurityGroup(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", resourceGroupName))
+	result := make([]network.SecurityGroup, 0)
+	page := &SecurityGroupListResultPage{}
+	page.fn = c.listNextResults
+
+	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "securitygroup.list.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	var err error
+	page.sglr, err = c.listResponder(resp)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "securitygroup.list.respond", resourceID, err)
+		return result, retry.GetError(resp, err)
+	}
+
+	for {
+		result = append(result, page.Values()...)
+
+		// Abort the loop when there's no nextLink in the response.
+		if to.String(page.Response().NextLink) == "" {
+			break
+		}
+
+		if err = page.NextWithContext(ctx); err != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "securitygroup.list.next", resourceID, err)
+			return result, retry.GetError(page.Response().Response.Response, err)
+		}
+	}
+
+	return result, nil
+}
+
+// CreateOrUpdate creates or updates a SecurityGroup.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, etag string) *retry.Error {
+	mc := metrics.NewMetricContext("security_groups", "create_or_update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "NSGCreateOrUpdate")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("NSGCreateOrUpdate", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdateNSG(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// createOrUpdateNSG creates or updates a SecurityGroup.
+func (c *Client) createOrUpdateNSG(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, etag string) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/networkSecurityGroups",
+		networkSecurityGroupName,
+	)
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
+		autorest.WithJSON(parameters),
+	}
+	if etag != "" {
+		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag)))
+	}
+
+	response, rerr := c.armClient.PutResourceWithDecorators(ctx, resourceID, parameters, decorators)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "securityGroup.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.createOrUpdateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "securityGroup.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateResponder(resp *http.Response) (*network.SecurityGroup, *retry.Error) {
+	result := &network.SecurityGroup{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
+
+// Delete deletes a SecurityGroup by name.
+func (c *Client) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) *retry.Error {
+	mc := metrics.NewMetricContext("security_groups", "delete", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "NSGDelete")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("NSGDelete", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.deleteNSG(ctx, resourceGroupName, networkSecurityGroupName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// deleteNSG deletes a SecurityGroup by name.
+func (c *Client) deleteNSG(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/networkSecurityGroups",
+		networkSecurityGroupName,
+	)
+
+	return c.armClient.DeleteResource(ctx, resourceID, "")
+}
+
+func (c *Client) listResponder(resp *http.Response) (result network.SecurityGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		autorest.ByIgnoring(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// securityGroupListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (c *Client) securityGroupListResultPreparer(ctx context.Context, sglr network.SecurityGroupListResult) (*http.Request, error) {
+	if sglr.NextLink == nil || len(to.String(sglr.NextLink)) < 1 {
+		return nil, nil
+	}
+
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithBaseURL(to.String(sglr.NextLink)),
+	}
+	return c.armClient.PrepareGetRequest(ctx, decorators...)
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults network.SecurityGroupListResult) (result network.SecurityGroupListResult, err error) {
+	req, err := c.securityGroupListResultPreparer(ctx, lastResults)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "securitygroupclient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, rerr := c.armClient.Send(ctx, req)
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(rerr.Error(), "securitygroupclient", "listNextResults", resp, "Failure sending next results request")
+	}
+
+	result, err = c.listResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "securitygroupclient", "listNextResults", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// SecurityGroupListResultPage contains a page of SecurityGroup values.
+type SecurityGroupListResultPage struct {
+	fn   func(context.Context, network.SecurityGroupListResult) (network.SecurityGroupListResult, error)
+	sglr network.SecurityGroupListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *SecurityGroupListResultPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.sglr)
+	if err != nil {
+		return err
+	}
+	page.sglr = next
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SecurityGroupListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page SecurityGroupListResultPage) NotDone() bool {
+	return !page.sglr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page SecurityGroupListResultPage) Response() network.SecurityGroupListResult {
+	return page.sglr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page SecurityGroupListResultPage) Values() []network.SecurityGroup {
+	if page.sglr.IsEmpty() {
+		return nil
+	}
+	return *page.sglr.Value
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b806d3ad4b86a4f2847ceb9a6c45e05451eba39
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package securitygroupclient implements the client for SecurityGroups.
+package securitygroupclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..3611d2dcc5f543f7e1830d7d47e312a26ff044d9
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/interface.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package securitygroupclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for network.
+	APIVersion = "2020-08-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2018-11-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for SecurityGroups.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	// Get gets a SecurityGroup.
+	Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, rerr *retry.Error)
+
+	// List gets a list of SecurityGroup in the resource group.
+	List(ctx context.Context, resourceGroupName string) (result []network.SecurityGroup, rerr *retry.Error)
+
+	// CreateOrUpdate creates or updates a SecurityGroup.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, etag string) *retry.Error
+
+	// Delete deletes a SecurityGroup by name.
+	Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) *retry.Error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee6d0eb38fdd5088e08fff50580978574d9a160d
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mocksecuritygroupclient implements the mock client for SecurityGroups.
+package mocksecuritygroupclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..d4629d7e94ef7eb46f96b47e9a5454aa22be287d
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go
@@ -0,0 +1,112 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/interface.go
+
+// Package mocksecuritygroupclient is a generated GoMock package.
+package mocksecuritygroupclient
+
+import (
+	context "context"
+	reflect "reflect"
+
+	network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	gomock "github.com/golang/mock/gomock"
+	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// MockInterface is a mock of Interface interface.
+type MockInterface struct {
+	ctrl     *gomock.Controller
+	recorder *MockInterfaceMockRecorder
+}
+
+// MockInterfaceMockRecorder is the mock recorder for MockInterface.
+type MockInterfaceMockRecorder struct {
+	mock *MockInterface
+}
+
+// NewMockInterface creates a new mock instance.
+func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
+	mock := &MockInterface{ctrl: ctrl}
+	mock.recorder = &MockInterfaceMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
+	return m.recorder
+}
+
+// Get mocks base method.
+func (m *MockInterface) Get(ctx context.Context, resourceGroupName, networkSecurityGroupName, expand string) (network.SecurityGroup, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, networkSecurityGroupName, expand)
+	ret0, _ := ret[0].(network.SecurityGroup)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, networkSecurityGroupName, expand interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, networkSecurityGroupName, expand)
+}
+
+// List mocks base method.
+func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "List", ctx, resourceGroupName)
+	ret0, _ := ret[0].([]network.SecurityGroup)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// List indicates an expected call of List.
+func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName)
+}
+
+// CreateOrUpdate mocks base method.
+func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, networkSecurityGroupName string, parameters network.SecurityGroup, etag string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, networkSecurityGroupName, parameters, etag)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// CreateOrUpdate indicates an expected call of CreateOrUpdate.
+func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, networkSecurityGroupName, parameters, etag)
+}
+
+// Delete mocks base method.
+func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, networkSecurityGroupName string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, networkSecurityGroupName)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// Delete indicates an expected call of Delete.
+func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkSecurityGroupName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, networkSecurityGroupName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..5e500087647685853fd9320a893ea6726075d528
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go
@@ -0,0 +1,419 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package snapshotclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements Snapshot client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+	cloudName      string
+
+	// Rate limiting configures.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling configures.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new Snapshot client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure SnapshotClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure SnapshotClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+		cloudName:         config.CloudName,
+	}
+
+	return client
+}
+
+// Get gets a Snapshot.
+func (c *Client) Get(ctx context.Context, resourceGroupName string, snapshotName string) (compute.Snapshot, *retry.Error) {
+	mc := metrics.NewMetricContext("snapshot", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return compute.Snapshot{}, retry.GetRateLimitError(false, "SnapshotGet")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("SnapshotGet", "client throttled", c.RetryAfterReader)
+		return compute.Snapshot{}, rerr
+	}
+
+	result, rerr := c.getSnapshot(ctx, resourceGroupName, snapshotName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getSnapshot gets a Snapshot.
+func (c *Client) getSnapshot(ctx context.Context, resourceGroupName string, snapshotName string) (compute.Snapshot, *retry.Error) {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Compute/snapshots",
+		snapshotName,
+	)
+	result := compute.Snapshot{}
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "snapshot.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "snapshot.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// Delete deletes a Snapshot by name.
+func (c *Client) Delete(ctx context.Context, resourceGroupName string, snapshotName string) *retry.Error {
+	mc := metrics.NewMetricContext("snapshot", "delete", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "SnapshotDelete")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("SnapshotDelete", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.deleteSnapshot(ctx, resourceGroupName, snapshotName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// deleteSnapshot deletes a Snapshot by name.
+func (c *Client) deleteSnapshot(ctx context.Context, resourceGroupName string, snapshotName string) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Compute/snapshots",
+		snapshotName,
+	)
+
+	return c.armClient.DeleteResource(ctx, resourceID, "")
+}
+
+// CreateOrUpdate creates or updates a Snapshot.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.Snapshot) *retry.Error {
+	mc := metrics.NewMetricContext("snapshot", "create_or_update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "SnapshotCreateOrUpdate")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("SnapshotCreateOrUpdate", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdateSnapshot(ctx, resourceGroupName, snapshotName, snapshot)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// createOrUpdateSnapshot creates or updates a Snapshot.
+func (c *Client) createOrUpdateSnapshot(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.Snapshot) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Compute/snapshots",
+		snapshotName,
+	)
+
+	response, rerr := c.armClient.PutResource(ctx, resourceID, snapshot)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "snapshot.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.createOrUpdateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "snapshot.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateResponder(resp *http.Response) (*compute.Snapshot, *retry.Error) {
+	result := &compute.Snapshot{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
+
+// ListByResourceGroup gets a list of snapshots by resourceGroup.
+func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Snapshot, *retry.Error) {
+	mc := metrics.NewMetricContext("snapshot", "list_by_resource_group", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(false, "SnapshotListByResourceGroup")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("SnapshotListByResourceGroup", "client throttled", c.RetryAfterReader)
+		return nil, rerr
+	}
+
+	result, rerr := c.listSnapshotsByResourceGroup(ctx, resourceGroupName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// listSnapshotsByResourceGroup gets a list of snapshots in the resource group.
+func (c *Client) listSnapshotsByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Snapshot, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/snapshots",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", resourceGroupName))
+	result := make([]compute.Snapshot, 0)
+	page := &SnapshotListPage{}
+	page.fn = c.listNextResults
+
+	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "snapshot.list.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	var err error
+	page.sl, err = c.listResponder(resp)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "snapshot.list.respond", resourceID, err)
+		return result, retry.GetError(resp, err)
+	}
+
+	for {
+		result = append(result, page.Values()...)
+
+		// Abort the loop when there's no nextLink in the response.
+		if to.String(page.Response().NextLink) == "" {
+			break
+		}
+
+		if err = page.NextWithContext(ctx); err != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "snapshot.list.next", resourceID, err)
+			return result, retry.GetError(page.Response().Response.Response, err)
+		}
+	}
+
+	return result, nil
+}
+
+func (c *Client) listResponder(resp *http.Response) (result compute.SnapshotList, err error) {
+	err = autorest.Respond(
+		resp,
+		autorest.ByIgnoring(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// SnapshotListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (c *Client) SnapshotListResultPreparer(ctx context.Context, lr compute.SnapshotList) (*http.Request, error) {
+	if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
+		return nil, nil
+	}
+
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithBaseURL(to.String(lr.NextLink)),
+	}
+	return c.armClient.PrepareGetRequest(ctx, decorators...)
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults compute.SnapshotList) (result compute.SnapshotList, err error) {
+	req, err := c.SnapshotListResultPreparer(ctx, lastResults)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "snapshotclient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, rerr := c.armClient.Send(ctx, req)
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(rerr.Error(), "snapshotclient", "listNextResults", resp, "Failure sending next results request")
+	}
+
+	result, err = c.listResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "snapshotclient", "listNextResults", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// SnapshotListPage contains a page of Snapshot values.
+type SnapshotListPage struct {
+	fn func(context.Context, compute.SnapshotList) (compute.SnapshotList, error)
+	sl compute.SnapshotList
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *SnapshotListPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.sl)
+	if err != nil {
+		return err
+	}
+	page.sl = next
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SnapshotListPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page SnapshotListPage) NotDone() bool {
+	return !page.sl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page SnapshotListPage) Response() compute.SnapshotList {
+	return page.sl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page SnapshotListPage) Values() []compute.Snapshot {
+	if page.sl.IsEmpty() {
+		return nil
+	}
+	return *page.sl.Value
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..408846fd88beff1ffc5364d98420924d077d5ef9
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package snapshotclient implements the client for Snapshots.
+package snapshotclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..14b765c651bb327b21d83e5cb3fdb00558a94e02
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package snapshotclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for compute.
+	APIVersion = "2020-12-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2019-03-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for Snapshots.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	// Get gets a Snapshot.
+	Get(ctx context.Context, resourceGroupName string, snapshotName string) (compute.Snapshot, *retry.Error)
+
+	// Delete deletes a Snapshot by name.
+	Delete(ctx context.Context, resourceGroupName string, snapshotName string) *retry.Error
+
+	// ListByResourceGroup gets a list of snapshots by resourceGroup.
+	ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Snapshot, *retry.Error)
+
+	// CreateOrUpdate creates or updates a Snapshot.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.Snapshot) *retry.Error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..c7c97a2084c5a3a35775f64d3835af54207c26e0
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mocksnapshotclient implements the mock client for Snapshots.
+package mocksnapshotclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..0fd0431fbad4b4906dde802de9934dc90c21d2af
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go
@@ -0,0 +1,112 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go
+
+// Package mocksnapshotclient is a generated GoMock package.
+package mocksnapshotclient
+
+import (
+	context "context"
+	reflect "reflect"
+
+	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	gomock "github.com/golang/mock/gomock"
+	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// MockInterface is a mock of Interface interface.
+type MockInterface struct {
+	ctrl     *gomock.Controller
+	recorder *MockInterfaceMockRecorder
+}
+
+// MockInterfaceMockRecorder is the mock recorder for MockInterface.
+type MockInterfaceMockRecorder struct {
+	mock *MockInterface
+}
+
+// NewMockInterface creates a new mock instance.
+func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
+	mock := &MockInterface{ctrl: ctrl}
+	mock.recorder = &MockInterfaceMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
+	return m.recorder
+}
+
+// Get mocks base method.
+func (m *MockInterface) Get(ctx context.Context, resourceGroupName, snapshotName string) (compute.Snapshot, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, snapshotName)
+	ret0, _ := ret[0].(compute.Snapshot)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, snapshotName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, snapshotName)
+}
+
+// Delete mocks base method.
+func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, snapshotName string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, snapshotName)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// Delete indicates an expected call of Delete.
+func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, snapshotName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, snapshotName)
+}
+
+// ListByResourceGroup mocks base method.
+func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Snapshot, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName)
+	ret0, _ := ret[0].([]compute.Snapshot)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// ListByResourceGroup indicates an expected call of ListByResourceGroup.
+func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName)
+}
+
+// CreateOrUpdate mocks base method.
+func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, snapshotName string, snapshot compute.Snapshot) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, snapshotName, snapshot)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// CreateOrUpdate indicates an expected call of CreateOrUpdate.
+func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, snapshotName, snapshot interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, snapshotName, snapshot)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/azure_subnetclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/azure_subnetclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d6ed1480f3c2757c237c74caae7e3ab59b71be6
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/azure_subnetclient.go
@@ -0,0 +1,426 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package subnetclient
+
+import (
+	"context"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements Subnet client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+	cloudName      string
+
+	// Rate limiting configures.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling configures.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new Subnet client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure SubnetsClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure SubnetsClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+		cloudName:         config.CloudName,
+	}
+
+	return client
+}
+
+// Get gets a Subnet.
+func (c *Client) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (network.Subnet, *retry.Error) {
+	mc := metrics.NewMetricContext("subnets", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return network.Subnet{}, retry.GetRateLimitError(false, "SubnetGet")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("SubnetGet", "client throttled", c.RetryAfterReader)
+		return network.Subnet{}, rerr
+	}
+
+	result, rerr := c.getSubnet(ctx, resourceGroupName, virtualNetworkName, subnetName, expand)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getSubnet gets a Subnet.
+func (c *Client) getSubnet(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (network.Subnet, *retry.Error) {
+	resourceID := armclient.GetChildResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/virtualNetworks",
+		virtualNetworkName,
+		"subnets",
+		subnetName,
+	)
+	result := network.Subnet{}
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, expand)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "subnet.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "subnet.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// List gets a list of Subnets in the VNet.
+func (c *Client) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) ([]network.Subnet, *retry.Error) {
+	mc := metrics.NewMetricContext("subnets", "list", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(false, "SubnetList")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("SubnetList", "client throttled", c.RetryAfterReader)
+		return nil, rerr
+	}
+
+	result, rerr := c.listSubnet(ctx, resourceGroupName, virtualNetworkName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// listSubnet gets a list of Subnets in the VNet.
+func (c *Client) listSubnet(ctx context.Context, resourceGroupName string, virtualNetworkName string) ([]network.Subnet, *retry.Error) {
+	resourceID := armclient.GetChildResourcesListID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/virtualNetworks",
+		virtualNetworkName,
+		"subnets")
+
+	result := make([]network.Subnet, 0)
+	page := &SubnetListResultPage{}
+	page.fn = c.listNextResults
+
+	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "subnet.list.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	var err error
+	page.slr, err = c.listResponder(resp)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "subnet.list.respond", resourceID, err)
+		return result, retry.GetError(resp, err)
+	}
+
+	for {
+		result = append(result, page.Values()...)
+
+		// Abort the loop when there's no nextLink in the response.
+		if to.String(page.Response().NextLink) == "" {
+			break
+		}
+
+		if err = page.NextWithContext(ctx); err != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "subnet.list.next", resourceID, err)
+			return result, retry.GetError(page.Response().Response.Response, err)
+		}
+	}
+
+	return result, nil
+}
+
+// CreateOrUpdate creates or updates a Subnet.
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) *retry.Error {
+	mc := metrics.NewMetricContext("subnets", "create_or_update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "SubnetCreateOrUpdate")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("SubnetCreateOrUpdate", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.createOrUpdateSubnet(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// createOrUpdateSubnet creates or updates a Subnet.
+func (c *Client) createOrUpdateSubnet(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) *retry.Error {
+	resourceID := armclient.GetChildResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/virtualNetworks",
+		virtualNetworkName,
+		"subnets",
+		subnetName)
+
+	response, rerr := c.armClient.PutResource(ctx, resourceID, subnetParameters)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "subnet.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.createOrUpdateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "subnet.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) createOrUpdateResponder(resp *http.Response) (*network.Subnet, *retry.Error) {
+	result := &network.Subnet{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
+
+// Delete deletes a Subnet by name.
+func (c *Client) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) *retry.Error {
+	mc := metrics.NewMetricContext("subnets", "delete", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "SubnetDelete")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("SubnetDelete", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.deleteSubnet(ctx, resourceGroupName, virtualNetworkName, subnetName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// deleteSubnet deletes a Subnet by name.
+func (c *Client) deleteSubnet(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) *retry.Error {
+	resourceID := armclient.GetChildResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Network/virtualNetworks",
+		virtualNetworkName,
+		"subnets",
+		subnetName)
+
+	return c.armClient.DeleteResource(ctx, resourceID, "")
+}
+
+func (c *Client) listResponder(resp *http.Response) (result network.SubnetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		autorest.ByIgnoring(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// subnetListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (c *Client) subnetListResultPreparer(ctx context.Context, lblr network.SubnetListResult) (*http.Request, error) {
+	if lblr.NextLink == nil || len(to.String(lblr.NextLink)) < 1 {
+		return nil, nil
+	}
+
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithBaseURL(to.String(lblr.NextLink)),
+	}
+	return c.armClient.PrepareGetRequest(ctx, decorators...)
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults network.SubnetListResult) (result network.SubnetListResult, err error) {
+	req, err := c.subnetListResultPreparer(ctx, lastResults)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "subnetclient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, rerr := c.armClient.Send(ctx, req)
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(rerr.Error(), "subnetclient", "listNextResults", resp, "Failure sending next results request")
+	}
+
+	result, err = c.listResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "subnetclient", "listNextResults", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// SubnetListResultPage contains a page of Subnet values.
+type SubnetListResultPage struct {
+	fn  func(context.Context, network.SubnetListResult) (network.SubnetListResult, error)
+	slr network.SubnetListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *SubnetListResultPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.slr)
+	if err != nil {
+		return err
+	}
+	page.slr = next
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SubnetListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page SubnetListResultPage) NotDone() bool {
+	return !page.slr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page SubnetListResultPage) Response() network.SubnetListResult {
+	return page.slr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page SubnetListResultPage) Values() []network.Subnet {
+	if page.slr.IsEmpty() {
+		return nil
+	}
+	return *page.slr.Value
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..7248d19b199151752faf3eb92f1f5292edeccafe
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package subnetclient implements the client for Subnet.
+package subnetclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..28de4787a18a7dcffad5773ede6303b9fc1219ec
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/interface.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package subnetclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for network.
+	APIVersion = "2021-02-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2018-11-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for Subnet.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	// Get gets a Subnet.
+	Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, rerr *retry.Error)
+
+	// List gets a list of Subnet in the VNet.
+	List(ctx context.Context, resourceGroupName string, virtualNetworkName string) (result []network.Subnet, rerr *retry.Error)
+
+	// CreateOrUpdate creates or updates a Subnet.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) *retry.Error
+
+	// Delete deletes a Subnet by name.
+	Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) *retry.Error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..89b77d28302428b5fff9d0eadfd4c967fb62fe14
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mocksubnetclient implements the mock client for Subnet.
+package mocksubnetclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..c4f1f24f2262944286316c0dd3e559bb0b70cf6e
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go
@@ -0,0 +1,112 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/interface.go
+
+// Package mocksubnetclient is a generated GoMock package.
+package mocksubnetclient
+
+import (
+	context "context"
+	reflect "reflect"
+
+	network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	gomock "github.com/golang/mock/gomock"
+	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// MockInterface is a mock of Interface interface.
+type MockInterface struct {
+	ctrl     *gomock.Controller
+	recorder *MockInterfaceMockRecorder
+}
+
+// MockInterfaceMockRecorder is the mock recorder for MockInterface.
+type MockInterfaceMockRecorder struct {
+	mock *MockInterface
+}
+
+// NewMockInterface creates a new mock instance.
+func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
+	mock := &MockInterface{ctrl: ctrl}
+	mock.recorder = &MockInterfaceMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
+	return m.recorder
+}
+
+// Get mocks base method.
+func (m *MockInterface) Get(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName, expand string) (network.Subnet, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, virtualNetworkName, subnetName, expand)
+	ret0, _ := ret[0].(network.Subnet)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, virtualNetworkName, subnetName, expand interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, virtualNetworkName, subnetName, expand)
+}
+
+// List mocks base method.
+func (m *MockInterface) List(ctx context.Context, resourceGroupName, virtualNetworkName string) ([]network.Subnet, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "List", ctx, resourceGroupName, virtualNetworkName)
+	ret0, _ := ret[0].([]network.Subnet)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// List indicates an expected call of List.
+func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualNetworkName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName, virtualNetworkName)
+}
+
+// CreateOrUpdate mocks base method.
+func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName string, subnetParameters network.Subnet) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// CreateOrUpdate indicates an expected call of CreateOrUpdate.
+func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters)
+}
+
+// Delete mocks base method.
+func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, virtualNetworkName, subnetName)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// Delete indicates an expected call of Delete.
+func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, virtualNetworkName, subnetName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, virtualNetworkName, subnetName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c0befbc39026e6140081528b635da2da2d88cf0
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package virtualnetworklinksclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
+	"k8s.io/klog/v2"
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+)
+
+var _ Interface = &Client{}
+
+// Client implements virtualnetworklinksclient Interface.
+type Client struct {
+	virtualNetworkLinksClient privatedns.VirtualNetworkLinksClient
+}
+
+// New creates a new virtualnetworklinks client.
+func New(config *azclients.ClientConfig) *Client {
+	virtualNetworkLinksClient := privatedns.NewVirtualNetworkLinksClient(config.SubscriptionID)
+	virtualNetworkLinksClient.Authorizer = config.Authorizer
+
+	client := &Client{
+		virtualNetworkLinksClient: virtualNetworkLinksClient,
+	}
+	return client
+}
+
+// CreateOrUpdate creates or updates a virtual network link
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters privatedns.VirtualNetworkLink, waitForCompletion bool) error {
+	createOrUpdateFuture, err := c.virtualNetworkLinksClient.CreateOrUpdate(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName, parameters, "", "*")
+	if err != nil {
+		klog.V(5).Infof("Received error for %s, resourceGroup: %s, privateZoneName: %s, error: %s", "virtualnetworklinks.put.request", resourceGroupName, privateZoneName, err)
+		return err
+	}
+	if waitForCompletion {
+		err := createOrUpdateFuture.WaitForCompletionRef(ctx, c.virtualNetworkLinksClient.Client)
+		if err != nil {
+			klog.V(5).Infof("Received error while waiting for completion for %s, resourceGroup: %s, privateZoneName: %s, error: %s", "virtualnetworklinks.put.request", resourceGroupName, privateZoneName, err)
+			return err
+		}
+	}
+	return nil
+}
+
+// Get gets a virtual network link
+func (c *Client) Get(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (result privatedns.VirtualNetworkLink, err error) {
+	return c.virtualNetworkLinksClient.Get(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..a4af658194581f0c2a5291eafd6f95627a2103fc
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package virtualnetworklinksclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
+)
+
+// Interface is the client interface for Virtual Network Link.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+
+	// Get gets a virtual network link
+	Get(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (result privatedns.VirtualNetworkLink, err error)
+
+	// CreateOrUpdate creates or updates a virtual network link.
+	CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters privatedns.VirtualNetworkLink, waitForCompletion bool) error
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/azure_vmasclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/azure_vmasclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce3616aaa7d6c99cd4de1146508b5b82eae729ea
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/azure_vmasclient.go
@@ -0,0 +1,308 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vmasclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// var _ Interface = &Client{} // NOTE(review): assertion commented out, unlike sibling clients; Client defines Get and List — confirm it satisfies Interface and uncomment.
+
+// Client implements VMAS client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+	cloudName      string
+
+	// Rate limiting configures.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling configures.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new VMAS client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure AvailabilitySetsClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure AvailabilitySetsClient  (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+		cloudName:         config.CloudName,
+	}
+
+	return client
+}
+
+// Get gets a AvailabilitySet.
+func (c *Client) Get(ctx context.Context, resourceGroupName string, vmasName string) (compute.AvailabilitySet, *retry.Error) {
+	mc := metrics.NewMetricContext("vmas", "get", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return compute.AvailabilitySet{}, retry.GetRateLimitError(false, "VMASGet")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("VMASGet", "client throttled", c.RetryAfterReader)
+		return compute.AvailabilitySet{}, rerr
+	}
+
+	result, rerr := c.getVMAS(ctx, resourceGroupName, vmasName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getVMAS gets a AvailabilitySet.
+func (c *Client) getVMAS(ctx context.Context, resourceGroupName string, vmasName string) (compute.AvailabilitySet, *retry.Error) {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Compute/availabilitySets",
+		vmasName,
+	)
+	result := compute.AvailabilitySet{}
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmas.get.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmas.get.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
+
+// List gets a list of AvailabilitySets in the resource group.
+func (c *Client) List(ctx context.Context, resourceGroupName string) ([]compute.AvailabilitySet, *retry.Error) {
+	mc := metrics.NewMetricContext("vmas", "list", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(false, "VMASList")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("VMASList", "client throttled", c.RetryAfterReader)
+		return nil, rerr
+	}
+
+	result, rerr := c.listVMAS(ctx, resourceGroupName)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// listVMAS gets a list of AvailabilitySets in the resource group.
+func (c *Client) listVMAS(ctx context.Context, resourceGroupName string) ([]compute.AvailabilitySet, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", resourceGroupName))
+	result := make([]compute.AvailabilitySet, 0)
+	page := &AvailabilitySetListResultPage{}
+	page.fn = c.listNextResults
+
+	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmas.list.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	var err error
+	page.vmaslr, err = c.listResponder(resp)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmas.list.respond", resourceID, err)
+		return result, retry.GetError(resp, err)
+	}
+
+	for {
+		result = append(result, page.Values()...)
+
+		// Abort the loop when there's no nextLink in the response.
+		if to.String(page.Response().NextLink) == "" {
+			break
+		}
+
+		if err = page.NextWithContext(ctx); err != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmas.list.next", resourceID, err)
+			return result, retry.GetError(page.Response().Response.Response, err)
+		}
+	}
+
+	return result, nil
+}
+
+func (c *Client) listResponder(resp *http.Response) (result compute.AvailabilitySetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		autorest.ByIgnoring(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// availabilitySetListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (c *Client) availabilitySetListResultPreparer(ctx context.Context, vmaslr compute.AvailabilitySetListResult) (*http.Request, error) {
+	if vmaslr.NextLink == nil || len(to.String(vmaslr.NextLink)) < 1 {
+		return nil, nil
+	}
+
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithBaseURL(to.String(vmaslr.NextLink)),
+	}
+	return c.armClient.PrepareGetRequest(ctx, decorators...)
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults compute.AvailabilitySetListResult) (result compute.AvailabilitySetListResult, err error) {
+	req, err := c.availabilitySetListResultPreparer(ctx, lastResults)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "vmasclient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, rerr := c.armClient.Send(ctx, req)
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(rerr.Error(), "vmasclient", "listNextResults", resp, "Failure sending next results request")
+	}
+
+	result, err = c.listResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "vmasclient", "listNextResults", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// AvailabilitySetListResultPage contains a page of AvailabilitySet values.
+type AvailabilitySetListResultPage struct {
+	fn     func(context.Context, compute.AvailabilitySetListResult) (compute.AvailabilitySetListResult, error)
+	vmaslr compute.AvailabilitySetListResult
+}
+
+// NextWithContext advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AvailabilitySetListResultPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.vmaslr)
+	if err != nil {
+		return err
+	}
+	page.vmaslr = next
+	return nil
+}
+
+// Next advances to the next page of values.  If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AvailabilitySetListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AvailabilitySetListResultPage) NotDone() bool {
+	return !page.vmaslr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AvailabilitySetListResultPage) Response() compute.AvailabilitySetListResult {
+	return page.vmaslr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AvailabilitySetListResultPage) Values() []compute.AvailabilitySet {
+	if page.vmaslr.IsEmpty() {
+		return nil
+	}
+	return *page.vmaslr.Value
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f9dee7ed5651fa67999443d960441c902d599eb
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package vmasclient implements the client for VMAS.
+package vmasclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ff65ccfce3f3335d5b97392338a572bd7ad9c24
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient/interface.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vmasclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for VMAS.
+	APIVersion = "2020-12-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2019-07-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for AvailabilitySet.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	// Get gets an AvailabilitySet.
+	Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.AvailabilitySet, rerr *retry.Error)
+
+	// List gets a list of AvailabilitySets in the resource group.
+	List(ctx context.Context, resourceGroupName string) (result []compute.AvailabilitySet, rerr *retry.Error)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/azure_vmsizeclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/azure_vmsizeclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..d3a6711ea074eb49731e60e0ef644c0cb1999904
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/azure_vmsizeclient.go
@@ -0,0 +1,144 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vmsizeclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+// Client implements VirtualMachineSize client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+	cloudName      string
+
+	// Rate limiting configures.
+	rateLimiterReader flowcontrol.RateLimiter
+	rateLimiterWriter flowcontrol.RateLimiter
+
+	// ARM throttling configures.
+	RetryAfterReader time.Time
+	RetryAfterWriter time.Time
+}
+
+// New creates a new VirtualMachineSize client with ratelimiting.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
+
+	if azclients.RateLimitEnabled(config.RateLimitConfig) {
+		klog.V(2).Infof("Azure VirtualMachineSizesClient (read ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPS,
+			config.RateLimitConfig.CloudProviderRateLimitBucket)
+		klog.V(2).Infof("Azure VirtualMachineSizesClient (write ops) using rate limit config: QPS=%g, bucket=%d",
+			config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
+			config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
+	}
+
+	client := &Client{
+		armClient:         armClient,
+		rateLimiterReader: rateLimiterReader,
+		rateLimiterWriter: rateLimiterWriter,
+		subscriptionID:    config.SubscriptionID,
+		cloudName:         config.CloudName,
+	}
+
+	return client
+}
+
+// List gets compute.VirtualMachineSizeListResult.
+func (c *Client) List(ctx context.Context, location string) (compute.VirtualMachineSizeListResult, *retry.Error) {
+	mc := metrics.NewMetricContext("vmsizes", "list", "", c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterReader.TryAccept() {
+		mc.RateLimitedCount()
+		return compute.VirtualMachineSizeListResult{}, retry.GetRateLimitError(false, "VMSizesList")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterReader.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("VMSizesList", "client throttled", c.RetryAfterReader)
+		return compute.VirtualMachineSizeListResult{}, rerr
+	}
+
+	result, rerr := c.listVirtualMachineSizes(ctx, location)
+	mc.Observe(rerr)
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterReader = rerr.RetryAfter
+		}
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// listVirtualMachineSizes gets compute.VirtualMachineSizeListResult.
+func (c *Client) listVirtualMachineSizes(ctx context.Context, location string) (compute.VirtualMachineSizeListResult, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Compute/locations/%s/vmSizes",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", location),
+	)
+
+	result := compute.VirtualMachineSizeListResult{}
+	response, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmsize.list.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmsize.list.respond", resourceID, err)
+		return result, retry.GetError(response, err)
+	}
+
+	result.Response = autorest.Response{Response: response}
+	return result, nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..9d7ea23769108febdeabb487a0ae95c477ae50ef
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package vmsizeclient implements the client for VirtualMachineSizes.
+package vmsizeclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d4235db3af5f97b42202a37c97d248f0096e7ca
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient/interface.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vmsizeclient
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for compute.
+	APIVersion = "2020-12-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2017-12-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for VirtualMachineSizes.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	// List gets compute.VirtualMachineSizeListResult.
+	List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, rerr *retry.Error)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/azure_zoneclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/azure_zoneclient.go
new file mode 100644
index 0000000000000000000000000000000000000000..0d43ddd619c53f24cf4e884a991e54dcf2bdaae5
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/azure_zoneclient.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package zoneclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+
+	"k8s.io/klog/v2"
+
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var _ Interface = &Client{}
+
+type resourceTypeMetadata struct {
+	ResourceType string         `json:"resourceType"`
+	ZoneMappings []zoneMappings `json:"zoneMappings"`
+}
+
+type zoneMappings struct {
+	Location string   `json:"location"`
+	Zones    []string `json:"zones"`
+}
+
+type providerListDataProperty struct {
+	ID            string                 `json:"id"`
+	ResourceTypes []resourceTypeMetadata `json:"resourceTypes"`
+}
+
+type providerListData struct {
+	ProviderListDataProperties []providerListDataProperty `json:"value"`
+}
+
+// Client implements zone client Interface.
+type Client struct {
+	armClient      armclient.Interface
+	subscriptionID string
+	cloudName      string
+}
+
+// New creates a new zone client.
+func New(config *azclients.ClientConfig) *Client {
+	baseURI := config.ResourceManagerEndpoint
+	authorizer := config.Authorizer
+	apiVersion := APIVersion
+	if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud {
+		apiVersion = AzureStackCloudAPIVersion
+	}
+
+	armClient := armclient.New(authorizer, *config, baseURI, apiVersion)
+	client := &Client{
+		armClient:      armClient,
+		subscriptionID: config.SubscriptionID,
+		cloudName:      config.CloudName,
+	}
+
+	return client
+}
+
+// GetZones gets the region-zone map for the subscription specified
+func (c *Client) GetZones(ctx context.Context, subscriptionID string) (map[string][]string, *retry.Error) {
+	result, rerr := c.getZones(ctx, subscriptionID)
+	if rerr != nil {
+
+		return result, rerr
+	}
+
+	return result, nil
+}
+
+// getZones gets the region-zone map for the subscription specified
+func (c *Client) getZones(ctx context.Context, subscriptionID string) (map[string][]string, *retry.Error) {
+	resourceID := armclient.GetProviderResourcesListID(subscriptionID)
+
+	response, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "zone.get.request", resourceID, rerr.Error())
+		return nil, rerr
+	}
+
+	result := providerListData{}
+	err := autorest.Respond(
+		response,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result))
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "zone.get.respond", resourceID, err)
+		return nil, retry.GetError(response, err)
+	}
+
+	regionZoneMap := make(map[string][]string)
+	expectedID := fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Compute", subscriptionID)
+	if len(result.ProviderListDataProperties) != 0 {
+		for _, property := range result.ProviderListDataProperties {
+			if strings.EqualFold(property.ID, expectedID) {
+				for _, resourceType := range property.ResourceTypes {
+					if strings.EqualFold(resourceType.ResourceType, "virtualMachines") {
+						if len(resourceType.ZoneMappings) != 0 {
+							for _, zoneMapping := range resourceType.ZoneMappings {
+								location := strings.ToLower(strings.ReplaceAll(zoneMapping.Location, " ", ""))
+								regionZoneMap[location] = zoneMapping.Zones
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return regionZoneMap, nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee10203647b7e1f26b129d3c503ffef60344a7f4
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package zoneclient implements the client for ARM.
+package zoneclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..d9f542afa8869ac52ef945f3f331252335f95a32
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient/interface.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package zoneclient
+
+import (
+	"context"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+const (
+	// APIVersion is the API version for provider list api.
+	APIVersion = "2020-06-01"
+	// AzureStackCloudAPIVersion is the API version for Azure Stack
+	AzureStackCloudAPIVersion = "2019-07-01"
+	// AzureStackCloudName is the cloud name of Azure Stack
+	AzureStackCloudName = "AZURESTACKCLOUD"
+)
+
+// Interface is the client interface for ARM.
+// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client.
+type Interface interface {
+	GetZones(ctx context.Context, subscriptionID string) (map[string][]string, *retry.Error)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd936db482c654e4d40dac250f0f6518a2da751c
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go
@@ -0,0 +1,175 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"k8s.io/client-go/tools/cache"
+)
+
+// AzureCacheReadType defines the read type for cache data
+type AzureCacheReadType int
+
+const (
+	// CacheReadTypeDefault returns data from cache if cache entry not expired
+	// if cache entry expired, then it will refetch the data using getter
+	// save the entry in cache and then return
+	CacheReadTypeDefault AzureCacheReadType = iota
+	// CacheReadTypeUnsafe returns data from cache even if the cache entry is
+	// active/expired. If entry doesn't exist in cache, then data is fetched
+	// using getter, saved in cache and returned
+	CacheReadTypeUnsafe
+	// CacheReadTypeForceRefresh force refreshes the cache even if the cache entry
+	// is not expired
+	CacheReadTypeForceRefresh
+)
+
+// GetFunc defines a getter function for timedCache.
+type GetFunc func(key string) (interface{}, error)
+
+// AzureCacheEntry is the internal structure stores inside TTLStore.
+type AzureCacheEntry struct {
+	Key  string
+	Data interface{}
+
+	// The lock to ensure not updating same entry simultaneously.
+	Lock sync.Mutex
+	// time when entry was fetched and created
+	CreatedOn time.Time
+}
+
+// cacheKeyFunc defines the key function required in TTLStore.
+func cacheKeyFunc(obj interface{}) (string, error) {
+	return obj.(*AzureCacheEntry).Key, nil
+}
+
+// TimedCache is a cache with TTL.
+type TimedCache struct {
+	Store  cache.Store
+	Lock   sync.Mutex
+	Getter GetFunc
+	TTL    time.Duration
+}
+
+// NewTimedcache creates a new TimedCache.
+func NewTimedcache(ttl time.Duration, getter GetFunc) (*TimedCache, error) {
+	if getter == nil {
+		return nil, fmt.Errorf("getter is not provided")
+	}
+
+	return &TimedCache{
+		Getter: getter,
+		// switch to using NewStore instead of NewTTLStore so that we can
+		// reuse entries for calls that are fine with reading expired/stale data.
+		// with NewTTLStore, entries are not returned if they have already expired.
+		Store: cache.NewStore(cacheKeyFunc),
+		TTL:   ttl,
+	}, nil
+}
+
+// getInternal returns AzureCacheEntry by key. If the key is not cached yet,
+// it returns an AzureCacheEntry with nil data.
+func (t *TimedCache) getInternal(key string) (*AzureCacheEntry, error) {
+	entry, exists, err := t.Store.GetByKey(key)
+	if err != nil {
+		return nil, err
+	}
+	// if entry exists, return the entry
+	if exists {
+		return entry.(*AzureCacheEntry), nil
+	}
+
+	// lock here to ensure if entry doesn't exist, we add a new entry
+	// avoiding overwrites
+	t.Lock.Lock()
+	defer t.Lock.Unlock()
+
+	// Another goroutine might have written the same key.
+	entry, exists, err = t.Store.GetByKey(key)
+	if err != nil {
+		return nil, err
+	}
+	if exists {
+		return entry.(*AzureCacheEntry), nil
+	}
+
+	// Still not found, add new entry with nil data.
+	// Note the data will be filled later by getter.
+	newEntry := &AzureCacheEntry{
+		Key:  key,
+		Data: nil,
+	}
+	_ = t.Store.Add(newEntry)
+	return newEntry, nil
+}
+
+// Get returns the requested item by key.
+func (t *TimedCache) Get(key string, crt AzureCacheReadType) (interface{}, error) {
+	entry, err := t.getInternal(key)
+	if err != nil {
+		return nil, err
+	}
+
+	entry.Lock.Lock()
+	defer entry.Lock.Unlock()
+
+	// entry exists and if cache is not force refreshed
+	if entry.Data != nil && crt != CacheReadTypeForceRefresh {
+		// allow unsafe read, so return data even if expired
+		if crt == CacheReadTypeUnsafe {
+			return entry.Data, nil
+		}
+		// if cached data is not expired, return cached data
+		if crt == CacheReadTypeDefault && time.Since(entry.CreatedOn) < t.TTL {
+			return entry.Data, nil
+		}
+	}
+	// Data is not cached yet, cache data is expired or requested force refresh
+	// cache it by getter. entry is locked before getting to ensure concurrent
+	// gets don't result in multiple ARM calls.
+	data, err := t.Getter(key)
+	if err != nil {
+		return nil, err
+	}
+
+	// set the data in cache and also set the last update time
+	// to now as the data was recently fetched
+	entry.Data = data
+	entry.CreatedOn = time.Now().UTC()
+
+	return entry.Data, nil
+}
+
+// Delete removes an item from the cache.
+func (t *TimedCache) Delete(key string) error {
+	return t.Store.Delete(&AzureCacheEntry{
+		Key: key,
+	})
+}
+
+// Set sets the data cache for the key.
+// It is only used for testing.
+func (t *TimedCache) Set(key string, data interface{}) {
+	_ = t.Store.Add(&AzureCacheEntry{
+		Key:       key,
+		Data:      data,
+		CreatedOn: time.Now().UTC(),
+	})
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..a4972a75c57edefdba38d4324d28bc51ae279362
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cache is an implementation of Azure caches.
+package cache // import "sigs.k8s.io/cloud-provider-azure/pkg/cache"
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
new file mode 100644
index 0000000000000000000000000000000000000000..45fa3984fe027d1c5779ad602543d13b2dbbd0e1
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
@@ -0,0 +1,1222 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/autorest/azure"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/client-go/informers"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/client-go/util/flowcontrol"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/auth"
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient"
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+
+	// ensure the newly added package from azure-sdk-for-go is in vendor/
+	_ "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/containerserviceclient"
+	// ensure the newly added package from azure-sdk-for-go is in vendor/
+	_ "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient"
+
+	"sigs.k8s.io/yaml"
+)
+
+var (
+	// Master nodes are not added to standard load balancer by default.
+	defaultExcludeMasterFromStandardLB = true
+	// Outbound SNAT is enabled by default.
+	defaultDisableOutboundSNAT = false
+	// RouteUpdateWaitingInSeconds is 30 seconds by default.
+	defaultRouteUpdateWaitingInSeconds = 30
+)
+
+// Config holds the configuration parsed from the --cloud-config flag
+// All fields are required unless otherwise specified
+// NOTE: Cloud config files should follow the same Kubernetes deprecation policy as
+// flags or CLIs. Config fields should not change behavior in incompatible ways and
+// should be deprecated for at least 2 releases prior to removal.
+// See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli
+// for more details.
+type Config struct {
+	auth.AzureAuthConfig
+	CloudProviderRateLimitConfig
+
+	// The cloud configure type for Azure cloud provider. Supported values are file, secret and merge.
+	CloudConfigType cloudConfigType `json:"cloudConfigType,omitempty" yaml:"cloudConfigType,omitempty"`
+
+	// The name of the resource group that the cluster is deployed in
+	ResourceGroup string `json:"resourceGroup,omitempty" yaml:"resourceGroup,omitempty"`
+	// The location of the resource group that the cluster is deployed in
+	Location string `json:"location,omitempty" yaml:"location,omitempty"`
+	// The name of site where the cluster will be deployed to that is more granular than the region specified by the "location" field.
+	// Currently only public ip, load balancer and managed disks support this.
+	ExtendedLocationName string `json:"extendedLocationName,omitempty" yaml:"extendedLocationName,omitempty"`
+	// The type of site that is being targeted.
+	// Currently only public ip, load balancer and managed disks support this.
+	ExtendedLocationType string `json:"extendedLocationType,omitempty" yaml:"extendedLocationType,omitempty"`
+	// The name of the VNet that the cluster is deployed in
+	VnetName string `json:"vnetName,omitempty" yaml:"vnetName,omitempty"`
+	// The name of the resource group that the Vnet is deployed in
+	VnetResourceGroup string `json:"vnetResourceGroup,omitempty" yaml:"vnetResourceGroup,omitempty"`
+	// The name of the subnet that the cluster is deployed in
+	SubnetName string `json:"subnetName,omitempty" yaml:"subnetName,omitempty"`
+	// The name of the security group attached to the cluster's subnet
+	SecurityGroupName string `json:"securityGroupName,omitempty" yaml:"securityGroupName,omitempty"`
+	// The name of the resource group that the security group is deployed in
+	SecurityGroupResourceGroup string `json:"securityGroupResourceGroup,omitempty" yaml:"securityGroupResourceGroup,omitempty"`
+	// (Optional in 1.6) The name of the route table attached to the subnet that the cluster is deployed in
+	RouteTableName string `json:"routeTableName,omitempty" yaml:"routeTableName,omitempty"`
+	// The name of the resource group that the RouteTable is deployed in
+	RouteTableResourceGroup string `json:"routeTableResourceGroup,omitempty" yaml:"routeTableResourceGroup,omitempty"`
+	// (Optional) The name of the availability set that should be used as the load balancer backend
+	// If this is set, the Azure cloudprovider will only add nodes from that availability set to the load
+	// balancer backend pool. If this is not set, and multiple agent pools (availability sets) are used, then
+	// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
+	// In other words, if you use multiple agent pools (availability sets), you MUST set this field.
+	PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName,omitempty" yaml:"primaryAvailabilitySetName,omitempty"`
+	// The type of azure nodes. Candidate values are: vmss and standard.
+	// If not set, it will be default to standard.
+	VMType string `json:"vmType,omitempty" yaml:"vmType,omitempty"`
+	// The name of the scale set that should be used as the load balancer backend.
+	// If this is set, the Azure cloudprovider will only add nodes from that scale set to the load
+	// balancer backend pool. If this is not set, and multiple agent pools (scale sets) are used, then
+	// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
+	// In other words, if you use multiple agent pools (scale sets), you MUST set this field.
+	PrimaryScaleSetName string `json:"primaryScaleSetName,omitempty" yaml:"primaryScaleSetName,omitempty"`
+	// Tags determines what tags shall be applied to the shared resources managed by controller manager, which
+	// includes load balancer, security group and route table. The supported format is `a=b,c=d,...`. After updated
+	// this config, the old tags would be replaced by the new ones.
+	// Because special characters are not supported in "tags" configuration, "tags" support would be removed in a future release,
+	// please consider migrating the config to "tagsMap".
+	Tags string `json:"tags,omitempty" yaml:"tags,omitempty"`
+	// TagsMap is similar to Tags but holds tags with special characters such as `=` and `,`.
+	TagsMap map[string]string `json:"tagsMap,omitempty" yaml:"tagsMap,omitempty"`
+	// SystemTags determines the tag keys managed by cloud provider. If it is not set, no tags would be deleted if
+	// the `Tags` is changed. However, the old tags would be deleted if they are neither included in `Tags` nor
+	// in `SystemTags` after the update of `Tags`.
+	SystemTags string `json:"systemTags,omitempty" yaml:"systemTags,omitempty"`
+	// Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
+	// If not set, it will be default to basic.
+	LoadBalancerSku string `json:"loadBalancerSku,omitempty" yaml:"loadBalancerSku,omitempty"`
+	// LoadBalancerName determines the specific name of the load balancer user want to use, working with
+	// LoadBalancerResourceGroup
+	LoadBalancerName string `json:"loadBalancerName,omitempty" yaml:"loadBalancerName,omitempty"`
+	// LoadBalancerResourceGroup determines the specific resource group of the load balancer user want to use, working
+	// with LoadBalancerName
+	LoadBalancerResourceGroup string `json:"loadBalancerResourceGroup,omitempty" yaml:"loadBalancerResourceGroup,omitempty"`
+	// PreConfiguredBackendPoolLoadBalancerTypes determines whether the LoadBalancer BackendPool has been preconfigured.
+	// Candidate values are:
+	//   "": exactly with today (not pre-configured for any LBs)
+	//   "internal": for internal LoadBalancer
+	//   "external": for external LoadBalancer
+	//   "all": for both internal and external LoadBalancer
+	PreConfiguredBackendPoolLoadBalancerTypes string `json:"preConfiguredBackendPoolLoadBalancerTypes,omitempty" yaml:"preConfiguredBackendPoolLoadBalancerTypes,omitempty"`
+
+	// DisableAvailabilitySetNodes disables VMAS nodes support when "VMType" is set to "vmss".
+	DisableAvailabilitySetNodes bool `json:"disableAvailabilitySetNodes,omitempty" yaml:"disableAvailabilitySetNodes,omitempty"`
+	// DisableAzureStackCloud disables AzureStackCloud support. It should be used
+	// when setting AzureAuthConfig.Cloud with "AZURESTACKCLOUD" to customize ARM endpoints
+	// while the cluster is not running on AzureStack.
+	DisableAzureStackCloud bool `json:"disableAzureStackCloud,omitempty" yaml:"disableAzureStackCloud,omitempty"`
+	// Enable exponential backoff to manage resource request retries
+	CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty" yaml:"cloudProviderBackoff,omitempty"`
+	// Use instance metadata service where possible
+	UseInstanceMetadata bool `json:"useInstanceMetadata,omitempty" yaml:"useInstanceMetadata,omitempty"`
+
+	// EnableMultipleStandardLoadBalancers determines the behavior of the standard load balancer. If set to true
+	// there would be one standard load balancer per VMAS or VMSS, which is similar with the behavior of the basic
+	// load balancer. Users could select the specific standard load balancer for their service by the service
+	// annotation `service.beta.kubernetes.io/azure-load-balancer-mode`, If set to false, the same standard load balancer
+	// would be shared by all services in the cluster. In this case, the mode selection annotation would be ignored.
+	EnableMultipleStandardLoadBalancers bool `json:"enableMultipleStandardLoadBalancers,omitempty" yaml:"enableMultipleStandardLoadBalancers,omitempty"`
+	// NodePoolsWithoutDedicatedSLB stores the VMAS/VMSS names that share the primary standard load balancer instead
+	// of having a dedicated one. This is useful only when EnableMultipleStandardLoadBalancers is set to true.
+	NodePoolsWithoutDedicatedSLB string `json:"nodePoolsWithoutDedicatedSLB,omitempty" yaml:"nodePoolsWithoutDedicatedSLB,omitempty"`
+
+	// Backoff exponent
+	CloudProviderBackoffExponent float64 `json:"cloudProviderBackoffExponent,omitempty" yaml:"cloudProviderBackoffExponent,omitempty"`
+	// Backoff jitter
+	CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter,omitempty" yaml:"cloudProviderBackoffJitter,omitempty"`
+
+	// ExcludeMasterFromStandardLB excludes master nodes from standard load balancer.
+	// If not set, it will be default to true.
+	ExcludeMasterFromStandardLB *bool `json:"excludeMasterFromStandardLB,omitempty" yaml:"excludeMasterFromStandardLB,omitempty"`
+	// DisableOutboundSNAT disables the outbound SNAT for public load balancer rules.
+	// It should only be set when loadBalancerSku is standard. If not set, it will be default to false.
+	DisableOutboundSNAT *bool `json:"disableOutboundSNAT,omitempty" yaml:"disableOutboundSNAT,omitempty"`
+
+	// Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer
+	MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount,omitempty" yaml:"maximumLoadBalancerRuleCount,omitempty"`
+	// Backoff retry limit
+	CloudProviderBackoffRetries int `json:"cloudProviderBackoffRetries,omitempty" yaml:"cloudProviderBackoffRetries,omitempty"`
+	// Backoff duration
+	CloudProviderBackoffDuration int `json:"cloudProviderBackoffDuration,omitempty" yaml:"cloudProviderBackoffDuration,omitempty"`
+	// AvailabilitySetNodesCacheTTLInSeconds sets the Cache TTL for availabilitySetNodesCache
+	// if not set, will use default value
+	AvailabilitySetNodesCacheTTLInSeconds int `json:"availabilitySetNodesCacheTTLInSeconds,omitempty" yaml:"availabilitySetNodesCacheTTLInSeconds,omitempty"`
+	// VmssCacheTTLInSeconds sets the cache TTL for VMSS
+	VmssCacheTTLInSeconds int `json:"vmssCacheTTLInSeconds,omitempty" yaml:"vmssCacheTTLInSeconds,omitempty"`
+	// VmssVirtualMachinesCacheTTLInSeconds sets the cache TTL for vmssVirtualMachines
+	VmssVirtualMachinesCacheTTLInSeconds int `json:"vmssVirtualMachinesCacheTTLInSeconds,omitempty" yaml:"vmssVirtualMachinesCacheTTLInSeconds,omitempty"`
+	// VmCacheTTLInSeconds sets the cache TTL for vm
+	VMCacheTTLInSeconds int `json:"vmCacheTTLInSeconds,omitempty" yaml:"vmCacheTTLInSeconds,omitempty"`
+	// LoadBalancerCacheTTLInSeconds sets the cache TTL for load balancer
+	LoadBalancerCacheTTLInSeconds int `json:"loadBalancerCacheTTLInSeconds,omitempty" yaml:"loadBalancerCacheTTLInSeconds,omitempty"`
+	// NsgCacheTTLInSeconds sets the cache TTL for network security group
+	NsgCacheTTLInSeconds int `json:"nsgCacheTTLInSeconds,omitempty" yaml:"nsgCacheTTLInSeconds,omitempty"`
+	// RouteTableCacheTTLInSeconds sets the cache TTL for route table
+	RouteTableCacheTTLInSeconds int `json:"routeTableCacheTTLInSeconds,omitempty" yaml:"routeTableCacheTTLInSeconds,omitempty"`
+	// AvailabilitySetsCacheTTLInSeconds sets the cache TTL for VMAS
+	AvailabilitySetsCacheTTLInSeconds int `json:"availabilitySetsCacheTTLInSeconds,omitempty" yaml:"availabilitySetsCacheTTLInSeconds,omitempty"`
+	// RouteUpdateWaitingInSeconds is the delay time for waiting route updates to take effect. This waiting delay is added
+	// because the routes are not taken effect when the async route updating operation returns success. Default is 30 seconds.
+	RouteUpdateWaitingInSeconds int `json:"routeUpdateWaitingInSeconds,omitempty" yaml:"routeUpdateWaitingInSeconds,omitempty"`
+	// The user agent for Azure customer usage attribution
+	UserAgent string `json:"userAgent,omitempty" yaml:"userAgent,omitempty"`
+	// LoadBalancerBackendPoolConfigurationType defines how vms join the load balancer backend pools. Supported values
+	// are `nodeIPConfiguration`, `nodeIP` and `podIP`.
+	// `nodeIPConfiguration`: vm network interfaces will be attached to the inbound backend pool of the load balancer (default);
+	// `nodeIP`: vm private IPs will be attached to the inbound backend pool of the load balancer;
+	// `podIP`: pod IPs will be attached to the inbound backend pool of the load balancer (not supported yet).
+	LoadBalancerBackendPoolConfigurationType string `json:"loadBalancerBackendPoolConfigurationType,omitempty" yaml:"loadBalancerBackendPoolConfigurationType,omitempty"`
+	// PutVMSSVMBatchSize defines how many requests the client send concurrently when putting the VMSS VMs.
+	// If it is smaller than or equal to zero, the request will be sent one by one in sequence (default).
+	PutVMSSVMBatchSize int `json:"putVMSSVMBatchSize" yaml:"putVMSSVMBatchSize"`
+}
+
+type InitSecretConfig struct {
+	SecretName      string `json:"secretName,omitempty" yaml:"secretName,omitempty"`
+	SecretNamespace string `json:"secretNamespace,omitempty" yaml:"secretNamespace,omitempty"`
+	CloudConfigKey  string `json:"cloudConfigKey,omitempty" yaml:"cloudConfigKey,omitempty"`
+}
+
+// HasExtendedLocation returns true if extended location properties are specified.
+func (config *Config) HasExtendedLocation() bool {
+	return config.ExtendedLocationName != "" && config.ExtendedLocationType != ""
+}
+
+var (
+	_ cloudprovider.Interface    = (*Cloud)(nil)
+	_ cloudprovider.Instances    = (*Cloud)(nil)
+	_ cloudprovider.LoadBalancer = (*Cloud)(nil)
+	_ cloudprovider.Routes       = (*Cloud)(nil)
+	_ cloudprovider.Zones        = (*Cloud)(nil)
+	_ cloudprovider.PVLabeler    = (*Cloud)(nil)
+)
+
+// Cloud holds the config and clients
+type Cloud struct {
+	Config
+	InitSecretConfig
+	Environment azure.Environment
+
+	RoutesClient                    routeclient.Interface
+	SubnetsClient                   subnetclient.Interface
+	InterfacesClient                interfaceclient.Interface
+	RouteTablesClient               routetableclient.Interface
+	LoadBalancerClient              loadbalancerclient.Interface
+	PublicIPAddressesClient         publicipclient.Interface
+	SecurityGroupsClient            securitygroupclient.Interface
+	VirtualMachinesClient           vmclient.Interface
+	StorageAccountClient            storageaccountclient.Interface
+	DisksClient                     diskclient.Interface
+	SnapshotsClient                 snapshotclient.Interface
+	FileClient                      fileclient.Interface
+	VirtualMachineScaleSetsClient   vmssclient.Interface
+	VirtualMachineScaleSetVMsClient vmssvmclient.Interface
+	VirtualMachineSizesClient       vmsizeclient.Interface
+	AvailabilitySetsClient          vmasclient.Interface
+	ZoneClient                      zoneclient.Interface
+	privateendpointclient           privateendpointclient.Interface
+	privatednsclient                privatednsclient.Interface
+	privatednszonegroupclient       privatednszonegroupclient.Interface
+	virtualNetworkLinksClient       virtualnetworklinksclient.Interface
+
+	ResourceRequestBackoff  wait.Backoff
+	Metadata                *InstanceMetadataService
+	VMSet                   VMSet
+	LoadBalancerBackendPool BackendPool
+
+	// ipv6DualStack allows overriding for unit testing.  It's normally initialized from featuregates
+	ipv6DualStackEnabled bool
+	// isSharedLoadBalancerSynced indicates if the reconcileSharedLoadBalancer has been run
+	isSharedLoadBalancerSynced bool
+	// Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes.
+	nodeCachesLock sync.RWMutex
+	// nodeNames holds current nodes for tracking added nodes in VM caches.
+	nodeNames sets.String
+	// nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone
+	// it is updated by the nodeInformer
+	nodeZones map[string]sets.String
+	// nodeResourceGroups holds nodes external resource groups
+	nodeResourceGroups map[string]string
+	// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
+	unmanagedNodes sets.String
+	// excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer.
+	excludeLoadBalancerNodes sets.String
+	nodePrivateIPs           map[string]sets.String
+	// nodeInformerSynced is for determining if the informer has synced.
+	nodeInformerSynced cache.InformerSynced
+
+	// routeCIDRsLock holds lock for routeCIDRs cache.
+	routeCIDRsLock sync.Mutex
+	// routeCIDRs holds cache for route CIDRs.
+	routeCIDRs map[string]string
+
+	// regionZonesMap stores all available zones for the subscription by region
+	regionZonesMap   map[string][]string
+	refreshZonesLock sync.RWMutex
+
+	KubeClient       clientset.Interface
+	eventBroadcaster record.EventBroadcaster
+	eventRecorder    record.EventRecorder
+	routeUpdater     *delayedRouteUpdater
+
+	vmCache  *azcache.TimedCache
+	lbCache  *azcache.TimedCache
+	nsgCache *azcache.TimedCache
+	rtCache  *azcache.TimedCache
+
+	*ManagedDiskController
+	*controllerCommon
+}
+
+func init() {
+	// In go-autorest SDK https://github.com/Azure/go-autorest/blob/master/autorest/sender.go#L258-L287,
+	// if ARM returns http.StatusTooManyRequests, the sender doesn't increase the retry attempt count,
+	// hence the Azure clients will keep retrying forever until it get a status code other than 429.
+	// So we explicitly removes http.StatusTooManyRequests from autorest.StatusCodesForRetry.
+	// Refer https://github.com/Azure/go-autorest/issues/398.
+	// TODO(feiskyer): Use autorest.SendDecorator to customize the retry policy when new Azure SDK is available.
+	statusCodesForRetry := make([]int, 0)
+	for _, code := range autorest.StatusCodesForRetry {
+		if code != http.StatusTooManyRequests {
+			statusCodesForRetry = append(statusCodesForRetry, code)
+		}
+	}
+	autorest.StatusCodesForRetry = statusCodesForRetry
+}
+
+// NewCloud returns a Cloud with initialized clients
+func NewCloud(configReader io.Reader, callFromCCM bool) (cloudprovider.Interface, error) {
+	az, err := NewCloudWithoutFeatureGates(configReader, callFromCCM)
+	if err != nil {
+		return nil, err
+	}
+	az.ipv6DualStackEnabled = utilfeature.DefaultFeatureGate.Enabled(consts.IPv6DualStack)
+
+	return az, nil
+}
+
+func NewCloudFromConfigFile(configFilePath string, calFromCCM bool) (cloudprovider.Interface, error) {
+	var (
+		cloud cloudprovider.Interface
+		err   error
+	)
+
+	if configFilePath != "" {
+		var config *os.File
+		config, err = os.Open(configFilePath)
+		if err != nil {
+			klog.Fatalf("Couldn't open cloud provider configuration %s: %#v",
+				configFilePath, err)
+		}
+
+		defer config.Close()
+		cloud, err = NewCloud(config, calFromCCM)
+	} else {
+		// Pass explicit nil so plugins can actually check for nil. See
+		// "Why is my nil error value not equal to nil?" in golang.org/doc/faq.
+		cloud, err = NewCloud(nil, false)
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("could not init cloud provider azure: %v", err)
+	}
+	if cloud == nil {
+		return nil, fmt.Errorf("nil cloud")
+	}
+
+	return cloud, nil
+}
+
+func (az *Cloud) configSecretMetadata(secretName, secretNamespace, cloudConfigKey string) {
+	if secretName == "" {
+		secretName = consts.DefaultCloudProviderConfigSecName
+	}
+	if secretNamespace == "" {
+		secretNamespace = consts.DefaultCloudProviderConfigSecNamespace
+	}
+	if cloudConfigKey == "" {
+		cloudConfigKey = consts.DefaultCloudProviderConfigSecKey
+	}
+
+	az.InitSecretConfig = InitSecretConfig{
+		SecretName:      secretName,
+		SecretNamespace: secretNamespace,
+		CloudConfigKey:  cloudConfigKey,
+	}
+}
+
+func NewCloudFromSecret(clientBuilder cloudprovider.ControllerClientBuilder, secretName, secretNamespace, cloudConfigKey string) (cloudprovider.Interface, error) {
+	az := &Cloud{
+		nodeNames:                sets.NewString(),
+		nodeZones:                map[string]sets.String{},
+		nodeResourceGroups:       map[string]string{},
+		unmanagedNodes:           sets.NewString(),
+		routeCIDRs:               map[string]string{},
+		excludeLoadBalancerNodes: sets.NewString(),
+		nodePrivateIPs:           map[string]sets.String{},
+	}
+
+	az.configSecretMetadata(secretName, secretNamespace, cloudConfigKey)
+
+	az.Initialize(clientBuilder, wait.NeverStop)
+
+	err := az.InitializeCloudFromSecret()
+	if err != nil {
+		return nil, fmt.Errorf("NewCloudFromSecret: failed to initialize cloud from secret %s/%s: %v", az.SecretNamespace, az.SecretName, err)
+	}
+
+	az.ipv6DualStackEnabled = utilfeature.DefaultFeatureGate.Enabled(consts.IPv6DualStack)
+
+	return az, nil
+}
+
+// NewCloudWithoutFeatureGates returns a Cloud without trying to wire the feature gates.  This is used by the unit tests
+// that don't load the actual features being used in the cluster.
+func NewCloudWithoutFeatureGates(configReader io.Reader, callFromCCM bool) (*Cloud, error) {
+	config, err := ParseConfig(configReader)
+	if err != nil {
+		return nil, err
+	}
+
+	az := &Cloud{
+		nodeNames:                sets.NewString(),
+		nodeZones:                map[string]sets.String{},
+		nodeResourceGroups:       map[string]string{},
+		unmanagedNodes:           sets.NewString(),
+		routeCIDRs:               map[string]string{},
+		excludeLoadBalancerNodes: sets.NewString(),
+		nodePrivateIPs:           map[string]sets.String{},
+	}
+
+	err = az.InitializeCloudFromConfig(config, false, callFromCCM)
+	if err != nil {
+		return nil, err
+	}
+
+	return az, nil
+}
+
+// InitializeCloudFromConfig initializes the Cloud from config.
+func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret, callFromCCM bool) error {
+	if config == nil {
+		// should not reach here
+		return fmt.Errorf("InitializeCloudFromConfig: cannot initialize from nil config")
+	}
+
+	if config.RouteTableResourceGroup == "" {
+		config.RouteTableResourceGroup = config.ResourceGroup
+	}
+
+	if config.SecurityGroupResourceGroup == "" {
+		config.SecurityGroupResourceGroup = config.ResourceGroup
+	}
+
+	if config.VMType == "" {
+		// default to standard vmType if not set.
+		config.VMType = consts.VMTypeStandard
+	}
+
+	if config.RouteUpdateWaitingInSeconds <= 0 {
+		config.RouteUpdateWaitingInSeconds = defaultRouteUpdateWaitingInSeconds
+	}
+
+	if config.DisableAvailabilitySetNodes && config.VMType != consts.VMTypeVMSS {
+		return fmt.Errorf("disableAvailabilitySetNodes %v is only supported when vmType is 'vmss'", config.DisableAvailabilitySetNodes)
+	}
+
+	if config.CloudConfigType == "" {
+		// The default cloud config type is cloudConfigTypeMerge.
+		config.CloudConfigType = cloudConfigTypeMerge
+	} else {
+		supportedCloudConfigTypes := sets.NewString(
+			string(cloudConfigTypeMerge),
+			string(cloudConfigTypeFile),
+			string(cloudConfigTypeSecret))
+		if !supportedCloudConfigTypes.Has(string(config.CloudConfigType)) {
+			return fmt.Errorf("cloudConfigType %v is not supported, supported values are %v", config.CloudConfigType, supportedCloudConfigTypes.List())
+		}
+	}
+
+	if config.LoadBalancerBackendPoolConfigurationType == "" ||
+		// TODO(nilo19): support pod IP mode in the future
+		strings.EqualFold(config.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypePODIP) {
+		config.LoadBalancerBackendPoolConfigurationType = consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration
+	} else {
+		supportedLoadBalancerBackendPoolConfigurationTypes := sets.NewString(
+			strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration),
+			strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypeNodeIP),
+			strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypePODIP))
+		if !supportedLoadBalancerBackendPoolConfigurationTypes.Has(strings.ToLower(config.LoadBalancerBackendPoolConfigurationType)) {
+			return fmt.Errorf("loadBalancerBackendPoolConfigurationType %s is not supported, supported values are %v", config.LoadBalancerBackendPoolConfigurationType, supportedLoadBalancerBackendPoolConfigurationTypes.List())
+		}
+	}
+
+	env, err := auth.ParseAzureEnvironment(config.Cloud, config.ResourceManagerEndpoint, config.IdentitySystem)
+	if err != nil {
+		return err
+	}
+
+	servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env, env.ServiceManagementEndpoint)
+	if errors.Is(err, auth.ErrorNoAuth) {
+		// Only controller-manager would lazy-initialize from secret, and credentials are required for such case.
+		if fromSecret {
+			err := fmt.Errorf("no credentials provided for Azure cloud provider")
+			klog.Fatal(err)
+			return err
+		}
+
+		// No credentials provided, useInstanceMetadata should be enabled for Kubelet.
+		// TODO(feiskyer): print different error message for Kubelet and controller-manager, as they're
+		// requiring different credential settings.
+		if !config.UseInstanceMetadata && config.CloudConfigType == cloudConfigTypeFile {
+			return fmt.Errorf("useInstanceMetadata must be enabled without Azure credentials")
+		}
+
+		klog.V(2).Infof("Azure cloud provider is starting without credentials")
+	} else if err != nil {
+		return err
+	}
+
+	// Initialize rate limiting config options.
+	InitializeCloudProviderRateLimitConfig(&config.CloudProviderRateLimitConfig)
+
+	resourceRequestBackoff := az.setCloudProviderBackoffDefaults(config)
+
+	err = az.setLBDefaults(config)
+	if err != nil {
+		return err
+	}
+
+	az.Config = *config
+	az.Environment = *env
+	az.ResourceRequestBackoff = resourceRequestBackoff
+	az.Metadata, err = NewInstanceMetadataService(consts.ImdsServer)
+	if err != nil {
+		return err
+	}
+
+	// No credentials provided, InstanceMetadataService would be used for getting Azure resources.
+	// Note that this only applies to Kubelet, controller-manager should configure credentials for managing Azure resources.
+	if servicePrincipalToken == nil {
+		return nil
+	}
+
+	// If uses network resources in different AAD Tenant, then prepare corresponding Service Principal Token for VM/VMSS client and network resources client
+	err = az.configureMultiTenantClients(servicePrincipalToken)
+	if err != nil {
+		return err
+	}
+
+	if az.MaximumLoadBalancerRuleCount == 0 {
+		az.MaximumLoadBalancerRuleCount = consts.MaximumLoadBalancerRuleCount
+	}
+
+	if strings.EqualFold(consts.VMTypeVMSS, az.Config.VMType) {
+		az.VMSet, err = newScaleSet(az)
+		if err != nil {
+			return err
+		}
+	} else {
+		az.VMSet, err = newAvailabilitySet(az)
+		if err != nil {
+			return err
+		}
+	}
+
+	if az.isLBBackendPoolTypeNodeIPConfig() {
+		az.LoadBalancerBackendPool = newBackendPoolTypeNodeIPConfig(az)
+	} else if az.isLBBackendPoolTypeNodeIP() {
+		az.LoadBalancerBackendPool = newBackendPoolTypeNodeIP(az)
+	}
+
+	err = az.initCaches()
+	if err != nil {
+		return err
+	}
+
+	if err := initDiskControllers(az); err != nil {
+		return err
+	}
+
+	// updating routes and syncing zones only in CCM
+	if callFromCCM {
+		// start delayed route updater.
+		az.routeUpdater = newDelayedRouteUpdater(az, routeUpdateInterval)
+		go az.routeUpdater.run()
+
+		// Azure Stack does not support zone at the moment
+		// https://docs.microsoft.com/en-us/azure-stack/user/azure-stack-network-differences?view=azs-2102
+		if !az.isStackCloud() {
+			// wait for the success first time of syncing zones
+			err = az.syncRegionZonesMap()
+			if err != nil {
+				klog.Errorf("InitializeCloudFromConfig: failed to sync regional zones map for the first time: %s", err.Error())
+				return err
+			}
+
+			go az.refreshZones(az.syncRegionZonesMap)
+		}
+	}
+
+	return nil
+}
+
+func (az *Cloud) isLBBackendPoolTypeNodeIPConfig() bool {
+	return strings.EqualFold(az.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration)
+}
+
+func (az *Cloud) isLBBackendPoolTypeNodeIP() bool {
+	return strings.EqualFold(az.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypeNodeIP)
+}
+
+func (az *Cloud) getPutVMSSVMBatchSize() int {
+	return az.PutVMSSVMBatchSize
+}
+
+func (az *Cloud) initCaches() (err error) {
+	az.vmCache, err = az.newVMCache()
+	if err != nil {
+		return err
+	}
+
+	az.lbCache, err = az.newLBCache()
+	if err != nil {
+		return err
+	}
+
+	az.nsgCache, err = az.newNSGCache()
+	if err != nil {
+		return err
+	}
+
+	az.rtCache, err = az.newRouteTableCache()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (az *Cloud) setLBDefaults(config *Config) error {
+	if strings.EqualFold(config.LoadBalancerSku, consts.LoadBalancerSkuStandard) {
+		// Do not add master nodes to standard LB by default.
+		if config.ExcludeMasterFromStandardLB == nil {
+			config.ExcludeMasterFromStandardLB = &defaultExcludeMasterFromStandardLB
+		}
+
+		// Enable outbound SNAT by default.
+		if config.DisableOutboundSNAT == nil {
+			config.DisableOutboundSNAT = &defaultDisableOutboundSNAT
+		}
+	} else {
+		if config.DisableOutboundSNAT != nil && *config.DisableOutboundSNAT {
+			return fmt.Errorf("disableOutboundSNAT should only set when loadBalancerSku is standard")
+		}
+	}
+	return nil
+}
+
+func (az *Cloud) configureMultiTenantClients(servicePrincipalToken *adal.ServicePrincipalToken) error {
+	var err error
+	var multiTenantServicePrincipalToken *adal.MultiTenantServicePrincipalToken
+	var networkResourceServicePrincipalToken *adal.ServicePrincipalToken
+	if az.Config.UsesNetworkResourceInDifferentTenantOrSubscription() {
+		multiTenantServicePrincipalToken, err = auth.GetMultiTenantServicePrincipalToken(&az.Config.AzureAuthConfig, &az.Environment)
+		if err != nil {
+			return err
+		}
+		networkResourceServicePrincipalToken, err = auth.GetNetworkResourceServicePrincipalToken(&az.Config.AzureAuthConfig, &az.Environment)
+		if err != nil {
+			return err
+		}
+	}
+
+	az.configAzureClients(servicePrincipalToken, multiTenantServicePrincipalToken, networkResourceServicePrincipalToken)
+	return nil
+}
+
+func (az *Cloud) setCloudProviderBackoffDefaults(config *Config) wait.Backoff {
+	// Conditionally configure resource request backoff
+	resourceRequestBackoff := wait.Backoff{
+		Steps: 1,
+	}
+	if config.CloudProviderBackoff {
+		// Assign backoff defaults if no configuration was passed in
+		if config.CloudProviderBackoffRetries == 0 {
+			config.CloudProviderBackoffRetries = consts.BackoffRetriesDefault
+		}
+		if config.CloudProviderBackoffDuration == 0 {
+			config.CloudProviderBackoffDuration = consts.BackoffDurationDefault
+		}
+		if config.CloudProviderBackoffExponent == 0 {
+			config.CloudProviderBackoffExponent = consts.BackoffExponentDefault
+		}
+
+		if config.CloudProviderBackoffJitter == 0 {
+			config.CloudProviderBackoffJitter = consts.BackoffJitterDefault
+		}
+
+		resourceRequestBackoff = wait.Backoff{
+			Steps:    config.CloudProviderBackoffRetries,
+			Factor:   config.CloudProviderBackoffExponent,
+			Duration: time.Duration(config.CloudProviderBackoffDuration) * time.Second,
+			Jitter:   config.CloudProviderBackoffJitter,
+		}
+		klog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
+			config.CloudProviderBackoffRetries,
+			config.CloudProviderBackoffExponent,
+			config.CloudProviderBackoffDuration,
+			config.CloudProviderBackoffJitter)
+	} else {
+		// CloudProviderBackoffRetries will be set to 1 by default as the requirements of Azure SDK.
+		config.CloudProviderBackoffRetries = 1
+		config.CloudProviderBackoffDuration = consts.BackoffDurationDefault
+	}
+	return resourceRequestBackoff
+}
+
// configAzureClients creates every Azure SDK client the provider uses from a
// shared base client config, applying a per-client rate limiter and, when
// network resources live in a different AAD tenant or subscription, alternate
// authorizers and subscription IDs for the affected clients.
func (az *Cloud) configAzureClients(
	servicePrincipalToken *adal.ServicePrincipalToken,
	multiTenantServicePrincipalToken *adal.MultiTenantServicePrincipalToken,
	networkResourceServicePrincipalToken *adal.ServicePrincipalToken) {
	azClientConfig := az.getAzureClientConfig(servicePrincipalToken)

	// Prepare AzureClientConfig for all azure clients
	interfaceClientConfig := azClientConfig.WithRateLimiter(az.Config.InterfaceRateLimit)
	vmSizeClientConfig := azClientConfig.WithRateLimiter(az.Config.VirtualMachineSizeRateLimit)
	snapshotClientConfig := azClientConfig.WithRateLimiter(az.Config.SnapshotRateLimit)
	storageAccountClientConfig := azClientConfig.WithRateLimiter(az.Config.StorageAccountRateLimit)
	diskClientConfig := azClientConfig.WithRateLimiter(az.Config.DiskRateLimit)
	vmClientConfig := azClientConfig.WithRateLimiter(az.Config.VirtualMachineRateLimit)
	vmssClientConfig := azClientConfig.WithRateLimiter(az.Config.VirtualMachineScaleSetRateLimit)
	// Error "not an active Virtual Machine Scale Set VM" is not retriable for VMSS VM.
	// But http.StatusNotFound is retriable because of ARM replication latency.
	vmssVMClientConfig := azClientConfig.WithRateLimiter(az.Config.VirtualMachineScaleSetRateLimit)
	vmssVMClientConfig.Backoff = vmssVMClientConfig.Backoff.WithNonRetriableErrors([]string{consts.VmssVMNotActiveErrorMessage}).WithRetriableHTTPStatusCodes([]int{http.StatusNotFound})
	routeClientConfig := azClientConfig.WithRateLimiter(az.Config.RouteRateLimit)
	subnetClientConfig := azClientConfig.WithRateLimiter(az.Config.SubnetsRateLimit)
	routeTableClientConfig := azClientConfig.WithRateLimiter(az.Config.RouteTableRateLimit)
	loadBalancerClientConfig := azClientConfig.WithRateLimiter(az.Config.LoadBalancerRateLimit)
	securityGroupClientConfig := azClientConfig.WithRateLimiter(az.Config.SecurityGroupRateLimit)
	publicIPClientConfig := azClientConfig.WithRateLimiter(az.Config.PublicIPAddressRateLimit)
	// TODO(ZeroMagic): add azurefileRateLimit
	fileClientConfig := azClientConfig.WithRateLimiter(nil)
	vmasClientConfig := azClientConfig.WithRateLimiter(az.Config.AvailabilitySetRateLimit)
	zoneClientConfig := azClientConfig.WithRateLimiter(nil)

	// If uses network resources in different AAD Tenant, update Authorizer for VM/VMSS/VMAS client config
	if multiTenantServicePrincipalToken != nil {
		multiTenantServicePrincipalTokenAuthorizer := autorest.NewMultiTenantServicePrincipalTokenAuthorizer(multiTenantServicePrincipalToken)
		vmClientConfig.Authorizer = multiTenantServicePrincipalTokenAuthorizer
		vmssClientConfig.Authorizer = multiTenantServicePrincipalTokenAuthorizer
		vmssVMClientConfig.Authorizer = multiTenantServicePrincipalTokenAuthorizer
		vmasClientConfig.Authorizer = multiTenantServicePrincipalTokenAuthorizer
	}

	// If uses network resources in different AAD Tenant, update SubscriptionID and Authorizer for network resources client config
	if networkResourceServicePrincipalToken != nil {
		networkResourceServicePrincipalTokenAuthorizer := autorest.NewBearerAuthorizer(networkResourceServicePrincipalToken)
		routeClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		subnetClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		routeTableClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		loadBalancerClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		securityGroupClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		publicIPClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer

		routeClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		subnetClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		routeTableClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		loadBalancerClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		securityGroupClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		publicIPClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
	}

	// Initialize all azure clients based on client config
	az.InterfacesClient = interfaceclient.New(interfaceClientConfig)
	az.VirtualMachineSizesClient = vmsizeclient.New(vmSizeClientConfig)
	az.SnapshotsClient = snapshotclient.New(snapshotClientConfig)
	az.StorageAccountClient = storageaccountclient.New(storageAccountClientConfig)
	az.DisksClient = diskclient.New(diskClientConfig)
	az.VirtualMachinesClient = vmclient.New(vmClientConfig)
	az.VirtualMachineScaleSetsClient = vmssclient.New(vmssClientConfig)
	az.VirtualMachineScaleSetVMsClient = vmssvmclient.New(vmssVMClientConfig)
	az.RoutesClient = routeclient.New(routeClientConfig)
	az.SubnetsClient = subnetclient.New(subnetClientConfig)
	az.RouteTablesClient = routetableclient.New(routeTableClientConfig)
	az.LoadBalancerClient = loadbalancerclient.New(loadBalancerClientConfig)
	az.SecurityGroupsClient = securitygroupclient.New(securityGroupClientConfig)
	az.PublicIPAddressesClient = publicipclient.New(publicIPClientConfig)
	az.FileClient = fileclient.New(fileClientConfig)
	az.AvailabilitySetsClient = vmasclient.New(vmasClientConfig)
	az.privateendpointclient = privateendpointclient.New(azClientConfig)
	az.privatednsclient = privatednsclient.New(azClientConfig)
	az.privatednszonegroupclient = privatednszonegroupclient.New(azClientConfig)
	az.virtualNetworkLinksClient = virtualnetworklinksclient.New(azClientConfig)

	// Keep any pre-set ZoneClient (e.g. injected for tests) rather than overwriting it.
	if az.ZoneClient == nil {
		az.ZoneClient = zoneclient.New(zoneClientConfig)
	}
}
+
+func (az *Cloud) getAzureClientConfig(servicePrincipalToken *adal.ServicePrincipalToken) *azclients.ClientConfig {
+	azClientConfig := &azclients.ClientConfig{
+		CloudName:               az.Config.Cloud,
+		Location:                az.Config.Location,
+		SubscriptionID:          az.Config.SubscriptionID,
+		ResourceManagerEndpoint: az.Environment.ResourceManagerEndpoint,
+		Authorizer:              autorest.NewBearerAuthorizer(servicePrincipalToken),
+		Backoff:                 &retry.Backoff{Steps: 1},
+		DisableAzureStackCloud:  az.Config.DisableAzureStackCloud,
+		UserAgent:               az.Config.UserAgent,
+	}
+
+	if az.Config.CloudProviderBackoff {
+		azClientConfig.Backoff = &retry.Backoff{
+			Steps:    az.Config.CloudProviderBackoffRetries,
+			Factor:   az.Config.CloudProviderBackoffExponent,
+			Duration: time.Duration(az.Config.CloudProviderBackoffDuration) * time.Second,
+			Jitter:   az.Config.CloudProviderBackoffJitter,
+		}
+	}
+
+	if az.Config.HasExtendedLocation() {
+		azClientConfig.ExtendedLocation = &azclients.ExtendedLocation{
+			Name: az.Config.ExtendedLocationName,
+			Type: az.Config.ExtendedLocationType,
+		}
+	}
+
+	return azClientConfig
+}
+
+// ParseConfig returns a parsed configuration for an Azure cloudprovider config file
+func ParseConfig(configReader io.Reader) (*Config, error) {
+	var config Config
+	if configReader == nil {
+		return nil, nil
+	}
+
+	configContents, err := ioutil.ReadAll(configReader)
+	if err != nil {
+		return nil, err
+	}
+
+	err = yaml.Unmarshal(configContents, &config)
+	if err != nil {
+		return nil, err
+	}
+
+	// The resource group name may be in different cases from different Azure APIs, hence it is converted to lower here.
+	// See more context at https://github.com/kubernetes/kubernetes/issues/71994.
+	config.ResourceGroup = strings.ToLower(config.ResourceGroup)
+	return &config, nil
+}
+
+func (az *Cloud) isStackCloud() bool {
+	return strings.EqualFold(az.Config.Cloud, consts.AzureStackCloudName) && !az.Config.DisableAzureStackCloud
+}
+
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// It builds the kube client and wires an event broadcaster/recorder that emits
// events under the "azure-cloud-provider" component name.
func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
	az.KubeClient = clientBuilder.ClientOrDie("azure-cloud-provider")
	az.eventBroadcaster = record.NewBroadcaster()
	az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.KubeClient.CoreV1().Events("")})
	az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"})
}
+
// LoadBalancer returns a balancer interface. Also returns true if the interface
// is supported, false otherwise. The Cloud itself implements the interface, so
// it is always supported.
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return az, true
}
+
// Instances returns an instances interface. Also returns true if the interface
// is supported, false otherwise. The Cloud itself implements the interface, so
// it is always supported.
func (az *Cloud) Instances() (cloudprovider.Instances, bool) {
	return az, true
}
+
// InstancesV2 returns an instancesV2 interface. Also returns true if the
// interface is supported, false otherwise. The Cloud itself implements the
// interface, so it is always supported.
func (az *Cloud) InstancesV2() (cloudprovider.InstancesV2, bool) {
	return az, true
}
+
+// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
+func (az *Cloud) Zones() (cloudprovider.Zones, bool) {
+	if az.isStackCloud() {
+		// Azure stack does not support zones at this point
+		// https://docs.microsoft.com/en-us/azure-stack/user/azure-stack-network-differences?view=azs-2102
+		return nil, false
+	}
+	return az, true
+}
+
// Clusters returns a clusters interface.  Also returns true if the interface
// is supported, false otherwise. The clusters interface is not implemented by
// this provider.
func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {
	return nil, false
}
+
// Routes returns a routes interface along with whether the interface is
// supported. The Cloud itself implements the interface, so it is always
// supported.
func (az *Cloud) Routes() (cloudprovider.Routes, bool) {
	return az, true
}
+
// HasClusterID returns true if the cluster has a clusterID.
// Always true for this provider.
func (az *Cloud) HasClusterID() bool {
	return true
}
+
// ProviderName returns the cloud provider ID (the registered name of the
// Azure provider).
func (az *Cloud) ProviderName() string {
	return consts.CloudProviderName
}
+
+func initDiskControllers(az *Cloud) error {
+	// Common controller contains the function
+	// needed by both blob disk and managed disk controllers
+
+	qps := float32(defaultAtachDetachDiskQPS)
+	bucket := defaultAtachDetachDiskBucket
+	if az.Config.AttachDetachDiskRateLimit != nil {
+		qps = az.Config.AttachDetachDiskRateLimit.CloudProviderRateLimitQPSWrite
+		bucket = az.Config.AttachDetachDiskRateLimit.CloudProviderRateLimitBucketWrite
+	}
+	klog.V(2).Infof("attach/detach disk operation rate limit QPS: %f, Bucket: %d", qps, bucket)
+
+	common := &controllerCommon{
+		location:              az.Location,
+		storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
+		resourceGroup:         az.ResourceGroup,
+		subscriptionID:        az.SubscriptionID,
+		cloud:                 az,
+		lockMap:               newLockMap(),
+		diskOpRateLimiter:     flowcontrol.NewTokenBucketRateLimiter(qps, bucket),
+	}
+
+	if az.HasExtendedLocation() {
+		common.extendedLocation = &ExtendedLocation{
+			Name: az.ExtendedLocationName,
+			Type: az.ExtendedLocationType,
+		}
+	}
+
+	az.ManagedDiskController = &ManagedDiskController{common: common}
+	az.controllerCommon = common
+
+	return nil
+}
+
// SetInformers sets informers for Azure cloud provider. It registers node
// add/update/delete handlers that keep the provider's local node caches in
// sync, and records the informer's HasSynced func so read paths can verify
// the cache is warm before serving from it.
func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
	klog.Infof("Setting up informers for Azure cloud provider")
	nodeInformer := informerFactory.Core().V1().Nodes().Informer()
	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		// New node: populate all caches for it.
		AddFunc: func(obj interface{}) {
			node := obj.(*v1.Node)
			az.updateNodeCaches(nil, node)
		},
		// Changed node: refresh cache entries derived from its labels/addresses.
		UpdateFunc: func(prev, obj interface{}) {
			prevNode := prev.(*v1.Node)
			newNode := obj.(*v1.Node)
			az.updateNodeCaches(prevNode, newNode)
		},
		DeleteFunc: func(obj interface{}) {
			node, isNode := obj.(*v1.Node)
			// We can get DeletedFinalStateUnknown instead of *v1.Node here
			// and we need to handle that correctly.
			if !isNode {
				deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
				if !ok {
					klog.Errorf("Received unexpected object: %v", obj)
					return
				}
				node, ok = deletedState.Obj.(*v1.Node)
				if !ok {
					klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
					return
				}
			}
			az.updateNodeCaches(node, nil)
		},
	})
	az.nodeInformerSynced = nodeInformer.HasSynced
}
+
// updateNodeCaches updates local cache for node's zones and external resource groups.
// prevNode non-nil removes the node's old entries; newNode non-nil (re)adds its
// current entries. An update passes both; add/delete pass nil for the other.
// Callers reach this via the node informer handlers; the write lock is taken here.
func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
	az.nodeCachesLock.Lock()
	defer az.nodeCachesLock.Unlock()

	if prevNode != nil {
		// Remove from nodeNames cache.
		az.nodeNames.Delete(prevNode.ObjectMeta.Name)

		// Remove from nodeZones cache.
		prevZone, ok := prevNode.ObjectMeta.Labels[consts.LabelFailureDomainBetaZone]
		if ok && az.isAvailabilityZone(prevZone) {
			az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
			// Drop the set entirely once it is empty.
			if az.nodeZones[prevZone].Len() == 0 {
				az.nodeZones[prevZone] = nil
			}
		}

		// Remove from nodeResourceGroups cache.
		_, ok = prevNode.ObjectMeta.Labels[consts.ExternalResourceGroupLabel]
		if ok {
			delete(az.nodeResourceGroups, prevNode.ObjectMeta.Name)
		}

		// Remove from unmanagedNodes cache.
		managed, ok := prevNode.ObjectMeta.Labels[consts.ManagedByAzureLabel]
		if ok && strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) {
			az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
			az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
		}

		// Remove from excludeLoadBalancerNodes cache.
		if _, hasExcludeBalancerLabel := prevNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
			az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
		}

		// Remove from nodePrivateIPs cache.
		for _, address := range getNodePrivateIPAddresses(prevNode) {
			klog.V(4).Infof("removing IP address %s of the node %s", address, prevNode.Name)
			az.nodePrivateIPs[prevNode.Name].Delete(address)
		}
	}

	if newNode != nil {
		// Add to nodeNames cache.
		az.nodeNames.Insert(newNode.ObjectMeta.Name)

		// Add to nodeZones cache.
		newZone, ok := newNode.ObjectMeta.Labels[consts.LabelFailureDomainBetaZone]
		if ok && az.isAvailabilityZone(newZone) {
			// Lazily create the per-zone set (it may be nil after removal above).
			if az.nodeZones[newZone] == nil {
				az.nodeZones[newZone] = sets.NewString()
			}
			az.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
		}

		// Add to nodeResourceGroups cache.
		newRG, ok := newNode.ObjectMeta.Labels[consts.ExternalResourceGroupLabel]
		if ok && len(newRG) > 0 {
			az.nodeResourceGroups[newNode.ObjectMeta.Name] = strings.ToLower(newRG)
		}

		// Add to unmanagedNodes cache.
		managed, ok := newNode.ObjectMeta.Labels[consts.ManagedByAzureLabel]
		if ok && strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) {
			az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
			az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
		}

		// Add to excludeLoadBalancerNodes cache.
		if _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
			klog.V(4).Infof("adding node %s from the exclude-from-lb list because the label %s is found", newNode.Name, v1.LabelNodeExcludeBalancers)
			az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
		}

		// Add to nodePrivateIPs cache
		for _, address := range getNodePrivateIPAddresses(newNode) {
			// Lazily create the per-node set.
			if az.nodePrivateIPs[newNode.Name] == nil {
				az.nodePrivateIPs[newNode.Name] = sets.NewString()
			}

			klog.V(4).Infof("adding IP address %s of the node %s", address, newNode.Name)
			az.nodePrivateIPs[newNode.Name].Insert(address)
		}
	}
}
+
+// GetActiveZones returns all the zones in which k8s nodes are currently running.
+func (az *Cloud) GetActiveZones() (sets.String, error) {
+	if az.nodeInformerSynced == nil {
+		return nil, fmt.Errorf("azure cloud provider doesn't have informers set")
+	}
+
+	az.nodeCachesLock.RLock()
+	defer az.nodeCachesLock.RUnlock()
+	if !az.nodeInformerSynced() {
+		return nil, fmt.Errorf("node informer is not synced when trying to GetActiveZones")
+	}
+
+	zones := sets.NewString()
+	for zone, nodes := range az.nodeZones {
+		if len(nodes) > 0 {
+			zones.Insert(zone)
+		}
+	}
+	return zones, nil
+}
+
// GetLocation returns the location in which k8s cluster is currently running,
// as provided by the cloud provider configuration.
func (az *Cloud) GetLocation() string {
	return az.Location
}
+
+// GetNodeResourceGroup gets resource group for given node.
+func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) {
+	// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
+	if az.nodeInformerSynced == nil {
+		return az.ResourceGroup, nil
+	}
+
+	az.nodeCachesLock.RLock()
+	defer az.nodeCachesLock.RUnlock()
+	if !az.nodeInformerSynced() {
+		return "", fmt.Errorf("node informer is not synced when trying to GetNodeResourceGroup")
+	}
+
+	// Return external resource group if it has been cached.
+	if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok {
+		return cachedRG, nil
+	}
+
+	// Return resource group from cloud provider options.
+	return az.ResourceGroup, nil
+}
+
+// GetNodeNames returns a set of all node names in the k8s cluster.
+func (az *Cloud) GetNodeNames() (sets.String, error) {
+	// Kubelet won't set az.nodeInformerSynced, return nil.
+	if az.nodeInformerSynced == nil {
+		return nil, nil
+	}
+
+	az.nodeCachesLock.RLock()
+	defer az.nodeCachesLock.RUnlock()
+	if !az.nodeInformerSynced() {
+		return nil, fmt.Errorf("node informer is not synced when trying to GetNodeNames")
+	}
+
+	return sets.NewString(az.nodeNames.List()...), nil
+}
+
+// GetResourceGroups returns a set of resource groups that all nodes are running on.
+func (az *Cloud) GetResourceGroups() (sets.String, error) {
+	// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
+	if az.nodeInformerSynced == nil {
+		return sets.NewString(az.ResourceGroup), nil
+	}
+
+	az.nodeCachesLock.RLock()
+	defer az.nodeCachesLock.RUnlock()
+	if !az.nodeInformerSynced() {
+		return nil, fmt.Errorf("node informer is not synced when trying to GetResourceGroups")
+	}
+
+	resourceGroups := sets.NewString(az.ResourceGroup)
+	for _, rg := range az.nodeResourceGroups {
+		resourceGroups.Insert(rg)
+	}
+
+	return resourceGroups, nil
+}
+
+// GetUnmanagedNodes returns a list of nodes not managed by Azure cloud provider (e.g. on-prem nodes).
+func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
+	// Kubelet won't set az.nodeInformerSynced, always return nil.
+	if az.nodeInformerSynced == nil {
+		return nil, nil
+	}
+
+	az.nodeCachesLock.RLock()
+	defer az.nodeCachesLock.RUnlock()
+	if !az.nodeInformerSynced() {
+		return nil, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes")
+	}
+
+	return sets.NewString(az.unmanagedNodes.List()...), nil
+}
+
+// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged, in external resource group or labeled with "node.kubernetes.io/exclude-from-external-load-balancers".
+func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, error) {
+	// Kubelet won't set az.nodeInformerSynced, always return nil.
+	if az.nodeInformerSynced == nil {
+		return false, nil
+	}
+
+	az.nodeCachesLock.RLock()
+	defer az.nodeCachesLock.RUnlock()
+	if !az.nodeInformerSynced() {
+		return false, fmt.Errorf("node informer is not synced when trying to fetch node caches")
+	}
+
+	// Return true if the node is in external resource group.
+	if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok && !strings.EqualFold(cachedRG, az.ResourceGroup) {
+		return true, nil
+	}
+
+	return az.excludeLoadBalancerNodes.Has(nodeName), nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go
new file mode 100644
index 0000000000000000000000000000000000000000..c236fca7b1d96cba3da7da529d8878bc359c470c
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go
@@ -0,0 +1,543 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"regexp"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
var (
	// pipErrorMessageRE parses an ARM error message that embeds a public IP
	// address resource ID, capturing the resource group (group 1) and the
	// public IP name (group 2).
	pipErrorMessageRE = regexp.MustCompile(`(?:.*)/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Network/publicIPAddresses/([^\s]+)(?:.*)`)
)
+
+// RequestBackoff if backoff is disabled in cloud provider it
+// returns a new Backoff object steps = 1
+// This is to make sure that the requested command executes
+// at least once
+func (az *Cloud) RequestBackoff() (resourceRequestBackoff wait.Backoff) {
+	if az.CloudProviderBackoff {
+		return az.ResourceRequestBackoff
+	}
+	resourceRequestBackoff = wait.Backoff{
+		Steps: 1,
+	}
+	return resourceRequestBackoff
+}
+
+// Event creates a event for the specified object.
+func (az *Cloud) Event(obj runtime.Object, eventType, reason, message string) {
+	if obj != nil && reason != "" {
+		az.eventRecorder.Event(obj, eventType, reason, message)
+	}
+}
+
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry.
// cloudprovider.InstanceNotFound is treated as terminal and returned immediately;
// other errors are retried per the provider's backoff policy, and the last
// retry error is surfaced if the backoff times out.
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName, crt azcache.AzureCacheReadType) (compute.VirtualMachine, error) {
	var machine compute.VirtualMachine
	var retryErr error
	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
		machine, retryErr = az.getVirtualMachine(name, crt)
		if errors.Is(retryErr, cloudprovider.InstanceNotFound) {
			// Not retriable: the instance does not exist on Azure.
			return true, cloudprovider.InstanceNotFound
		}
		if retryErr != nil {
			klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
			return false, nil
		}
		klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
		return true, nil
	})
	// Prefer the underlying error over the generic wait timeout.
	if errors.Is(err, wait.ErrWaitTimeout) {
		err = retryErr
	}
	return machine, err
}
+
+// ListVirtualMachines invokes az.VirtualMachinesClient.List with exponential backoff retry
+func (az *Cloud) ListVirtualMachines(resourceGroup string) ([]compute.VirtualMachine, error) {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	allNodes, rerr := az.VirtualMachinesClient.List(ctx, resourceGroup)
+	if rerr != nil {
+		klog.Errorf("VirtualMachinesClient.List(%v) failure with err=%v", resourceGroup, rerr)
+		return nil, rerr.Error()
+	}
+	klog.V(2).Infof("VirtualMachinesClient.List(%v) success", resourceGroup)
+	return allNodes, nil
+}
+
// getPrivateIPsForMachine is wrapper for optional backoff getting private ips
// list of a node by name. It delegates to the retrying implementation, whose
// backoff policy comes from RequestBackoff.
func (az *Cloud) getPrivateIPsForMachine(nodeName types.NodeName) ([]string, error) {
	return az.getPrivateIPsForMachineWithRetry(nodeName)
}
+
+func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]string, error) {
+	var privateIPs []string
+	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
+		var retryErr error
+		privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName))
+		if retryErr != nil {
+			// won't retry since the instance doesn't exist on Azure.
+			if errors.Is(retryErr, cloudprovider.InstanceNotFound) {
+				return true, retryErr
+			}
+			klog.Errorf("GetPrivateIPsByNodeName(%s): backoff failure, will retry,err=%v", nodeName, retryErr)
+			return false, nil
+		}
+		klog.V(3).Infof("GetPrivateIPsByNodeName(%s): backoff success", nodeName)
+		return true, nil
+	})
+	return privateIPs, err
+}
+
// getIPForMachine returns the node's private and public IP addresses,
// retrying with the provider's backoff policy.
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
	return az.GetIPForMachineWithRetry(nodeName)
}
+
+// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry
+func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) {
+	var ip, publicIP string
+	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
+		var retryErr error
+		ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name))
+		if retryErr != nil {
+			klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
+			return false, nil
+		}
+		klog.V(3).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
+		return true, nil
+	})
+	return ip, publicIP, err
+}
+
// CreateOrUpdateSecurityGroup invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry.
// On success and on ETAG/cancellation failures the local NSG cache entry is
// invalidated so the next read refetches from ARM.
func (az *Cloud) CreateOrUpdateSecurityGroup(sg network.SecurityGroup) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	rerr := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.SecurityGroupResourceGroup, *sg.Name, sg, to.String(sg.Etag))
	klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
	if rerr == nil {
		// Invalidate the cache right after updating
		_ = az.nsgCache.Delete(*sg.Name)
		return nil
	}

	nsgJSON, _ := json.Marshal(sg)
	klog.Warningf("CreateOrUpdateSecurityGroup(%s) failed: %v, NSG request: %s", to.String(sg.Name), rerr.Error(), string(nsgJSON))

	// Invalidate the cache because ETAG precondition mismatch.
	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
		klog.V(3).Infof("SecurityGroup cache for %s is cleanup because of http.StatusPreconditionFailed", *sg.Name)
		_ = az.nsgCache.Delete(*sg.Name)
	}

	// Invalidate the cache because another new operation has canceled the current request.
	if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) {
		klog.V(3).Infof("SecurityGroup cache for %s is cleanup because CreateOrUpdateSecurityGroup is canceled by another operation", *sg.Name)
		_ = az.nsgCache.Delete(*sg.Name)
	}

	return rerr.Error()
}
+
+func cleanupSubnetInFrontendIPConfigurations(lb *network.LoadBalancer) network.LoadBalancer {
+	if lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil {
+		return *lb
+	}
+
+	frontendIPConfigurations := *lb.FrontendIPConfigurations
+	for i := range frontendIPConfigurations {
+		config := frontendIPConfigurations[i]
+		if config.FrontendIPConfigurationPropertiesFormat != nil &&
+			config.Subnet != nil &&
+			config.Subnet.ID != nil {
+			subnet := network.Subnet{
+				ID: config.Subnet.ID,
+			}
+			if config.Subnet.Name != nil {
+				subnet.Name = config.FrontendIPConfigurationPropertiesFormat.Subnet.Name
+			}
+			config.FrontendIPConfigurationPropertiesFormat.Subnet = &subnet
+			frontendIPConfigurations[i] = config
+			continue
+		}
+	}
+
+	lb.FrontendIPConfigurations = &frontendIPConfigurations
+	return *lb
+}
+
// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry.
// On success and on ETAG/cancellation failures the local LB cache entry is
// invalidated. When the update fails because a referenced public IP is not in
// the Succeeded provisioning state, a dummy PIP update is issued to repair it
// and the LB cache is invalidated so the next reconcile retries the LB update.
func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	// Strip read-only subnet properties before sending the PUT.
	lb = cleanupSubnetInFrontendIPConfigurations(&lb)

	rgName := az.getLoadBalancerResourceGroup()
	rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, to.String(lb.Name), lb, to.String(lb.Etag))
	klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
	if rerr == nil {
		// Invalidate the cache right after updating
		_ = az.lbCache.Delete(*lb.Name)
		return nil
	}

	lbJSON, _ := json.Marshal(lb)
	klog.Warningf("LoadBalancerClient.CreateOrUpdate(%s) failed: %v, LoadBalancer request: %s", to.String(lb.Name), rerr.Error(), string(lbJSON))

	// Invalidate the cache because ETAG precondition mismatch.
	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
		klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", to.String(lb.Name))
		_ = az.lbCache.Delete(*lb.Name)
	}

	retryErrorMessage := rerr.Error().Error()
	// Invalidate the cache because another new operation has canceled the current request.
	if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) {
		klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", to.String(lb.Name))
		_ = az.lbCache.Delete(*lb.Name)
	}

	// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state
	if strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(consts.ReferencedResourceNotProvisionedMessageCode)) {
		// pipErrorMessageRE captures the PIP's resource group and name from the error text.
		matches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)
		if len(matches) != 3 {
			klog.Errorf("Failed to parse the retry error message %s", retryErrorMessage)
			return rerr.Error()
		}
		pipRG, pipName := matches[1], matches[2]
		klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, to.String(lb.Name))
		pip, _, err := az.getPublicIPAddress(pipRG, pipName)
		if err != nil {
			klog.Errorf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err)
			return rerr.Error()
		}
		// Perform a dummy update to fix the provisioning state
		err = az.CreateOrUpdatePIP(service, pipRG, pip)
		if err != nil {
			klog.Errorf("Failed to update the public IP %s in resource group %s: %v", pipName, pipRG, err)
			return rerr.Error()
		}
		// Invalidate the LB cache, return the error, and the controller manager
		// would retry the LB update in the next reconcile loop
		_ = az.lbCache.Delete(*lb.Name)
	}

	return rerr.Error()
}
+
// CreateOrUpdateLBBackendPool invokes az.LoadBalancerClient.CreateOrUpdateBackendPools
// with exponential backoff retry. The LB cache entry for lbName is invalidated on
// success, on ETAG precondition failures, and when the request is canceled by a
// newer operation.
func (az *Cloud) CreateOrUpdateLBBackendPool(lbName string, backendPool network.BackendAddressPool) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	klog.V(4).Infof("CreateOrUpdateLBBackendPool: updating backend pool %s in LB %s", to.String(backendPool.Name), lbName)
	rerr := az.LoadBalancerClient.CreateOrUpdateBackendPools(ctx, az.getLoadBalancerResourceGroup(), lbName, to.String(backendPool.Name), backendPool, to.String(backendPool.Etag))
	if rerr == nil {
		// Invalidate the cache right after updating
		_ = az.lbCache.Delete(lbName)
		return nil
	}

	// Invalidate the cache because ETAG precondition mismatch.
	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
		klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", lbName)
		_ = az.lbCache.Delete(lbName)
	}

	retryErrorMessage := rerr.Error().Error()
	// Invalidate the cache because another new operation has canceled the current request.
	if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) {
		klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", lbName)
		_ = az.lbCache.Delete(lbName)
	}

	return rerr.Error()
}
+
+// ListManagedLBs invokes az.LoadBalancerClient.List and filter out
+// those that are not managed by cloud provider azure or not associated to a managed VMSet.
+func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterName string) ([]network.LoadBalancer, error) {
+	allLBs, err := az.ListLB(service)
+	if err != nil {
+		return nil, err
+	}
+
+	if allLBs == nil {
+		klog.Warningf("ListManagedLBs: no LBs found")
+		return nil, nil
+	}
+
+	// return early if wantLb=false
+	if nodes == nil {
+		return allLBs, nil
+	}
+
+	agentPoolLBs := make([]network.LoadBalancer, 0)
+	agentPoolVMSetNames, err := az.VMSet.GetAgentPoolVMSetNames(nodes)
+	if err != nil {
+		return nil, fmt.Errorf("ListManagedLBs: failed to get agent pool vmSet names: %w", err)
+	}
+
+	agentPoolVMSetNamesSet := sets.NewString()
+	if agentPoolVMSetNames != nil && len(*agentPoolVMSetNames) > 0 {
+		for _, vmSetName := range *agentPoolVMSetNames {
+			klog.V(5).Infof("ListManagedLBs: found agent pool vmSet name %s", vmSetName)
+			agentPoolVMSetNamesSet.Insert(strings.ToLower(vmSetName))
+		}
+	}
+
+	for _, lb := range allLBs {
+		vmSetNameFromLBName := az.mapLoadBalancerNameToVMSet(to.String(lb.Name), clusterName)
+		if strings.EqualFold(strings.TrimSuffix(to.String(lb.Name), consts.InternalLoadBalancerNameSuffix), clusterName) ||
+			agentPoolVMSetNamesSet.Has(strings.ToLower(vmSetNameFromLBName)) {
+			agentPoolLBs = append(agentPoolLBs, lb)
+			klog.V(4).Infof("ListManagedLBs: found agent pool LB %s", to.String(lb.Name))
+		}
+	}
+
+	return agentPoolLBs, nil
+}
+
+// ListLB invokes az.LoadBalancerClient.List with exponential backoff retry
+func (az *Cloud) ListLB(service *v1.Service) ([]network.LoadBalancer, error) {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	rgName := az.getLoadBalancerResourceGroup()
+	allLBs, rerr := az.LoadBalancerClient.List(ctx, rgName)
+	if rerr != nil {
+		if rerr.IsNotFound() {
+			return nil, nil
+		}
+		az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", rerr.Error().Error())
+		klog.Errorf("LoadBalancerClient.List(%v) failure with err=%v", rgName, rerr)
+		return nil, rerr.Error()
+	}
+	klog.V(2).Infof("LoadBalancerClient.List(%v) success", rgName)
+	return allLBs, nil
+}
+
+// ListPIP list the PIP resources in the given resource group
+func (az *Cloud) ListPIP(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	allPIPs, rerr := az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
+	if rerr != nil {
+		if rerr.IsNotFound() {
+			return nil, nil
+		}
+		az.Event(service, v1.EventTypeWarning, "ListPublicIPs", rerr.Error().Error())
+		klog.Errorf("PublicIPAddressesClient.List(%v) failure with err=%v", pipResourceGroup, rerr)
+		return nil, rerr.Error()
+	}
+
+	klog.V(2).Infof("PublicIPAddressesClient.List(%v) success", pipResourceGroup)
+	return allPIPs, nil
+}
+
+// CreateOrUpdatePIP invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
+func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, to.String(pip.Name), pip)
+	klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, to.String(pip.Name))
+	if rerr != nil {
+		pipJSON, _ := json.Marshal(pip)
+		klog.Warningf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s, PublicIP request: %s", pipResourceGroup, to.String(pip.Name), rerr.Error().Error(), string(pipJSON))
+		az.Event(service, v1.EventTypeWarning, "CreateOrUpdatePublicIPAddress", rerr.Error().Error())
+		return rerr.Error()
+	}
+
+	return nil
+}
+
+// CreateOrUpdateInterface invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
+func (az *Cloud) CreateOrUpdateInterface(service *v1.Service, nic network.Interface) error {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	rerr := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
+	klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
+	if rerr != nil {
+		klog.Errorf("InterfacesClient.CreateOrUpdate(%s) failed: %s", *nic.Name, rerr.Error().Error())
+		az.Event(service, v1.EventTypeWarning, "CreateOrUpdateInterface", rerr.Error().Error())
+		return rerr.Error()
+	}
+
+	return nil
+}
+
+// DeletePublicIP invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
+func (az *Cloud) DeletePublicIP(service *v1.Service, pipResourceGroup string, pipName string) error {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	rerr := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
+	if rerr != nil {
+		klog.Errorf("PublicIPAddressesClient.Delete(%s) failed: %s", pipName, rerr.Error().Error())
+		az.Event(service, v1.EventTypeWarning, "DeletePublicIPAddress", rerr.Error().Error())
+
+		if strings.Contains(rerr.Error().Error(), consts.CannotDeletePublicIPErrorMessageCode) {
+			klog.Warningf("DeletePublicIP for public IP %s failed with error %v, this is because other resources are referencing the public IP. The deletion of the service will continue.", pipName, rerr.Error())
+			return nil
+		}
+		return rerr.Error()
+	}
+
+	return nil
+}
+
+// DeleteLB invokes az.LoadBalancerClient.Delete with exponential backoff retry
+func (az *Cloud) DeleteLB(service *v1.Service, lbName string) *retry.Error {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	rgName := az.getLoadBalancerResourceGroup()
+	rerr := az.LoadBalancerClient.Delete(ctx, rgName, lbName)
+	if rerr == nil {
+		// Invalidate the cache right after updating
+		_ = az.lbCache.Delete(lbName)
+		return nil
+	}
+
+	klog.Errorf("LoadBalancerClient.Delete(%s) failed: %s", lbName, rerr.Error().Error())
+	az.Event(service, v1.EventTypeWarning, "DeleteLoadBalancer", rerr.Error().Error())
+	return rerr
+}
+
+// CreateOrUpdateRouteTable invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
+func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	rerr := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, to.String(routeTable.Etag))
+	if rerr == nil {
+		// Invalidate the cache right after updating
+		_ = az.rtCache.Delete(*routeTable.Name)
+		return nil
+	}
+
+	rtJSON, _ := json.Marshal(routeTable)
+	klog.Warningf("RouteTablesClient.CreateOrUpdate(%s) failed: %v, RouteTable request: %s", to.String(routeTable.Name), rerr.Error(), string(rtJSON))
+
+	// Invalidate the cache because etag mismatch.
+	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
+		klog.V(3).Infof("Route table cache for %s is cleanup because of http.StatusPreconditionFailed", *routeTable.Name)
+		_ = az.rtCache.Delete(*routeTable.Name)
+	}
+	// Invalidate the cache because another new operation has canceled the current request.
+	if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) {
+		klog.V(3).Infof("Route table cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *routeTable.Name)
+		_ = az.rtCache.Delete(*routeTable.Name)
+	}
+	klog.Errorf("RouteTablesClient.CreateOrUpdate(%s) failed: %v", az.RouteTableName, rerr.Error())
+	return rerr.Error()
+}
+
+// CreateOrUpdateRoute invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
+func (az *Cloud) CreateOrUpdateRoute(route network.Route) error {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	rerr := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route, to.String(route.Etag))
+	klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
+	if rerr == nil {
+		_ = az.rtCache.Delete(az.RouteTableName)
+		return nil
+	}
+
+	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
+		klog.V(3).Infof("Route cache for %s is cleanup because of http.StatusPreconditionFailed", *route.Name)
+		_ = az.rtCache.Delete(az.RouteTableName)
+	}
+	// Invalidate the cache because another new operation has canceled the current request.
+	if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) {
+		klog.V(3).Infof("Route cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *route.Name)
+		_ = az.rtCache.Delete(az.RouteTableName)
+	}
+	return rerr.Error()
+}
+
// DeleteRouteWithName invokes az.RoutesClient.Delete with exponential backoff retry.
// (The previous comment wrongly said CreateOrUpdate.)
func (az *Cloud) DeleteRouteWithName(routeName string) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	rerr := az.RoutesClient.Delete(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeName)
	klog.V(10).Infof("RoutesClient.Delete(%s,%s): end", az.RouteTableName, routeName)
	if rerr == nil {
		return nil
	}

	klog.Errorf("RoutesClient.Delete(%s, %s) failed: %v", az.RouteTableName, routeName, rerr.Error())
	return rerr.Error()
}
+
+// CreateOrUpdateVMSS invokes az.VirtualMachineScaleSetsClient.Update().
+func (az *Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
+	// Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
+	klog.V(3).Infof("CreateOrUpdateVMSS: verify the status of the vmss being created or updated")
+	vmss, rerr := az.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, VMScaleSetName)
+	if rerr != nil {
+		klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, rerr)
+		return rerr
+	}
+	if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) {
+		klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName)
+		return nil
+	}
+
+	rerr = az.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters)
+	klog.V(10).Infof("UpdateVmssVMWithRetry: VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", VMScaleSetName)
+	if rerr != nil {
+		klog.Errorf("CreateOrUpdateVMSS: error CreateOrUpdate vmss(%s): %v", VMScaleSetName, rerr)
+		return rerr
+	}
+
+	return nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b0af9e7e542d3a331a421b6cb1d2c27668525a3
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/yaml"
+)
+
// cloudConfigType is the config type for Azure cloud provider secret. Supported values are:
// * file   : The values are read from local cloud-config file.
// * secret : The values from secret would override all configures from local cloud-config file.
// * merge  : The values from secret would override only configurations that are explicitly set in the secret. This is the default value.
type cloudConfigType string

// Valid cloudConfigType values; see GetConfigFromSecret for how each is handled.
const (
	cloudConfigTypeFile   cloudConfigType = "file"
	cloudConfigTypeSecret cloudConfigType = "secret"
	cloudConfigTypeMerge  cloudConfigType = "merge"
)
+
+// InitializeCloudFromSecret initializes Azure cloud provider from Kubernetes secret.
+func (az *Cloud) InitializeCloudFromSecret() error {
+	config, err := az.GetConfigFromSecret()
+	if err != nil {
+		klog.Errorf("Failed to get cloud-config from secret: %v", err)
+		return fmt.Errorf("InitializeCloudFromSecret: failed to get cloud config from secret %s/%s: %w", az.SecretNamespace, az.SecretName, err)
+	}
+
+	if config == nil {
+		// Skip re-initialization if the config is not override.
+		return nil
+	}
+
+	if err := az.InitializeCloudFromConfig(config, true, true); err != nil {
+		klog.Errorf("Failed to initialize Azure cloud provider: %v", err)
+		return fmt.Errorf("InitializeCloudFromSecret: failed to initialize Azure cloud provider: %w", err)
+	}
+
+	return nil
+}
+
// GetConfigFromSecret reads the cloud config from the Kubernetes secret
// az.SecretNamespace/az.SecretName (data key az.CloudConfigKey).
// It returns (nil, nil) when CloudConfigType is "file", meaning the local
// cloud-config file is authoritative and no override should happen.
// For the default "merge" type the secret values are layered on top of the
// already-loaded az.Config; for "secret" the result comes from the secret alone.
func (az *Cloud) GetConfigFromSecret() (*Config, error) {
	// Read config from file and no override, return nil.
	if az.Config.CloudConfigType == cloudConfigTypeFile {
		return nil, nil
	}

	secret, err := az.KubeClient.CoreV1().Secrets(az.SecretNamespace).Get(context.TODO(), az.SecretName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get secret %s/%s: %w", az.SecretNamespace, az.SecretName, err)
	}

	cloudConfigData, ok := secret.Data[az.CloudConfigKey]
	if !ok {
		return nil, fmt.Errorf("cloud-config is not set in the secret (%s/%s)", az.SecretNamespace, az.SecretName)
	}

	config := Config{}
	if az.Config.CloudConfigType == "" || az.Config.CloudConfigType == cloudConfigTypeMerge {
		// Merge cloud config, set default value to existing config.
		config = az.Config
	}

	err = yaml.Unmarshal(cloudConfigData, &config)
	if err != nil {
		return nil, fmt.Errorf("failed to parse Azure cloud-config: %w", err)
	}

	return &config, nil
}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go
new file mode 100644
index 0000000000000000000000000000000000000000..42a4611e336177f1b2ad06270e42594bf391ce72
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go
@@ -0,0 +1,657 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+
+	"k8s.io/apimachinery/pkg/types"
+	kwait "k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/flowcontrol"
+	cloudprovider "k8s.io/cloud-provider"
+	volerr "k8s.io/cloud-provider/volume/errors"
+	"k8s.io/klog/v2"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
const (
	// Disk Caching is not supported for disks 4 TiB and larger
	// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
	diskCachingLimit = 4096 // GiB

	maxLUN = 64 // max number of LUNs per VM
	// Lowercased substrings used to recognize specific ARM error responses.
	errStatusCode400       = "statuscode=400"
	errInvalidParameter    = `code="invalidparameter"`
	errTargetInstanceIds   = `target="instanceids"`
	sourceSnapshot         = "snapshot"
	sourceVolume           = "volume"
	// Per-node lockMap key suffixes for the attach/detach request queues.
	attachDiskMapKeySuffix = "attachdiskmap"
	detachDiskMapKeySuffix = "detachdiskmap"

	// WriteAcceleratorEnabled support for Azure Write Accelerator on Azure Disks
	// https://docs.microsoft.com/azure/virtual-machines/windows/how-to-enable-write-accelerator
	WriteAcceleratorEnabled = "writeacceleratorenabled"

	// see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-by-copying-a-snapshot.
	diskSnapshotPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/snapshots/%s"

	// see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-from-an-existing-managed-disk-in-the-same-or-different-subscription.
	managedDiskPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s"
)
+
// defaultBackOff is the exponential backoff used while waiting for Azure
// operations: 20 steps starting at 2s with factor 1.5 and no jitter.
var defaultBackOff = kwait.Backoff{
	Steps:    20,
	Duration: 2 * time.Second,
	Factor:   1.5,
	Jitter:   0.0,
}

var (
	// managedDiskPathRE captures the disk name from a managed-disk resource URI.
	managedDiskPathRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/disks/(.+)`)
	// diskSnapshotPathRE captures the snapshot name from a snapshot resource URI.
	diskSnapshotPathRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)`)
)
+
// controllerCommon holds the shared state used by disk attach/detach
// operations across the standard and scale-set VM implementations.
type controllerCommon struct {
	subscriptionID        string
	location              string
	extendedLocation      *ExtendedLocation
	storageEndpointSuffix string
	resourceGroup         string
	diskStateMap          sync.Map // <diskURI, attaching/detaching state>
	// lockMap serializes operations per node and per request-queue key.
	lockMap *lockMap
	cloud   *Cloud
	// disk queue that is waiting for attach or detach on specific node
	// <nodeName, map<diskURI, *AttachDiskOptions/DetachDiskOptions>>
	attachDiskMap sync.Map
	detachDiskMap sync.Map
	// attach/detach disk rate limiter
	diskOpRateLimiter flowcontrol.RateLimiter
}
+
// AttachDiskOptions attach disk options
type AttachDiskOptions struct {
	cachingMode             compute.CachingTypes
	diskName                string
	diskEncryptionSetID     string
	writeAcceleratorEnabled bool
	// lun is the LUN to attach the disk at; AttachDisk initializes it to -1
	// and SetDiskLun assigns the real value.
	lun int32
}

// ExtendedLocation contains additional info about the location of resources.
type ExtendedLocation struct {
	// Name - The name of the extended location.
	Name string `json:"name,omitempty"`
	// Type - The type of the extended location.
	Type string `json:"type,omitempty"`
}
+
+// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
+func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) {
+	// 1. vmType is standard, return cloud.VMSet directly.
+	if c.cloud.VMType == consts.VMTypeStandard {
+		return c.cloud.VMSet, nil
+	}
+
+	// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to ScaleSet.
+	ss, ok := c.cloud.VMSet.(*ScaleSet)
+	if !ok {
+		return nil, fmt.Errorf("error of converting vmSet (%q) to ScaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType)
+	}
+
+	// 3. If the node is managed by availability set, then return ss.availabilitySet.
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName), crt)
+	if err != nil {
+		return nil, err
+	}
+	if managedByAS {
+		// vm is managed by availability set.
+		return ss.availabilitySet, nil
+	}
+
+	// 4. Node is managed by vmss
+	return ss, nil
+}
+
+// AttachDisk attaches a disk to vm
+// parameter async indicates whether allow multiple batch disk attach on one node in parallel
+// return (lun, error)
+func (c *controllerCommon) AttachDisk(ctx context.Context, async bool, diskName, diskURI string, nodeName types.NodeName,
+	cachingMode compute.CachingTypes, disk *compute.Disk) (int32, error) {
+	diskEncryptionSetID := ""
+	writeAcceleratorEnabled := false
+
+	// there is possibility that disk is nil when GetDisk is throttled
+	// don't check disk state when GetDisk is throttled
+	if disk != nil {
+		if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
+			vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
+			if err != nil {
+				return -1, err
+			}
+			attachedNode, err := vmset.GetNodeNameByProviderID(*disk.ManagedBy)
+			if err != nil {
+				return -1, err
+			}
+			if strings.EqualFold(string(nodeName), string(attachedNode)) {
+				klog.Warningf("volume %q is actually attached to current node %q, invalidate vm cache and return error", diskURI, nodeName)
+				// update VM(invalidate vm cache)
+				if errUpdate := c.UpdateVM(nodeName); errUpdate != nil {
+					return -1, errUpdate
+				}
+				lun, _, err := c.GetDiskLun(diskName, diskURI, nodeName)
+				return lun, err
+			}
+
+			attachErr := fmt.Sprintf(
+				"disk(%s) already attached to node(%s), could not be attached to node(%s)",
+				diskURI, *disk.ManagedBy, nodeName)
+			klog.V(2).Infof("found dangling volume %s attached to node %s, could not be attached to node(%s)", diskURI, attachedNode, nodeName)
+			return -1, volerr.NewDanglingError(attachErr, attachedNode, "")
+		}
+
+		if disk.DiskProperties != nil {
+			if disk.DiskProperties.DiskSizeGB != nil && *disk.DiskProperties.DiskSizeGB >= diskCachingLimit && cachingMode != compute.CachingTypesNone {
+				// Disk Caching is not supported for disks 4 TiB and larger
+				// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
+				cachingMode = compute.CachingTypesNone
+				klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set cacheMode as None",
+					diskURI, *disk.DiskProperties.DiskSizeGB, diskCachingLimit)
+			}
+
+			if disk.DiskProperties.Encryption != nil &&
+				disk.DiskProperties.Encryption.DiskEncryptionSetID != nil {
+				diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
+			}
+
+			if disk.DiskProperties.DiskState != compute.Unattached && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
+				return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, disk.DiskProperties.DiskState, compute.Unattached)
+			}
+		}
+
+		if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok {
+			if v != nil && strings.EqualFold(*v, "true") {
+				writeAcceleratorEnabled = true
+			}
+		}
+	}
+
+	options := AttachDiskOptions{
+		lun:                     -1,
+		diskName:                diskName,
+		cachingMode:             cachingMode,
+		diskEncryptionSetID:     diskEncryptionSetID,
+		writeAcceleratorEnabled: writeAcceleratorEnabled,
+	}
+	node := strings.ToLower(string(nodeName))
+	diskuri := strings.ToLower(diskURI)
+	if err := c.insertAttachDiskRequest(diskuri, node, &options); err != nil {
+		return -1, err
+	}
+
+	c.lockMap.LockEntry(node)
+	unlock := false
+	defer func() {
+		if !unlock {
+			c.lockMap.UnlockEntry(node)
+		}
+	}()
+
+	diskMap, err := c.cleanAttachDiskRequests(node)
+	if err != nil {
+		return -1, err
+	}
+
+	lun, err := c.SetDiskLun(nodeName, diskuri, diskMap)
+	if err != nil {
+		return -1, err
+	}
+
+	klog.V(2).Infof("Trying to attach volume %q lun %d to node %q, diskMap: %s", diskURI, lun, nodeName, diskMap)
+	if len(diskMap) == 0 {
+		return lun, nil
+	}
+
+	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		return -1, err
+	}
+	c.diskStateMap.Store(disk, "attaching")
+	defer c.diskStateMap.Delete(disk)
+	future, err := vmset.AttachDisk(nodeName, diskMap)
+	if err != nil {
+		return -1, err
+	}
+
+	if async && c.diskOpRateLimiter.TryAccept() {
+		// unlock and wait for attach disk complete
+		unlock = true
+		c.lockMap.UnlockEntry(node)
+	} else {
+		klog.Warningf("azureDisk - switch to batch operation due to rate limited(async: %t), QPS: %f", async, c.diskOpRateLimiter.QPS())
+	}
+	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
+	if err != nil {
+		return -1, err
+	}
+	return lun, vmset.WaitForUpdateResult(ctx, future, resourceGroup, "attach_disk")
+}
+
+func (c *controllerCommon) insertAttachDiskRequest(diskURI, nodeName string, options *AttachDiskOptions) error {
+	var diskMap map[string]*AttachDiskOptions
+	attachDiskMapKey := nodeName + attachDiskMapKeySuffix
+	c.lockMap.LockEntry(attachDiskMapKey)
+	defer c.lockMap.UnlockEntry(attachDiskMapKey)
+	v, ok := c.attachDiskMap.Load(nodeName)
+	if ok {
+		if diskMap, ok = v.(map[string]*AttachDiskOptions); !ok {
+			return fmt.Errorf("convert attachDiskMap failure on node(%s)", nodeName)
+		}
+	} else {
+		diskMap = make(map[string]*AttachDiskOptions)
+		c.attachDiskMap.Store(nodeName, diskMap)
+	}
+	// insert attach disk request to queue
+	_, ok = diskMap[diskURI]
+	if ok {
+		klog.V(2).Infof("azureDisk - duplicated attach disk(%s) request on node(%s)", diskURI, nodeName)
+	} else {
+		diskMap[diskURI] = options
+	}
+	return nil
+}
+
+// clean up attach disk requests
+// return original attach disk requests
+func (c *controllerCommon) cleanAttachDiskRequests(nodeName string) (map[string]*AttachDiskOptions, error) {
+	var diskMap map[string]*AttachDiskOptions
+
+	attachDiskMapKey := nodeName + attachDiskMapKeySuffix
+	c.lockMap.LockEntry(attachDiskMapKey)
+	defer c.lockMap.UnlockEntry(attachDiskMapKey)
+	v, ok := c.attachDiskMap.Load(nodeName)
+	if !ok {
+		return diskMap, nil
+	}
+	if diskMap, ok = v.(map[string]*AttachDiskOptions); !ok {
+		return diskMap, fmt.Errorf("convert attachDiskMap failure on node(%s)", nodeName)
+	}
+	c.attachDiskMap.Store(nodeName, make(map[string]*AttachDiskOptions))
+	return diskMap, nil
+}
+
+// DetachDisk detaches a disk from VM
+func (c *controllerCommon) DetachDisk(ctx context.Context, diskName, diskURI string, nodeName types.NodeName) error {
+	if _, err := c.cloud.InstanceID(context.TODO(), nodeName); err != nil {
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			// if host doesn't exist, no need to detach
+			klog.Warningf("azureDisk - failed to get azure instance id(%q), DetachDisk(%s) will assume disk is already detached",
+				nodeName, diskURI)
+			return nil
+		}
+		klog.Warningf("failed to get azure instance id (%v)", err)
+		return fmt.Errorf("failed to get azure instance id for node %q: %w", nodeName, err)
+	}
+
+	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		return err
+	}
+
+	node := strings.ToLower(string(nodeName))
+	disk := strings.ToLower(diskURI)
+	if err := c.insertDetachDiskRequest(diskName, disk, node); err != nil {
+		return err
+	}
+
+	c.lockMap.LockEntry(node)
+	defer c.lockMap.UnlockEntry(node)
+	diskMap, err := c.cleanDetachDiskRequests(node)
+	if err != nil {
+		return err
+	}
+
+	klog.V(2).Infof("Trying to detach volume %q from node %q, diskMap: %s", diskURI, nodeName, diskMap)
+	if len(diskMap) > 0 {
+		c.diskStateMap.Store(disk, "detaching")
+		defer c.diskStateMap.Delete(disk)
+		if err = vmset.DetachDisk(nodeName, diskMap); err != nil {
+			if isInstanceNotFoundError(err) {
+				// if host doesn't exist, no need to detach
+				klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached",
+					err, diskURI)
+				return nil
+			}
+		}
+	} else {
+		lun, _, errGetLun := c.GetDiskLun(diskName, diskURI, nodeName)
+		if errGetLun == nil || !strings.Contains(errGetLun.Error(), consts.CannotFindDiskLUN) {
+			return fmt.Errorf("disk(%s) is still attatched to node(%s) on lun(%d), error: %v", diskURI, nodeName, lun, errGetLun)
+		}
+	}
+
+	if err != nil {
+		klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
+		return err
+	}
+
+	klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI)
+	return nil
+}
+
+// UpdateVM updates a vm
+func (c *controllerCommon) UpdateVM(nodeName types.NodeName) error {
+	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		return err
+	}
+	node := strings.ToLower(string(nodeName))
+	c.lockMap.LockEntry(node)
+	defer c.lockMap.UnlockEntry(node)
+	return vmset.UpdateVM(nodeName)
+}
+
+func (c *controllerCommon) insertDetachDiskRequest(diskName, diskURI, nodeName string) error {
+	var diskMap map[string]string
+	detachDiskMapKey := nodeName + detachDiskMapKeySuffix
+	c.lockMap.LockEntry(detachDiskMapKey)
+	defer c.lockMap.UnlockEntry(detachDiskMapKey)
+	v, ok := c.detachDiskMap.Load(nodeName)
+	if ok {
+		if diskMap, ok = v.(map[string]string); !ok {
+			return fmt.Errorf("convert detachDiskMap failure on node(%s)", nodeName)
+		}
+	} else {
+		diskMap = make(map[string]string)
+		c.detachDiskMap.Store(nodeName, diskMap)
+	}
+	// insert detach disk request to queue
+	_, ok = diskMap[diskURI]
+	if ok {
+		klog.V(2).Infof("azureDisk - duplicated detach disk(%s) request on node(%s)", diskURI, nodeName)
+	} else {
+		diskMap[diskURI] = diskName
+	}
+	return nil
+}
+
+// clean up detach disk requests
+// return original detach disk requests
+func (c *controllerCommon) cleanDetachDiskRequests(nodeName string) (map[string]string, error) {
+	var diskMap map[string]string
+
+	detachDiskMapKey := nodeName + detachDiskMapKeySuffix
+	c.lockMap.LockEntry(detachDiskMapKey)
+	defer c.lockMap.UnlockEntry(detachDiskMapKey)
+	v, ok := c.detachDiskMap.Load(nodeName)
+	if !ok {
+		return diskMap, nil
+	}
+	if diskMap, ok = v.(map[string]string); !ok {
+		return diskMap, fmt.Errorf("convert detachDiskMap failure on node(%s)", nodeName)
+	}
+	// clean up original requests in disk map
+	c.detachDiskMap.Store(nodeName, make(map[string]string))
+	return diskMap, nil
+}
+
+// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
+func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) {
+	vmset, err := c.getNodeVMSet(nodeName, crt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return vmset.GetDataDisks(nodeName, crt)
+}
+
+// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
+func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, *string, error) {
+	// getNodeDataDisks need to fetch the cached data/fresh data if cache expired here
+	// to ensure we get LUN based on latest entry.
+	disks, provisioningState, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
+		return -1, provisioningState, err
+	}
+
+	for _, disk := range disks {
+		if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
+			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) ||
+			(disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
+			if disk.ToBeDetached != nil && *disk.ToBeDetached {
+				klog.Warningf("azureDisk - find disk(ToBeDetached): lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
+			} else {
+				// found the disk
+				klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
+				return *disk.Lun, provisioningState, nil
+			}
+		}
+	}
+	return -1, provisioningState, fmt.Errorf("%s for disk %s", consts.CannotFindDiskLUN, diskName)
+}
+
+// SetDiskLun find unused luns and allocate lun for every disk in diskMap.
+// Return lun of diskURI, -1 if all luns are used.
+func (c *controllerCommon) SetDiskLun(nodeName types.NodeName, diskURI string, diskMap map[string]*AttachDiskOptions) (int32, error) {
+	disks, _, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
+		return -1, err
+	}
+
+	lun := int32(-1)
+	_, isDiskInMap := diskMap[diskURI]
+	used := make([]bool, maxLUN)
+	for _, disk := range disks {
+		if disk.Lun != nil {
+			used[*disk.Lun] = true
+			if !isDiskInMap {
+				// find lun of diskURI since diskURI is not in diskMap
+				if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) {
+					lun = *disk.Lun
+				}
+			}
+		}
+	}
+	if !isDiskInMap && lun < 0 {
+		return -1, fmt.Errorf("could not find disk(%s) in current disk list(len: %d) nor in diskMap(%v)", diskURI, len(disks), diskMap)
+	}
+	if len(diskMap) == 0 {
+		// attach disk request is empty, return directly
+		return lun, nil
+	}
+
+	// allocate lun for every disk in diskMap
+	var diskLuns []int32
+	count := 0
+	for k, v := range used {
+		if !v {
+			diskLuns = append(diskLuns, int32(k))
+			count++
+			if count >= len(diskMap) {
+				break
+			}
+		}
+	}
+
+	if len(diskLuns) != len(diskMap) {
+		return -1, fmt.Errorf("could not find enough disk luns(current: %d) for diskMap(%v, len=%d), diskURI(%s)",
+			len(diskLuns), diskMap, len(diskMap), diskURI)
+	}
+
+	count = 0
+	for uri, opt := range diskMap {
+		if opt == nil {
+			return -1, fmt.Errorf("unexpected nil pointer in diskMap(%v), diskURI(%s)", diskMap, diskURI)
+		}
+		if strings.EqualFold(uri, diskURI) {
+			lun = diskLuns[count]
+		}
+		opt.lun = diskLuns[count]
+		count++
+	}
+	if lun < 0 {
+		return lun, fmt.Errorf("could not find lun of diskURI(%s), diskMap(%v)", diskURI, diskMap)
+	}
+	return lun, nil
+}
+
+// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
+func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
+	attached := make(map[string]bool)
+	for _, diskName := range diskNames {
+		attached[diskName] = false
+	}
+
+	// doing stalled read for getNodeDataDisks to ensure we don't call ARM
+	// for every reconcile call. The cache is invalidated after Attach/Detach
+	// disk. So the new entry will be fetched and cached the first time reconcile
+	// loop runs after the Attach/Disk OP which will reflect the latest model.
+	disks, _, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			// if host doesn't exist, no need to detach
+			klog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
+				nodeName, diskNames)
+			return attached, nil
+		}
+
+		return attached, err
+	}
+
+	for _, disk := range disks {
+		for _, diskName := range diskNames {
+			if disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName) {
+				attached[diskName] = true
+			}
+		}
+	}
+
+	return attached, nil
+}
+
+func filterDetachingDisks(unfilteredDisks []compute.DataDisk) []compute.DataDisk {
+	filteredDisks := []compute.DataDisk{}
+	for _, disk := range unfilteredDisks {
+		if disk.ToBeDetached != nil && *disk.ToBeDetached {
+			if disk.Name != nil {
+				klog.V(2).Infof("Filtering disk: %s with ToBeDetached flag set.", *disk.Name)
+			}
+		} else {
+			filteredDisks = append(filteredDisks, disk)
+		}
+	}
+	return filteredDisks
+}
+
+func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []compute.DataDisk) []compute.DataDisk {
+	filteredDisks := []compute.DataDisk{}
+	for _, disk := range unfilteredDisks {
+		filter := false
+		if disk.ManagedDisk != nil && disk.ManagedDisk.ID != nil {
+			diskURI := *disk.ManagedDisk.ID
+			exist, err := c.cloud.checkDiskExists(ctx, diskURI)
+			if err != nil {
+				klog.Errorf("checkDiskExists(%s) failed with error: %v", diskURI, err)
+			} else {
+				// only filter disk when checkDiskExists returns <false, nil>
+				filter = !exist
+				if filter {
+					klog.Errorf("disk(%s) does not exist, removed from data disk list", diskURI)
+				}
+			}
+		}
+
+		if !filter {
+			filteredDisks = append(filteredDisks, disk)
+		}
+	}
+	return filteredDisks
+}
+
+func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string) (bool, error) {
+	diskName := path.Base(diskURI)
+	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
+	if err != nil {
+		return false, err
+	}
+
+	if _, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName); rerr != nil {
+		if rerr.HTTPStatusCode == http.StatusNotFound {
+			return false, nil
+		}
+		return false, rerr.Error()
+	}
+
+	return true, nil
+}
+
+func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourceType string) (compute.CreationData, error) {
+	if sourceResourceID == "" {
+		return compute.CreationData{
+			CreateOption: compute.Empty,
+		}, nil
+	}
+
+	switch sourceType {
+	case sourceSnapshot:
+		if match := diskSnapshotPathRE.FindString(sourceResourceID); match == "" {
+			sourceResourceID = fmt.Sprintf(diskSnapshotPath, subscriptionID, resourceGroup, sourceResourceID)
+		}
+
+	case sourceVolume:
+		if match := managedDiskPathRE.FindString(sourceResourceID); match == "" {
+			sourceResourceID = fmt.Sprintf(managedDiskPath, subscriptionID, resourceGroup, sourceResourceID)
+		}
+	default:
+		return compute.CreationData{
+			CreateOption: compute.Empty,
+		}, nil
+	}
+
+	splits := strings.Split(sourceResourceID, "/")
+	if len(splits) > 9 {
+		if sourceType == sourceSnapshot {
+			return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE)
+		}
+		return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE)
+	}
+	return compute.CreationData{
+		CreateOption:     compute.Copy,
+		SourceResourceID: &sourceResourceID,
+	}, nil
+}
+
+func isInstanceNotFoundError(err error) bool {
+	errMsg := strings.ToLower(err.Error())
+	if strings.Contains(errMsg, strings.ToLower(consts.VmssVMNotActiveErrorMessage)) {
+		return true
+	}
+	return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go
new file mode 100644
index 0000000000000000000000000000000000000000..3bf5d49772c151a3567492bd5bb7b8853978dc62
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go
@@ -0,0 +1,250 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"net/http"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+// AttachDisk attaches a disk to vm
+func (as *availabilitySet) AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) {
+	vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		return nil, err
+	}
+
+	vmName := mapNodeNameToVMName(nodeName)
+	nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
+	if err != nil {
+		return nil, err
+	}
+
+	disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+	copy(disks, *vm.StorageProfile.DataDisks)
+
+	for k, v := range diskMap {
+		diskURI := k
+		opt := v
+		attached := false
+		for _, disk := range *vm.StorageProfile.DataDisks {
+			if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) {
+				attached = true
+				break
+			}
+		}
+		if attached {
+			klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s)", diskURI, nodeName)
+			continue
+		}
+
+		managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
+		if opt.diskEncryptionSetID == "" {
+			if vm.StorageProfile.OsDisk != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil {
+				// set diskEncryptionSet as value of os disk by default
+				opt.diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID
+			}
+		}
+		if opt.diskEncryptionSetID != "" {
+			managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.diskEncryptionSetID}
+		}
+		disks = append(disks,
+			compute.DataDisk{
+				Name:                    &opt.diskName,
+				Lun:                     &opt.lun,
+				Caching:                 opt.cachingMode,
+				CreateOption:            "attach",
+				ManagedDisk:             managedDisk,
+				WriteAcceleratorEnabled: to.BoolPtr(opt.writeAcceleratorEnabled),
+			})
+	}
+
+	newVM := compute.VirtualMachineUpdate{
+		VirtualMachineProperties: &compute.VirtualMachineProperties{
+			StorageProfile: &compute.StorageProfile{
+				DataDisks: &disks,
+			},
+		},
+	}
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, vmName, diskMap)
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	// Invalidate the cache right after updating
+	defer func() {
+		_ = as.cloud.vmCache.Delete(vmName)
+	}()
+
+	future, rerr := as.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
+	if rerr != nil {
+		klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr)
+		if rerr.HTTPStatusCode == http.StatusNotFound {
+			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName)
+			disks := as.filterNonExistingDisks(ctx, *newVM.VirtualMachineProperties.StorageProfile.DataDisks)
+			newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks
+			future, rerr = as.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
+		}
+	}
+
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, rerr)
+	if rerr != nil {
+		return future, rerr.Error()
+	}
+	return future, nil
+}
+
+// WaitForUpdateResult waits for the response of the update request
+func (as *availabilitySet) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error {
+	if rerr := as.VirtualMachinesClient.WaitForUpdateResult(ctx, future, resourceGroupName, source); rerr != nil {
+		return rerr.Error()
+	}
+	return nil
+}
+
+// DetachDisk detaches a disk from VM
+func (as *availabilitySet) DetachDisk(nodeName types.NodeName, diskMap map[string]string) error {
+	vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		// if host doesn't exist, no need to detach
+		klog.Warningf("azureDisk - cannot find node %s, skip detaching disk list(%s)", nodeName, diskMap)
+		return nil
+	}
+
+	vmName := mapNodeNameToVMName(nodeName)
+	nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
+	if err != nil {
+		return err
+	}
+
+	disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+	copy(disks, *vm.StorageProfile.DataDisks)
+
+	bFoundDisk := false
+	for i, disk := range disks {
+		for diskURI, diskName := range diskMap {
+			if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
+				(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) ||
+				(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
+				// found the disk
+				klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
+				disks[i].ToBeDetached = to.BoolPtr(true)
+				bFoundDisk = true
+			}
+		}
+	}
+
+	if !bFoundDisk {
+		// only log here, next action is to update VM status with original meta data
+		klog.Errorf("detach azure disk on node(%s): disk list(%s) not found", nodeName, diskMap)
+	} else {
+		if strings.EqualFold(as.cloud.Environment.Name, consts.AzureStackCloudName) && !as.Config.DisableAzureStackCloud {
+			// Azure stack does not support ToBeDetached flag, use original way to detach disk
+			newDisks := []compute.DataDisk{}
+			for _, disk := range disks {
+				if !to.Bool(disk.ToBeDetached) {
+					newDisks = append(newDisks, disk)
+				}
+			}
+			disks = newDisks
+		}
+	}
+
+	newVM := compute.VirtualMachineUpdate{
+		VirtualMachineProperties: &compute.VirtualMachineProperties{
+			StorageProfile: &compute.StorageProfile{
+				DataDisks: &disks,
+			},
+		},
+	}
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap)
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	// Invalidate the cache right after updating
+	defer func() {
+		_ = as.cloud.vmCache.Delete(vmName)
+	}()
+
+	rerr := as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
+	if rerr != nil {
+		klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr)
+		if rerr.HTTPStatusCode == http.StatusNotFound {
+			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName)
+			disks := as.filterNonExistingDisks(ctx, *vm.StorageProfile.DataDisks)
+			newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks
+			rerr = as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
+		}
+	}
+
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, rerr)
+	if rerr != nil {
+		return rerr.Error()
+	}
+	return nil
+}
+
+// UpdateVM updates a vm
+func (as *availabilitySet) UpdateVM(nodeName types.NodeName) error {
+	vmName := mapNodeNameToVMName(nodeName)
+	nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
+	if err != nil {
+		return err
+	}
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s)", nodeResourceGroup, vmName)
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	// Invalidate the cache right after updating
+	defer func() {
+		_ = as.cloud.vmCache.Delete(vmName)
+	}()
+
+	rerr := as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, compute.VirtualMachineUpdate{}, "update_vm")
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - returned with %v", nodeResourceGroup, vmName, rerr)
+	if rerr != nil {
+		return rerr.Error()
+	}
+	return nil
+}
+
+// GetDataDisks gets a list of data disks attached to the node.
+func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) {
+	vm, err := as.getVirtualMachine(nodeName, crt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if vm.StorageProfile.DataDisks == nil {
+		return nil, nil, nil
+	}
+
+	return *vm.StorageProfile.DataDisks, vm.ProvisioningState, nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c9a3268e48c421c8361b46a52f0fec131077226
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"net/http"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+// AttachDisk attaches a disk to vm
+func (ss *ScaleSet) AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) {
+	vmName := mapNodeNameToVMName(nodeName)
+	ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		return nil, err
+	}
+
+	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
+	if err != nil {
+		return nil, err
+	}
+
+	disks := []compute.DataDisk{}
+	if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
+		disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+		copy(disks, *vm.StorageProfile.DataDisks)
+	}
+
+	for k, v := range diskMap {
+		diskURI := k
+		opt := v
+		attached := false
+		for _, disk := range *vm.StorageProfile.DataDisks {
+			if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) {
+				attached = true
+				break
+			}
+		}
+		if attached {
+			klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s)", diskURI, nodeName)
+			continue
+		}
+
+		managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
+		if opt.diskEncryptionSetID == "" {
+			if vm.StorageProfile.OsDisk != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil {
+				// set diskEncryptionSet as value of os disk by default
+				opt.diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID
+			}
+		}
+		if opt.diskEncryptionSetID != "" {
+			managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.diskEncryptionSetID}
+		}
+		disks = append(disks,
+			compute.DataDisk{
+				Name:                    &opt.diskName,
+				Lun:                     &opt.lun,
+				Caching:                 opt.cachingMode,
+				CreateOption:            "attach",
+				ManagedDisk:             managedDisk,
+				WriteAcceleratorEnabled: to.BoolPtr(opt.writeAcceleratorEnabled),
+			})
+	}
+
+	newVM := compute.VirtualMachineScaleSetVM{
+		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
+			StorageProfile: &compute.StorageProfile{
+				DataDisks: &disks,
+			},
+		},
+	}
+
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	// Invalidate the cache right after updating
+	defer func() {
+		_ = ss.deleteCacheForNode(vmName)
+	}()
+
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, nodeName, diskMap)
+	future, rerr := ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
+	if rerr != nil {
+		klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr)
+		if rerr.HTTPStatusCode == http.StatusNotFound {
+			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName)
+			disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks)
+			newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks
+			future, rerr = ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
+		}
+	}
+
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s, %s) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr)
+	if rerr != nil {
+		return future, rerr.Error()
+	}
+	return future, nil
+}
+
+// WaitForUpdateResult waits for the response of the update request
+func (ss *ScaleSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error {
+	if rerr := ss.VirtualMachineScaleSetVMsClient.WaitForUpdateResult(ctx, future, resourceGroupName, source); rerr != nil {
+		return rerr.Error()
+	}
+	return nil
+}
+
+// DetachDisk detaches a disk from VM
+func (ss *ScaleSet) DetachDisk(nodeName types.NodeName, diskMap map[string]string) error {
+	vmName := mapNodeNameToVMName(nodeName)
+	ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		return err
+	}
+
+	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
+	if err != nil {
+		return err
+	}
+
+	disks := []compute.DataDisk{}
+	if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
+		disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+		copy(disks, *vm.StorageProfile.DataDisks)
+	}
+	bFoundDisk := false
+	for i, disk := range disks {
+		for diskURI, diskName := range diskMap {
+			if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
+				(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) ||
+				(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
+				// found the disk
+				klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
+				disks[i].ToBeDetached = to.BoolPtr(true)
+				bFoundDisk = true
+			}
+		}
+	}
+
+	if !bFoundDisk {
+		// only log here, next action is to update VM status with original meta data
+		klog.Errorf("detach azure disk on node(%s): disk list(%s) not found", nodeName, diskMap)
+	} else {
+		if strings.EqualFold(ss.cloud.Environment.Name, consts.AzureStackCloudName) && !ss.Config.DisableAzureStackCloud {
+			// Azure stack does not support ToBeDetached flag, use original way to detach disk
+			newDisks := []compute.DataDisk{}
+			for _, disk := range disks {
+				if !to.Bool(disk.ToBeDetached) {
+					newDisks = append(newDisks, disk)
+				}
+			}
+			disks = newDisks
+		}
+	}
+
+	newVM := compute.VirtualMachineScaleSetVM{
+		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
+			StorageProfile: &compute.StorageProfile{
+				DataDisks: &disks,
+			},
+		},
+	}
+
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	// Invalidate the cache right after updating
+	defer func() {
+		_ = ss.deleteCacheForNode(vmName)
+	}()
+
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, nodeName, diskMap)
+	rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
+	if rerr != nil {
+		klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr)
+		if rerr.HTTPStatusCode == http.StatusNotFound {
+			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName)
+			disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks)
+			newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks
+			rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
+		}
+	}
+
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%v) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr)
+	if rerr != nil {
+		return rerr.Error()
+	}
+	return nil
+}
+
+// UpdateVM updates a vm
+func (ss *ScaleSet) UpdateVM(nodeName types.NodeName) error {
+	vmName := mapNodeNameToVMName(nodeName)
+	ssName, instanceID, _, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		return err
+	}
+
+	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	// Invalidate the cache right after updating
+	defer func() {
+		_ = ss.deleteCacheForNode(vmName)
+	}()
+
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s)", nodeResourceGroup, nodeName)
+	rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, compute.VirtualMachineScaleSetVM{}, "update_vmss_instance")
+
+	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - returned with %v", nodeResourceGroup, nodeName, rerr)
+	if rerr != nil {
+		return rerr.Error()
+	}
+	return nil
+}
+
+// GetDataDisks gets a list of data disks attached to the node.
+func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) {
+	_, _, vm, err := ss.getVmssVM(string(nodeName), crt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if vm.StorageProfile == nil || vm.StorageProfile.DataDisks == nil {
+		return nil, nil, nil
+	}
+
+	return *vm.StorageProfile.DataDisks, vm.ProvisioningState, nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go
new file mode 100644
index 0000000000000000000000000000000000000000..abe343f43ad48536af1eae7b0220fad9a9215d38
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"fmt"
+
+	"github.com/golang/mock/gomock"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/tools/record"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/auth"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+var (
+	errPreconditionFailedEtagMismatch = fmt.Errorf("PreconditionFailedEtagMismatch")
+)
+
+// NewTestScaleSet creates a fake ScaleSet for unit test
+func NewTestScaleSet(ctrl *gomock.Controller) (*ScaleSet, error) {
+	return newTestScaleSetWithState(ctrl)
+}
+
+func newTestScaleSetWithState(ctrl *gomock.Controller) (*ScaleSet, error) {
+	cloud := GetTestCloud(ctrl)
+	ss, err := newScaleSet(cloud)
+	if err != nil {
+		return nil, err
+	}
+
+	return ss.(*ScaleSet), nil
+}
+
+// GetTestCloud returns a fake azure cloud for unit tests in Azure related CSI drivers
+func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
+	az = &Cloud{
+		Config: Config{
+			AzureAuthConfig: auth.AzureAuthConfig{
+				TenantID:       "tenant",
+				SubscriptionID: "subscription",
+			},
+			ResourceGroup:                            "rg",
+			VnetResourceGroup:                        "rg",
+			RouteTableResourceGroup:                  "rg",
+			SecurityGroupResourceGroup:               "rg",
+			Location:                                 "westus",
+			VnetName:                                 "vnet",
+			SubnetName:                               "subnet",
+			SecurityGroupName:                        "nsg",
+			RouteTableName:                           "rt",
+			PrimaryAvailabilitySetName:               "as",
+			PrimaryScaleSetName:                      "vmss",
+			MaximumLoadBalancerRuleCount:             250,
+			VMType:                                   consts.VMTypeStandard,
+			LoadBalancerBackendPoolConfigurationType: consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration,
+		},
+		nodeZones:                map[string]sets.String{},
+		nodeInformerSynced:       func() bool { return true },
+		nodeResourceGroups:       map[string]string{},
+		unmanagedNodes:           sets.NewString(),
+		excludeLoadBalancerNodes: sets.NewString(),
+		nodePrivateIPs:           map[string]sets.String{},
+		routeCIDRs:               map[string]string{},
+		eventRecorder:            &record.FakeRecorder{},
+	}
+	az.DisksClient = mockdiskclient.NewMockInterface(ctrl)
+	az.SnapshotsClient = mocksnapshotclient.NewMockInterface(ctrl)
+	az.InterfacesClient = mockinterfaceclient.NewMockInterface(ctrl)
+	az.LoadBalancerClient = mockloadbalancerclient.NewMockInterface(ctrl)
+	az.PublicIPAddressesClient = mockpublicipclient.NewMockInterface(ctrl)
+	az.RoutesClient = mockrouteclient.NewMockInterface(ctrl)
+	az.RouteTablesClient = mockroutetableclient.NewMockInterface(ctrl)
+	az.SecurityGroupsClient = mocksecuritygroupclient.NewMockInterface(ctrl)
+	az.SubnetsClient = mocksubnetclient.NewMockInterface(ctrl)
+	az.VirtualMachineScaleSetsClient = mockvmssclient.NewMockInterface(ctrl)
+	az.VirtualMachineScaleSetVMsClient = mockvmssvmclient.NewMockInterface(ctrl)
+	az.VirtualMachinesClient = mockvmclient.NewMockInterface(ctrl)
+	az.VMSet, _ = newAvailabilitySet(az)
+	az.vmCache, _ = az.newVMCache()
+	az.lbCache, _ = az.newLBCache()
+	az.nsgCache, _ = az.newNSGCache()
+	az.rtCache, _ = az.newRouteTableCache()
+	az.LoadBalancerBackendPool = NewMockBackendPool(ctrl)
+
+	_ = initDiskControllers(az)
+
+	az.regionZonesMap = map[string][]string{az.Location: {"1", "2", "3"}}
+
+	return az
+}
+
+// GetTestCloudWithExtendedLocation returns a fake azure cloud for unit tests in Azure related CSI drivers with extended location.
+func GetTestCloudWithExtendedLocation(ctrl *gomock.Controller) (az *Cloud) {
+	az = GetTestCloud(ctrl)
+	az.Config.ExtendedLocationName = "microsoftlosangeles1"
+	az.Config.ExtendedLocationType = "EdgeZone"
+	az.controllerCommon.extendedLocation = &ExtendedLocation{
+		Name: az.Config.ExtendedLocationName,
+		Type: az.Config.ExtendedLocationType,
+	}
+	return az
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_file.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_file.go
new file mode 100644
index 0000000000000000000000000000000000000000..f3046f2cc585d4b27b4397ecb4168dd0245f393e
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_file.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient"
+)
+
+// createFileShare creates a file share.
+func (az *Cloud) createFileShare(resourceGroupName, accountName string, shareOptions *fileclient.ShareOptions) error {
+	return az.FileClient.CreateFileShare(resourceGroupName, accountName, shareOptions)
+}
+
+func (az *Cloud) deleteFileShare(resourceGroupName, accountName, name string) error {
+	return az.FileClient.DeleteFileShare(resourceGroupName, accountName, name)
+}
+
+func (az *Cloud) resizeFileShare(resourceGroupName, accountName, name string, sizeGiB int) error {
+	return az.FileClient.ResizeFileShare(resourceGroupName, accountName, name, sizeGiB)
+}
+
+func (az *Cloud) getFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error) {
+	return az.FileClient.GetFileShare(resourceGroupName, accountName, name)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go
new file mode 100644
index 0000000000000000000000000000000000000000..629b8ec106a931da8c348b0c9a156b14929db2c7
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	"k8s.io/klog/v2"
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+// NetworkMetadata contains metadata about an instance's network
+type NetworkMetadata struct {
+	Interface []NetworkInterface `json:"interface"`
+}
+
+// NetworkInterface represents an instances network interface.
+type NetworkInterface struct {
+	IPV4 NetworkData `json:"ipv4"`
+	IPV6 NetworkData `json:"ipv6"`
+	MAC  string      `json:"macAddress"`
+}
+
+// NetworkData contains IP information for a network.
+type NetworkData struct {
+	IPAddress []IPAddress `json:"ipAddress"`
+	Subnet    []Subnet    `json:"subnet"`
+}
+
+// IPAddress represents IP address information.
+type IPAddress struct {
+	PrivateIP string `json:"privateIpAddress"`
+	PublicIP  string `json:"publicIpAddress"`
+}
+
+// Subnet represents subnet information.
+type Subnet struct {
+	Address string `json:"address"`
+	Prefix  string `json:"prefix"`
+}
+
+// ComputeMetadata represents compute information
+type ComputeMetadata struct {
+	Environment    string `json:"azEnvironment,omitempty"`
+	SKU            string `json:"sku,omitempty"`
+	Name           string `json:"name,omitempty"`
+	Zone           string `json:"zone,omitempty"`
+	VMSize         string `json:"vmSize,omitempty"`
+	OSType         string `json:"osType,omitempty"`
+	Location       string `json:"location,omitempty"`
+	FaultDomain    string `json:"platformFaultDomain,omitempty"`
+	UpdateDomain   string `json:"platformUpdateDomain,omitempty"`
+	ResourceGroup  string `json:"resourceGroupName,omitempty"`
+	VMScaleSetName string `json:"vmScaleSetName,omitempty"`
+	SubscriptionID string `json:"subscriptionId,omitempty"`
+}
+
+// InstanceMetadata represents instance information.
+type InstanceMetadata struct {
+	Compute *ComputeMetadata `json:"compute,omitempty"`
+	Network *NetworkMetadata `json:"network,omitempty"`
+}
+
+// PublicIPMetadata represents the public IP metadata.
+type PublicIPMetadata struct {
+	FrontendIPAddress string `json:"frontendIpAddress,omitempty"`
+	PrivateIPAddress  string `json:"privateIpAddress,omitempty"`
+}
+
+// LoadbalancerProfile represents load balancer profile in IMDS.
+type LoadbalancerProfile struct {
+	PublicIPAddresses []PublicIPMetadata `json:"publicIpAddresses,omitempty"`
+}
+
+// LoadBalancerMetadata represents load balancer metadata.
+type LoadBalancerMetadata struct {
+	LoadBalancer *LoadbalancerProfile `json:"loadbalancer,omitempty"`
+}
+
+// InstanceMetadataService knows how to query the Azure instance metadata server.
+type InstanceMetadataService struct {
+	imdsServer string
+	imsCache   *azcache.TimedCache
+}
+
+// NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object.
+func NewInstanceMetadataService(imdsServer string) (*InstanceMetadataService, error) {
+	ims := &InstanceMetadataService{
+		imdsServer: imdsServer,
+	}
+
+	imsCache, err := azcache.NewTimedcache(consts.MetadataCacheTTL, ims.getMetadata)
+	if err != nil {
+		return nil, err
+	}
+
+	ims.imsCache = imsCache
+	return ims, nil
+}
+
+func (ims *InstanceMetadataService) getMetadata(key string) (interface{}, error) {
+	instanceMetadata, err := ims.getInstanceMetadata(key)
+	if err != nil {
+		return nil, err
+	}
+
+	if instanceMetadata.Network != nil && len(instanceMetadata.Network.Interface) > 0 {
+		netInterface := instanceMetadata.Network.Interface[0]
+		if (len(netInterface.IPV4.IPAddress) > 0 && len(netInterface.IPV4.IPAddress[0].PublicIP) > 0) ||
+			(len(netInterface.IPV6.IPAddress) > 0 && len(netInterface.IPV6.IPAddress[0].PublicIP) > 0) {
+			// Return if a public IP address is already part of the instance metadata.
+			return instanceMetadata, nil
+		}
+
+		loadBalancerMetadata, err := ims.getLoadBalancerMetadata()
+		if err != nil || loadBalancerMetadata == nil || loadBalancerMetadata.LoadBalancer == nil {
+			// Log a warning since loadbalancer metadata may not be available when the VM
+			// is not in standard LoadBalancer backend address pool.
+			klog.V(4).Infof("Warning: failed to get loadbalancer metadata: %v", err)
+			return instanceMetadata, nil
+		}
+
+		publicIPs := loadBalancerMetadata.LoadBalancer.PublicIPAddresses
+		if len(netInterface.IPV4.IPAddress) > 0 && len(netInterface.IPV4.IPAddress[0].PrivateIP) > 0 {
+			for _, pip := range publicIPs {
+				if pip.PrivateIPAddress == netInterface.IPV4.IPAddress[0].PrivateIP {
+					netInterface.IPV4.IPAddress[0].PublicIP = pip.FrontendIPAddress
+					break
+				}
+			}
+		}
+		if len(netInterface.IPV6.IPAddress) > 0 && len(netInterface.IPV6.IPAddress[0].PrivateIP) > 0 {
+			for _, pip := range publicIPs {
+				if pip.PrivateIPAddress == netInterface.IPV6.IPAddress[0].PrivateIP {
+					netInterface.IPV6.IPAddress[0].PublicIP = pip.FrontendIPAddress
+					break
+				}
+			}
+		}
+	}
+
+	return instanceMetadata, nil
+}
+
+func (ims *InstanceMetadataService) getInstanceMetadata(key string) (*InstanceMetadata, error) {
+	req, err := http.NewRequest("GET", ims.imdsServer+consts.ImdsInstanceURI, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Metadata", "True")
+	req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider")
+
+	q := req.URL.Query()
+	q.Add("format", "json")
+	q.Add("api-version", consts.ImdsInstanceAPIVersion)
+	req.URL.RawQuery = q.Encode()
+
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("failure of getting instance metadata with response %q", resp.Status)
+	}
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	obj := InstanceMetadata{}
+	err = json.Unmarshal(data, &obj)
+	if err != nil {
+		return nil, err
+	}
+
+	return &obj, nil
+}
+
+func (ims *InstanceMetadataService) getLoadBalancerMetadata() (*LoadBalancerMetadata, error) {
+	req, err := http.NewRequest("GET", ims.imdsServer+consts.ImdsLoadBalancerURI, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Metadata", "True")
+	req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider")
+
+	q := req.URL.Query()
+	q.Add("format", "json")
+	q.Add("api-version", consts.ImdsLoadBalancerAPIVersion)
+	req.URL.RawQuery = q.Encode()
+
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("failure of getting loadbalancer metadata with response %q", resp.Status)
+	}
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	obj := LoadBalancerMetadata{}
+	err = json.Unmarshal(data, &obj)
+	if err != nil {
+		return nil, err
+	}
+
+	return &obj, nil
+}
+
+// GetMetadata gets instance metadata from cache.
+// crt determines if we can get data from stalled cache/need fresh if cache expired.
+func (ims *InstanceMetadataService) GetMetadata(crt azcache.AzureCacheReadType) (*InstanceMetadata, error) {
+	cache, err := ims.imsCache.Get(consts.MetadataCacheKey, crt)
+	if err != nil {
+		return nil, err
+	}
+
+	// Cache shouldn't be nil, but added a check in case something is wrong.
+	if cache == nil {
+		return nil, fmt.Errorf("failure of getting instance metadata")
+	}
+
+	if metadata, ok := cache.(*InstanceMetadata); ok {
+		return metadata, nil
+	}
+
+	return nil, fmt.Errorf("failure of getting instance metadata")
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ef038d2fbe0551073abc560fdd620dff695d963
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go
@@ -0,0 +1,542 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+const (
+	vmPowerStatePrefix       = "PowerState/"
+	vmPowerStateStopped      = "stopped"
+	vmPowerStateDeallocated  = "deallocated"
+	vmPowerStateDeallocating = "deallocating"
+
+	// nodeNameEnvironmentName is the environment variable name for getting node name.
+	// It is only used for out-of-tree cloud provider.
+	nodeNameEnvironmentName = "NODE_NAME"
+)
+
+var (
+	errNodeNotInitialized = fmt.Errorf("providerID is empty, the node is not initialized yet")
+)
+
+func (az *Cloud) addressGetter(nodeName types.NodeName) ([]v1.NodeAddress, error) {
+	ip, publicIP, err := az.getIPForMachine(nodeName)
+	if err != nil {
+		klog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
+		return nil, err
+	}
+
+	addresses := []v1.NodeAddress{
+		{Type: v1.NodeInternalIP, Address: ip},
+		{Type: v1.NodeHostName, Address: string(nodeName)},
+	}
+	if len(publicIP) > 0 {
+		addresses = append(addresses, v1.NodeAddress{
+			Type:    v1.NodeExternalIP,
+			Address: publicIP,
+		})
+	}
+	return addresses, nil
+}
+
+// NodeAddresses returns the addresses of the specified instance.
+func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
+	// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
+	unmanaged, err := az.IsNodeUnmanaged(string(name))
+	if err != nil {
+		return nil, err
+	}
+	if unmanaged {
+		klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name)
+		return nil, nil
+	}
+
+	if az.UseInstanceMetadata {
+		metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeDefault)
+		if err != nil {
+			return nil, err
+		}
+
+		if metadata.Compute == nil || metadata.Network == nil {
+			return nil, fmt.Errorf("failure of getting instance metadata")
+		}
+
+		isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
+		if err != nil {
+			return nil, err
+		}
+
+		// Not local instance, get addresses from Azure ARM API.
+		if !isLocalInstance {
+			if az.VMSet != nil {
+				return az.addressGetter(name)
+			}
+
+			// vmSet == nil indicates credentials are not provided.
+			return nil, fmt.Errorf("no credentials provided for Azure cloud provider")
+		}
+
+		return az.getLocalInstanceNodeAddresses(metadata.Network.Interface, string(name))
+	}
+
+	return az.addressGetter(name)
+}
+
+func (az *Cloud) getLocalInstanceNodeAddresses(netInterfaces []NetworkInterface, nodeName string) ([]v1.NodeAddress, error) {
+	if len(netInterfaces) == 0 {
+		return nil, fmt.Errorf("no interface is found for the instance")
+	}
+
+	// Use ip address got from instance metadata.
+	netInterface := netInterfaces[0]
+	addresses := []v1.NodeAddress{
+		{Type: v1.NodeHostName, Address: nodeName},
+	}
+	if len(netInterface.IPV4.IPAddress) > 0 && len(netInterface.IPV4.IPAddress[0].PrivateIP) > 0 {
+		address := netInterface.IPV4.IPAddress[0]
+		addresses = append(addresses, v1.NodeAddress{
+			Type:    v1.NodeInternalIP,
+			Address: address.PrivateIP,
+		})
+		if len(address.PublicIP) > 0 {
+			addresses = append(addresses, v1.NodeAddress{
+				Type:    v1.NodeExternalIP,
+				Address: address.PublicIP,
+			})
+		}
+	}
+	if len(netInterface.IPV6.IPAddress) > 0 && len(netInterface.IPV6.IPAddress[0].PrivateIP) > 0 {
+		address := netInterface.IPV6.IPAddress[0]
+		addresses = append(addresses, v1.NodeAddress{
+			Type:    v1.NodeInternalIP,
+			Address: address.PrivateIP,
+		})
+		if len(address.PublicIP) > 0 {
+			addresses = append(addresses, v1.NodeAddress{
+				Type:    v1.NodeExternalIP,
+				Address: address.PublicIP,
+			})
+		}
+	}
+
+	if len(addresses) == 1 {
+		// No IP addresses were returned by the instance metadata service; clean up the cache and report an error.
+		_ = az.Metadata.imsCache.Delete(consts.MetadataCacheKey)
+		return nil, fmt.Errorf("get empty IP addresses from instance metadata service")
+	}
+	return addresses, nil
+}
+
+// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
+// This method will not be called from the node that is requesting this ID. i.e. metadata service
+// and other local methods cannot be used here
+func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
+	if providerID == "" {
+		return nil, errNodeNotInitialized
+	}
+
+	// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
+	if az.IsNodeUnmanagedByProviderID(providerID) {
+		klog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID)
+		return nil, nil
+	}
+
+	if az.VMSet == nil {
+		// vmSet == nil indicates credentials are not provided.
+		return nil, fmt.Errorf("no credentials provided for Azure cloud provider")
+	}
+
+	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
+	if err != nil {
+		return nil, err
+	}
+
+	return az.NodeAddresses(ctx, name)
+}
+
+// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
+// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
+func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
+	if providerID == "" {
+		return false, errNodeNotInitialized
+	}
+
+	// Returns true for unmanaged nodes because azure cloud provider always assumes them exists.
+	if az.IsNodeUnmanagedByProviderID(providerID) {
+		klog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID)
+		return true, nil
+	}
+
+	if az.VMSet == nil {
+		// vmSet == nil indicates credentials are not provided.
+		return false, fmt.Errorf("no credentials provided for Azure cloud provider")
+	}
+
+	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
+	if err != nil {
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	_, err = az.InstanceID(ctx, name)
+	if err != nil {
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	return true, nil
+}
+
+// InstanceExists returns true if the instance for the given node exists according to the cloud provider.
+// Use the node.name or node.spec.providerID field to find the node in the cloud provider.
+func (az *Cloud) InstanceExists(ctx context.Context, node *v1.Node) (bool, error) {
+	if node == nil {
+		return false, nil
+	}
+	providerID := node.Spec.ProviderID
+	if providerID == "" {
+		var err error
+		providerID, err = cloudprovider.GetInstanceProviderID(ctx, az, types.NodeName(node.Name))
+		if err != nil {
+			klog.Errorf("InstanceExists: failed to get the provider ID by node name %s: %v", node.Name, err)
+			return false, err
+		}
+	}
+
+	return az.InstanceExistsByProviderID(ctx, providerID)
+}
+
+// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
+func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
+	if providerID == "" {
+		return false, nil
+	}
+	if az.VMSet == nil {
+		// vmSet == nil indicates credentials are not provided.
+		return false, fmt.Errorf("no credentials provided for Azure cloud provider")
+	}
+
+	nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
+	if err != nil {
+		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			return false, nil
+		}
+
+		return false, err
+	}
+
+	powerStatus, err := az.VMSet.GetPowerStatusByNodeName(string(nodeName))
+	if err != nil {
+		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			return false, nil
+		}
+
+		return false, err
+	}
+	klog.V(3).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName)
+
+	provisioningState, err := az.VMSet.GetProvisioningStateByNodeName(string(nodeName))
+	if err != nil {
+		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			return false, nil
+		}
+
+		return false, err
+	}
+	klog.V(3).Infof("InstanceShutdownByProviderID gets provisioning state %q for node %q", provisioningState, nodeName)
+
+	status := strings.ToLower(powerStatus)
+	provisioningSucceeded := strings.EqualFold(strings.ToLower(provisioningState), strings.ToLower(string(compute.ProvisioningStateSucceeded)))
+	return provisioningSucceeded && (status == vmPowerStateStopped || status == vmPowerStateDeallocated || status == vmPowerStateDeallocating), nil
+}
+
+// InstanceShutdown returns true if the instance is shutdown according to the cloud provider.
+// Use the node.name or node.spec.providerID field to find the node in the cloud provider.
+func (az *Cloud) InstanceShutdown(ctx context.Context, node *v1.Node) (bool, error) {
+	if node == nil {
+		return false, nil
+	}
+	providerID := node.Spec.ProviderID
+	if providerID == "" {
+		var err error
+		providerID, err = cloudprovider.GetInstanceProviderID(ctx, az, types.NodeName(node.Name))
+		if err != nil {
+			// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
+			if strings.Contains(err.Error(), cloudprovider.InstanceNotFound.Error()) {
+				return false, nil
+			}
+
+			klog.Errorf("InstanceShutdown: failed to get the provider ID by node name %s: %v", node.Name, err)
+			return false, err
+		}
+	}
+
+	return az.InstanceShutdownByProviderID(ctx, providerID)
+}
+
+func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) {
+	var err error
+	nodeName := mapNodeNameToVMName(name)
+
+	// VMSS vmName is not same with hostname, use hostname instead.
+	if az.VMType == consts.VMTypeVMSS {
+		metadataVMName, err = os.Hostname()
+		if err != nil {
+			return false, err
+		}
+
+		// Use name from env variable "NODE_NAME" if it is set.
+		nodeNameEnv := os.Getenv(nodeNameEnvironmentName)
+		if nodeNameEnv != "" {
+			metadataVMName = nodeNameEnv
+		}
+	}
+
+	metadataVMName = strings.ToLower(metadataVMName)
+	return metadataVMName == nodeName, nil
+}
+
+// InstanceID returns the cloud provider ID of the specified instance.
+// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
+func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
+	nodeName := mapNodeNameToVMName(name)
+	unmanaged, err := az.IsNodeUnmanaged(nodeName)
+	if err != nil {
+		return "", err
+	}
+	if unmanaged {
+		// InstanceID is same with nodeName for unmanaged nodes.
+		klog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name)
+		return nodeName, nil
+	}
+
+	if az.UseInstanceMetadata {
+		metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeDefault)
+		if err != nil {
+			return "", err
+		}
+
+		if metadata.Compute == nil {
+			return "", fmt.Errorf("failure of getting instance metadata")
+		}
+
+		isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
+		if err != nil {
+			return "", err
+		}
+
+		// Not local instance, get instanceID from Azure ARM API.
+		if !isLocalInstance {
+			if az.VMSet != nil {
+				return az.VMSet.GetInstanceIDByNodeName(nodeName)
+			}
+
+			// vmSet == nil indicates credentials are not provided.
+			return "", fmt.Errorf("no credentials provided for Azure cloud provider")
+		}
+		return az.getLocalInstanceProviderID(metadata, nodeName)
+	}
+
+	return az.VMSet.GetInstanceIDByNodeName(nodeName)
+}
+
+func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, nodeName string) (string, error) {
+	// Get resource group name and subscription ID.
+	resourceGroup := strings.ToLower(metadata.Compute.ResourceGroup)
+	subscriptionID := strings.ToLower(metadata.Compute.SubscriptionID)
+
+	// Compose instanceID based on nodeName for standard instance.
+	if metadata.Compute.VMScaleSetName == "" {
+		return az.getStandardMachineID(subscriptionID, resourceGroup, nodeName), nil
+	}
+
+	// Get scale set name and instanceID from vmName for vmss.
+	ssName, instanceID, err := extractVmssVMName(metadata.Compute.Name)
+	if err != nil {
+		if errors.Is(err, ErrorNotVmssInstance) {
+			// Compose machineID for standard Node.
+			return az.getStandardMachineID(subscriptionID, resourceGroup, nodeName), nil
+		}
+		return "", err
+	}
+	// Compose instanceID based on ssName and instanceID for vmss instance.
+	return az.getVmssMachineID(subscriptionID, resourceGroup, ssName, instanceID), nil
+}
+
+// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
+// This method will not be called from the node that is requesting this ID. i.e. metadata service
+// and other local methods cannot be used here
+func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
+	if providerID == "" {
+		return "", errNodeNotInitialized
+	}
+
+	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
+	if az.IsNodeUnmanagedByProviderID(providerID) {
+		klog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID)
+		return "", nil
+	}
+
+	if az.VMSet == nil {
+		// vmSet == nil indicates credentials are not provided.
+		return "", fmt.Errorf("no credentials provided for Azure cloud provider")
+	}
+
+	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
+	if err != nil {
+		return "", err
+	}
+
+	return az.InstanceType(ctx, name)
+}
+
+// InstanceType returns the type of the specified instance.
+// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
+// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
+//       Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
+func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
+	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
+	unmanaged, err := az.IsNodeUnmanaged(string(name))
+	if err != nil {
+		return "", err
+	}
+	if unmanaged {
+		klog.V(4).Infof("InstanceType: omitting unmanaged node %q", name)
+		return "", nil
+	}
+
+	if az.UseInstanceMetadata {
+		metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeDefault)
+		if err != nil {
+			return "", err
+		}
+
+		if metadata.Compute == nil {
+			return "", fmt.Errorf("failure of getting instance metadata")
+		}
+
+		isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
+		if err != nil {
+			return "", err
+		}
+		if !isLocalInstance {
+			if az.VMSet != nil {
+				return az.VMSet.GetInstanceTypeByNodeName(string(name))
+			}
+
+			// vmSet == nil indicates credentials are not provided.
+			return "", fmt.Errorf("no credentials provided for Azure cloud provider")
+		}
+
+		if metadata.Compute.VMSize != "" {
+			return metadata.Compute.VMSize, nil
+		}
+	}
+
+	if az.VMSet == nil {
+		// vmSet == nil indicates credentials are not provided.
+		return "", fmt.Errorf("no credentials provided for Azure cloud provider")
+	}
+
+	return az.VMSet.GetInstanceTypeByNodeName(string(name))
+}
+
+// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
+// expected format for the key is standard ssh-keygen format: <protocol> <blob>
+func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
+	return cloudprovider.NotImplemented
+}
+
+// CurrentNodeName returns the name of the node we are currently running on.
+// On Azure this is the hostname, so we just return the hostname.
+func (az *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
+	return types.NodeName(hostname), nil
+}
+
+// InstanceMetadata returns the instance's metadata. The values returned in InstanceMetadata are
+// translated into specific fields in the Node object on registration.
+// Use the node.name or node.spec.providerID field to find the node in the cloud provider.
+func (az *Cloud) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloudprovider.InstanceMetadata, error) {
+	if node == nil {
+		return &cloudprovider.InstanceMetadata{}, nil
+	}
+
+	meta := cloudprovider.InstanceMetadata{}
+
+	if node.Spec.ProviderID != "" {
+		meta.ProviderID = node.Spec.ProviderID
+	} else {
+		providerID, err := cloudprovider.GetInstanceProviderID(ctx, az, types.NodeName(node.Name))
+		if err != nil {
+			klog.Errorf("InstanceMetadata: failed to get the provider ID by node name %s: %v", node.Name, err)
+			return nil, err
+		}
+		meta.ProviderID = providerID
+	}
+
+	instanceType, err := az.InstanceType(ctx, types.NodeName(node.Name))
+	if err != nil {
+		klog.Errorf("InstanceMetadata: failed to get the instance type of %s: %v", node.Name, err)
+		return &cloudprovider.InstanceMetadata{}, err
+	}
+	meta.InstanceType = instanceType
+
+	nodeAddresses, err := az.NodeAddresses(ctx, types.NodeName(node.Name))
+	if err != nil {
+		klog.Errorf("InstanceMetadata: failed to get the node address of %s: %v", node.Name, err)
+		return &cloudprovider.InstanceMetadata{}, err
+	}
+	meta.NodeAddresses = nodeAddresses
+
+	zone, err := az.GetZoneByNodeName(ctx, types.NodeName(node.Name))
+	if err != nil {
+		klog.Errorf("InstanceMetadata: failed to get the node zone of %s: %v", node.Name, err)
+		return &cloudprovider.InstanceMetadata{}, err
+	}
+	meta.Zone = zone.FailureDomain
+	meta.Region = zone.Region
+
+	return &meta, nil
+}
+
+// mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name
+// This is a simple string cast.
+func mapNodeNameToVMName(nodeName types.NodeName) string {
+	return string(nodeName)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
new file mode 100644
index 0000000000000000000000000000000000000000..0cc786ffe22aec2847e88a1be3ec078ce12f691d
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
@@ -0,0 +1,3201 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	v1 "k8s.io/api/core/v1"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	cloudprovider "k8s.io/cloud-provider"
+	servicehelpers "k8s.io/cloud-provider/service/helpers"
+	"k8s.io/klog/v2"
+	utilnet "k8s.io/utils/net"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// GetLoadBalancer returns whether the specified load balancer and its components exist, and
+// if so, what its status is.
+// "exists" is true when either the load balancer or its public IP is found.
+func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
+	// Since public IP is not a part of the load balancer on Azure,
+	// there is a chance that we could orphan public IP resources while we delete the load balancer (kubernetes/kubernetes#80571).
+	// We need to make sure the existence of the load balancer depends on the load balancer resource and public IP resource on Azure.
+	existsPip := func() bool {
+		// Lookup errors are deliberately treated as "PIP does not exist"
+		// so that a transient failure here does not mask the LB check below.
+		pipName, _, err := az.determinePublicIPName(clusterName, service)
+		if err != nil {
+			return false
+		}
+		pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
+		_, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
+		if err != nil {
+			return false
+		}
+		return existsPip
+	}()
+
+	_, status, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false, []network.LoadBalancer{})
+	if err != nil {
+		return nil, existsPip, err
+	}
+
+	// Return exists = false only if the load balancer and the public IP are not found on Azure
+	if !existsLb && !existsPip {
+		serviceName := getServiceName(service)
+		klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
+		return nil, false, nil
+	}
+
+	// Return exists = true if either the load balancer or the public IP (or both) exists
+	return status, true, nil
+}
+
+// getPublicIPDomainNameLabel returns the DNS label set via the service's
+// DNS-label-name annotation, and whether the annotation is present.
+func getPublicIPDomainNameLabel(service *v1.Service) (string, bool) {
+	if labelName, found := service.Annotations[consts.ServiceAnnotationDNSLabelName]; found {
+		return labelName, found
+	}
+	return "", false
+}
+
+// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
+// The reconciliation order is: load balancer -> security group -> opposite-
+// flavor (internal/external) load balancer cleanup -> public IP.
+func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
+	// When a client updates the internal load balancer annotation,
+	// the service may be switched from an internal LB to a public one, or vise versa.
+	// Here we'll firstly ensure service do not lie in the opposite LB.
+	serviceName := getServiceName(service)
+	klog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q, service: %v", serviceName, clusterName, service)
+
+	// Record the outcome of the whole operation for metrics.
+	mc := metrics.NewMetricContext("services", "ensure_loadbalancer", az.ResourceGroup, az.SubscriptionID, serviceName)
+	isOperationSucceeded := false
+	defer func() {
+		mc.ObserveOperationWithResult(isOperationSucceeded)
+	}()
+
+	lb, err := az.reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
+	if err != nil {
+		klog.Errorf("reconcileLoadBalancer(%s) failed: %v", serviceName, err)
+		return nil, err
+	}
+
+	lbStatus, _, err := az.getServiceLoadBalancerStatus(service, lb)
+	if err != nil {
+		klog.Errorf("getServiceLoadBalancerStatus(%s) failed: %v", serviceName, err)
+		return nil, err
+	}
+
+	var serviceIP *string
+	if lbStatus != nil && len(lbStatus.Ingress) > 0 {
+		serviceIP = &lbStatus.Ingress[0].IP
+	}
+	klog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
+	if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil {
+		klog.Errorf("reconcileSecurityGroup(%s) failed: %#v", serviceName, err)
+		return nil, err
+	}
+
+	// Reconcile the opposite LB flavor with wantLb=false so the service is
+	// removed from it if the internal annotation was flipped.
+	updateService := updateServiceLoadBalancerIP(service, to.String(serviceIP))
+	flippedService := flipServiceInternalAnnotation(updateService)
+	if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil {
+		klog.Errorf("reconcileLoadBalancer(%s) failed: %#v", serviceName, err)
+		return nil, err
+	}
+
+	// lb is not reused here because the ETAG may be changed in above operations, hence reconcilePublicIP() would get lb again from cache.
+	klog.V(2).Infof("EnsureLoadBalancer: reconciling pip")
+	if _, err := az.reconcilePublicIP(clusterName, updateService, to.String(lb.Name), true /* wantLb */); err != nil {
+		klog.Errorf("reconcilePublicIP(%s) failed: %#v", serviceName, err)
+		return nil, err
+	}
+
+	isOperationSucceeded = true
+	return lbStatus, nil
+}
+
+// UpdateLoadBalancer updates hosts under the specified load balancer.
+// It is a thin wrapper over EnsureLoadBalancer that first checks whether an
+// update is actually needed (e.g. the service may be deleting).
+func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
+	shouldUpdateLB, err := az.shouldUpdateLoadBalancer(clusterName, service, nodes)
+	if err != nil {
+		return err
+	}
+
+	if !shouldUpdateLB {
+		klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because it is either being deleted or does not exist anymore", service.Name)
+		return nil
+	}
+
+	_, err = az.EnsureLoadBalancer(ctx, clusterName, service, nodes)
+	return err
+}
+
+// EnsureLoadBalancerDeleted deletes the specified load balancer if it
+// exists, returning nil if the load balancer specified either didn't exist or
+// was successfully deleted.
+// This construction is useful because many cloud providers' load balancers
+// have multiple underlying components, meaning a Get could say that the LB
+// doesn't exist even if some part of it is still laying around.
+// Cleanup order: security group -> load balancer -> public IP.
+func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
+	isInternal := requiresInternalLoadBalancer(service)
+	serviceName := getServiceName(service)
+	klog.V(5).Infof("Delete service (%s): START clusterName=%q", serviceName, clusterName)
+
+	// Record the outcome of the whole operation for metrics.
+	mc := metrics.NewMetricContext("services", "ensure_loadbalancer_deleted", az.ResourceGroup, az.SubscriptionID, serviceName)
+	isOperationSucceeded := false
+	defer func() {
+		mc.ObserveOperationWithResult(isOperationSucceeded)
+	}()
+
+	// Forbidden/ignored errors are tolerated here and below so that deletion
+	// can proceed even with restricted permissions on some resources.
+	serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal)
+	if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
+		return err
+	}
+
+	klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
+	if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil {
+		return err
+	}
+
+	if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
+		return err
+	}
+
+	if _, err := az.reconcilePublicIP(clusterName, service, "", false /* wantLb */); err != nil {
+		return err
+	}
+
+	klog.V(2).Infof("Delete service (%s): FINISH", serviceName)
+	isOperationSucceeded = true
+
+	return nil
+}
+
+// GetLoadBalancerName returns the LoadBalancer name.
+// It delegates to cloudprovider.DefaultLoadBalancerName; clusterName is unused.
+func (az *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
+	return cloudprovider.DefaultLoadBalancerName(service)
+}
+
+// getLoadBalancerResourceGroup returns the resource group configured for load
+// balancers, falling back to the cluster's default resource group when unset.
+func (az *Cloud) getLoadBalancerResourceGroup() string {
+	if az.LoadBalancerResourceGroup != "" {
+		return az.LoadBalancerResourceGroup
+	}
+
+	return az.ResourceGroup
+}
+
+// shouldChangeLoadBalancer determines if the load balancer of the service should be switched to another one
+// according to the mode annotation on the service. This could be happened when the LB selection mode of an
+// existing service is changed to another VMSS/VMAS.
+// Returns true only when a non-auto mode annotation points at a different
+// vmSet than the one the current LB name maps to.
+func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clusterName string) bool {
+	hasMode, isAuto, vmSetName := az.getServiceLoadBalancerMode(service)
+
+	// if no mode is given or the mode is `__auto__`, the current LB should be kept
+	if !hasMode || isAuto {
+		return false
+	}
+
+	// if using the single standard load balancer, the current LB should be kept
+	useSingleSLB := az.useStandardLoadBalancer() && !az.EnableMultipleStandardLoadBalancers
+	if useSingleSLB {
+		return false
+	}
+
+	// if the current LB is what we want, keep it
+	// (the "-internal" suffix is stripped so internal and external LBs of the
+	// same vmSet compare equal)
+	lbName := strings.TrimSuffix(currLBName, consts.InternalLoadBalancerNameSuffix)
+	if strings.EqualFold(lbName, vmSetName) {
+		return false
+	}
+	if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) && strings.EqualFold(clusterName, lbName) {
+		return false
+	}
+
+	// if the vmSet selected by the annotation is sharing the primary slb, and the service
+	// has been associated to the primary slb, keep it
+	useMultipleSLBs := az.useStandardLoadBalancer() && az.EnableMultipleStandardLoadBalancers
+	if useMultipleSLBs &&
+		az.getVMSetNamesSharingPrimarySLB().Has(strings.ToLower(vmSetName)) &&
+		strings.EqualFold(lbName, clusterName) {
+		return false
+	}
+
+	// if the VMSS/VMAS of the current LB is different from the mode, change the LB
+	// to another one
+	klog.V(2).Infof("shouldChangeLoadBalancer(%s, %s, %s): change the LB to another one", service.Name, currLBName, clusterName)
+	return true
+}
+
+// removeFrontendIPConfigurationFromLoadBalancer removes the given frontend IP
+// configuration (and any load-balancing rules/probes whose names contain the
+// fip name) from the load balancer. If no frontend IP configurations remain,
+// the load balancer itself is deleted via cleanOrphanedLoadBalancer;
+// otherwise the trimmed LB is written back and its cache entry invalidated.
+func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadBalancer, existingLBs []network.LoadBalancer, fip *network.FrontendIPConfiguration, clusterName string, service *v1.Service) error {
+	if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil {
+		return nil
+	}
+	fipConfigs := *lb.FrontendIPConfigurations
+	for i, fipConfig := range fipConfigs {
+		if strings.EqualFold(to.String(fipConfig.Name), to.String(fip.Name)) {
+			fipConfigs = append(fipConfigs[:i], fipConfigs[i+1:]...)
+			break
+		}
+	}
+	lb.FrontendIPConfigurations = &fipConfigs
+
+	// also remove the corresponding rules/probes
+	// NOTE(review): matching is by substring (strings.Contains), not exact
+	// name — presumably rule/probe names embed the fip name; verify upstream.
+	if lb.LoadBalancingRules != nil {
+		lbRules := *lb.LoadBalancingRules
+		// iterate backwards so removal by index stays valid
+		for i := len(lbRules) - 1; i >= 0; i-- {
+			if strings.Contains(to.String(lbRules[i].Name), to.String(fip.Name)) {
+				lbRules = append(lbRules[:i], lbRules[i+1:]...)
+			}
+		}
+		lb.LoadBalancingRules = &lbRules
+	}
+	if lb.Probes != nil {
+		lbProbes := *lb.Probes
+		for i := len(lbProbes) - 1; i >= 0; i-- {
+			if strings.Contains(to.String(lbProbes[i].Name), to.String(fip.Name)) {
+				lbProbes = append(lbProbes[:i], lbProbes[i+1:]...)
+			}
+		}
+		lb.Probes = &lbProbes
+	}
+
+	if len(fipConfigs) == 0 {
+		klog.V(2).Infof("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): deleting load balancer because there is no remaining frontend IP configurations", to.String(lb.Name), to.String(fip.Name), clusterName, service.Name)
+		err := az.cleanOrphanedLoadBalancer(lb, existingLBs, service, clusterName)
+		if err != nil {
+			klog.Errorf("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): failed to cleanupOrphanedLoadBalancer: %v", to.String(lb.Name), to.String(fip.Name), clusterName, service.Name, err)
+			return err
+		}
+	} else {
+		klog.V(2).Infof("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): updating the load balancer", to.String(lb.Name), to.String(fip.Name), clusterName, service.Name)
+		err := az.CreateOrUpdateLB(service, *lb)
+		if err != nil {
+			klog.Errorf("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): failed to CreateOrUpdateLB: %v", to.String(lb.Name), to.String(fip.Name), clusterName, service.Name, err)
+			return err
+		}
+		_ = az.lbCache.Delete(to.String(lb.Name))
+	}
+	return nil
+}
+
+// cleanOrphanedLoadBalancer deletes a load balancer that no longer has any
+// frontend IP configurations. Pre-configured backend pools are never touched.
+// If the first delete fails because a VMSS still references the LB, the
+// backend pool is detached from that VMSS and the delete is retried once.
+func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, existingLBs []network.LoadBalancer, service *v1.Service, clusterName string) error {
+	lbName := to.String(lb.Name)
+	serviceName := getServiceName(service)
+	isBackendPoolPreConfigured := az.isBackendPoolPreConfigured(service)
+	lbResourceGroup := az.getLoadBalancerResourceGroup()
+	lbBackendPoolName := getBackendPoolName(clusterName, service)
+	lbBackendPoolID := az.getBackendPoolID(lbName, lbResourceGroup, lbBackendPoolName)
+	if isBackendPoolPreConfigured {
+		klog.V(2).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): ignore cleanup of dirty lb because the lb is pre-configured", lbName, serviceName, clusterName)
+	} else {
+		// Only delete LBs that are known to exist in the listed set.
+		foundLB := false
+		for _, existingLB := range existingLBs {
+			if strings.EqualFold(to.String(lb.Name), to.String(existingLB.Name)) {
+				foundLB = true
+				break
+			}
+		}
+		if !foundLB {
+			klog.V(2).Infof("cleanOrphanedLoadBalancer: the LB %s doesn't exist, will not delete it", to.String(lb.Name))
+			return nil
+		}
+
+		// When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself,
+		// because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
+		klog.V(2).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): deleting the LB since there are no remaining frontendIPConfigurations", lbName, serviceName, clusterName)
+
+		// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
+		vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
+		if _, ok := az.VMSet.(*availabilitySet); ok {
+			// do nothing for availability set
+			lb.BackendAddressPools = nil
+		}
+
+		deleteErr := az.safeDeleteLoadBalancer(*lb, clusterName, vmSetName, service)
+		if deleteErr != nil {
+			klog.Warningf("cleanOrphanedLoadBalancer(%s, %s, %s): failed to DeleteLB: %v", lbName, serviceName, clusterName, deleteErr)
+
+			// Try to extract the VMSS still referencing the LB from the raw
+			// error; if that fails we surface the original delete error.
+			rgName, vmssName, parseErr := retry.GetVMSSMetadataByRawError(deleteErr)
+			if parseErr != nil {
+				klog.Warningf("cleanOrphanedLoadBalancer(%s, %s, %s): failed to parse error: %v", lbName, serviceName, clusterName, parseErr)
+				return deleteErr.Error()
+			}
+			if rgName == "" || vmssName == "" {
+				klog.Warningf("cleanOrphanedLoadBalancer(%s, %s, %s): empty rgName or vmssName", lbName, serviceName, clusterName)
+				return deleteErr.Error()
+			}
+
+			// if we reach here, it means the VM couldn't be deleted because it is being referenced by a VMSS
+			if _, ok := az.VMSet.(*ScaleSet); !ok {
+				klog.Warningf("cleanOrphanedLoadBalancer(%s, %s, %s): unexpected VMSet type, expected VMSS", lbName, serviceName, clusterName)
+				return deleteErr.Error()
+			}
+
+			if !strings.EqualFold(rgName, az.ResourceGroup) {
+				return fmt.Errorf("cleanOrphanedLoadBalancer(%s, %s, %s): the VMSS %s is in the resource group %s, but is referencing the LB in %s", lbName, serviceName, clusterName, vmssName, rgName, az.ResourceGroup)
+			}
+
+			// Detach the backend pool from the offending VMSS and retry the
+			// delete exactly once.
+			vmssNamesMap := map[string]bool{vmssName: true}
+			err := az.VMSet.EnsureBackendPoolDeletedFromVMSets(vmssNamesMap, lbBackendPoolID)
+			if err != nil {
+				klog.Errorf("cleanOrphanedLoadBalancer(%s, %s, %s): failed to EnsureBackendPoolDeletedFromVMSets: %v", lbName, serviceName, clusterName, err)
+				return err
+			}
+
+			deleteErr := az.DeleteLB(service, lbName)
+			if deleteErr != nil {
+				klog.Errorf("cleanOrphanedLoadBalancer(%s, %s, %s): failed delete lb for the second time, stop retrying: %v", lbName, serviceName, clusterName, deleteErr)
+				return deleteErr.Error()
+			}
+		}
+		klog.V(10).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): az.DeleteLB finished", lbName, serviceName, clusterName)
+	}
+	return nil
+}
+
+// safeDeleteLoadBalancer deletes the load balancer after decoupling it from the vmSet
+// (backend pool removal only applies to the nodeIPConfiguration backend pool
+// type). On success the LB's cache entry is invalidated.
+func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vmSetName string, service *v1.Service) *retry.Error {
+	if strings.EqualFold(az.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration) {
+		lbBackendPoolID := az.getBackendPoolID(to.String(lb.Name), az.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service))
+		err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
+		if err != nil {
+			return retry.NewError(false, fmt.Errorf("safeDeleteLoadBalancer: failed to EnsureBackendPoolDeleted: %w", err))
+		}
+	}
+
+	klog.V(2).Infof("safeDeleteLoadBalancer: deleting LB %s because the corresponding vmSet is supposed to be in the primary SLB", to.String(lb.Name))
+	rerr := az.DeleteLB(service, to.String(lb.Name))
+	if rerr != nil {
+		return rerr
+	}
+	// Cache delete error is intentionally ignored; the entry will be refreshed.
+	_ = az.lbCache.Delete(to.String(lb.Name))
+
+	return nil
+}
+
+// extractBackendIPConfigurationIDsFromLB collects the IDs of all backend IP
+// configurations attached to the LB's backend pool whose name matches
+// lbBackendPoolName (case-insensitive). Returns an empty slice when the pool
+// or its IP configurations are absent.
+func extractBackendIPConfigurationIDsFromLB(lb network.LoadBalancer, lbBackendPoolName string) []string {
+	result := make([]string, 0)
+	if lb.LoadBalancerPropertiesFormat != nil &&
+		lb.BackendAddressPools != nil {
+		for i := 0; i < len(*lb.BackendAddressPools); i++ {
+			backendPool := (*lb.BackendAddressPools)[i]
+			if strings.EqualFold(to.String(backendPool.Name), lbBackendPoolName) {
+				if backendPool.BackendAddressPoolPropertiesFormat != nil &&
+					backendPool.BackendIPConfigurations != nil {
+					for _, ipConfiguration := range *backendPool.BackendIPConfigurations {
+						if ipConfiguration.ID != nil {
+							result = append(result, to.String(ipConfiguration.ID))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return result
+}
+
+// reconcileSharedLoadBalancer deletes the dedicated SLBs of the non-primary vmSets. There are
+// two scenarios where this operation is needed:
+// 1. Using multiple slbs and the vmSet is supposed to share the primary slb.
+// 2. When migrating from multiple slbs to single slb mode.
+// It also ensures those vmSets are joint the backend pools of the primary SLBs.
+// It runs only once everytime the cloud controller manager restarts.
+// Returns the (possibly trimmed and updated) list of managed load balancers.
+func (az *Cloud) reconcileSharedLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node) ([]network.LoadBalancer, error) {
+	var (
+		primarySLBs, existingLBs []network.LoadBalancer
+		changed                  bool
+		err                      error
+	)
+
+	existingLBs, err = az.ListManagedLBs(service, nodes, clusterName)
+	if err != nil {
+		return nil, fmt.Errorf("reconcileSharedLoadBalancer: failed to list managed LB: %w", err)
+	}
+
+	// only run once since the controller manager rebooted
+	if az.isSharedLoadBalancerSynced {
+		return existingLBs, nil
+	}
+	defer func() {
+		// Only mark synced on success so a failed run is retried.
+		if err == nil {
+			az.isSharedLoadBalancerSynced = true
+		}
+	}()
+
+	// skip if the cluster is using basic LB
+	if !az.useStandardLoadBalancer() {
+		return existingLBs, nil
+	}
+
+	lbBackendPoolName := getBackendPoolName(clusterName, service)
+	lbNamesToBeDeleted := sets.NewString()
+	// 1: delete unwanted LBs
+	for _, lb := range existingLBs {
+		lbNamePrefix := strings.TrimSuffix(to.String(lb.Name), consts.InternalLoadBalancerNameSuffix)
+
+		// skip the internal or external primary load balancer
+		if strings.EqualFold(lbNamePrefix, clusterName) {
+			primarySLBs = append(primarySLBs, lb)
+			continue
+		}
+
+		// skip if the multiple slbs mode is enabled and
+		// the vmSet is supposed to have dedicated SLBs
+		vmSetName := strings.ToLower(az.mapLoadBalancerNameToVMSet(to.String(lb.Name), clusterName))
+		if az.EnableMultipleStandardLoadBalancers && !az.getVMSetNamesSharingPrimarySLB().Has(vmSetName) {
+			continue
+		}
+
+		// For non-primary load balancer, the lb name is the name of the VMSet.
+		// If the VMSet name is in az.NodePoolsWithoutDedicatedSLB, we should
+		// decouple the VMSet from the lb and delete the lb. Then adding the VMSet
+		// to the backend pool of the primary slb.
+		rerr := az.safeDeleteLoadBalancer(lb, clusterName, vmSetName, service)
+		if rerr != nil {
+			return nil, rerr.Error()
+		}
+
+		// remove the deleted lb from the list and construct a new primary
+		// lb, so that getServiceLoadBalancer doesn't have to call list api again
+		lbNamesToBeDeleted.Insert(strings.ToLower(to.String(lb.Name)))
+		changed = true
+	}
+
+	if !changed {
+		klog.V(4).Infof("reconcileSharedLoadBalancer: no changes made, return now")
+		return existingLBs, nil
+	}
+
+	vmSetsToBeMovedToPrimarySLB := sets.NewString()
+	ipConfigIDsToBeAddedToPrimarySLB := sets.NewString()
+	// 2: add nodes to the backend pool of the primary SLBs
+	// (iterate backwards so deleted entries can be removed from the slice in place)
+	for i := len(existingLBs) - 1; i >= 0; i-- {
+		lb := existingLBs[i]
+		if !lbNamesToBeDeleted.Has(strings.ToLower(to.String(lb.Name))) {
+			continue
+		}
+
+		vmSetName := strings.ToLower(az.mapLoadBalancerNameToVMSet(to.String(lb.Name), clusterName))
+		vmSetsToBeMovedToPrimarySLB.Insert(vmSetName)
+		// internal LBs move to the internal primary SLB, external to the external one
+		isInternalLB := strings.HasSuffix(to.String(lb.Name), consts.InternalLoadBalancerNameSuffix)
+		primarySLBName := clusterName
+		if isInternalLB {
+			primarySLBName = fmt.Sprintf("%s%s", clusterName, consts.InternalLoadBalancerNameSuffix)
+		}
+		primaryLBBackendPoolID := az.getBackendPoolID(primarySLBName, az.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service))
+
+		klog.V(2).Infof("reconcileSharedLoadBalancer: binding the vmSet %s to the backend pool %s", vmSetName, primaryLBBackendPoolID)
+		if strings.EqualFold(az.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration) {
+			err = az.VMSet.EnsureHostsInPool(service, nodes, primaryLBBackendPoolID, vmSetName)
+			if err != nil {
+				return nil, fmt.Errorf("reconcileSharedLoadBalancer: failed to EnsureHostsInPool: %w", err)
+			}
+
+			for _, id := range extractBackendIPConfigurationIDsFromLB(lb, lbBackendPoolName) {
+				ipConfigIDsToBeAddedToPrimarySLB.Insert(id)
+			}
+		}
+
+		// remove the deleted LB from the list
+		existingLBs = append(existingLBs[:i], existingLBs[i+1:]...)
+	}
+
+	// 3: merge the collected IP configurations / addresses into the primary
+	// SLBs' backend pools, depending on the configured backend pool type.
+	for _, primarySLB := range primarySLBs {
+		if primarySLB.LoadBalancerPropertiesFormat != nil &&
+			primarySLB.BackendAddressPools != nil {
+			for i := 0; i < len(*primarySLB.BackendAddressPools); i++ {
+				if strings.EqualFold(to.String((*primarySLB.BackendAddressPools)[i].Name), lbBackendPoolName) {
+					if az.isLBBackendPoolTypeNodeIPConfig() {
+						backendPoolIPConfigs := (*primarySLB.BackendAddressPools)[i].BackendIPConfigurations
+						for _, id := range ipConfigIDsToBeAddedToPrimarySLB.List() {
+							*backendPoolIPConfigs = append(*backendPoolIPConfigs, network.InterfaceIPConfiguration{
+								ID: to.StringPtr(id),
+							})
+						}
+					} else if az.isLBBackendPoolTypeNodeIP() {
+						backendPool := (*primarySLB.BackendAddressPools)[i]
+						if backendPool.LoadBalancerBackendAddresses == nil {
+							lbBackendPoolAddresses := make([]network.LoadBalancerBackendAddress, 0)
+							backendPool.LoadBalancerBackendAddresses = &lbBackendPoolAddresses
+						}
+
+						if err := az.LoadBalancerBackendPool.EnsureHostsInPool(service, nodes, "", "", clusterName, to.String(primarySLB.Name), backendPool); err != nil {
+							return nil, fmt.Errorf("reconcileSharedLoadBalancer: failed to EnsureHostsInPool: %w", err)
+						}
+
+						(*primarySLB.BackendAddressPools)[i] = backendPool
+					}
+
+					break
+				}
+			}
+		}
+	}
+
+	// 4: replace the stale primary SLB entries in the returned list with the
+	// updated copies built above.
+	for i, existingLB := range existingLBs {
+		for _, primarySLB := range primarySLBs {
+			if strings.EqualFold(to.String(existingLB.Name), to.String(primarySLB.Name)) {
+				// Proactively disable the etag to prevent etag mismatch error when putting lb later.
+				// This could happen because when we remove the hosts from the lb, the nrp
+				// would put the lb to remove the backend references as well.
+				primarySLB.Etag = nil
+
+				existingLBs[i] = primarySLB
+			}
+		}
+	}
+
+	return existingLBs, nil
+}
+
+// getServiceLoadBalancer gets the loadbalancer for the service if it already exists.
+// If wantLb is TRUE then -it selects a new load balancer.
+// In case the selected load balancer does not exist it returns network.LoadBalancer struct
+// with added metadata (such as name, location) and existsLB set to FALSE.
+// By default - cluster default LB is returned.
+// Passing a non-empty existingLBs avoids an extra ListLB API call.
+func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool, existingLBs []network.LoadBalancer) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) {
+	isInternal := requiresInternalLoadBalancer(service)
+	var defaultLB *network.LoadBalancer
+	primaryVMSetName := az.VMSet.GetPrimaryVMSetName()
+	defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal)
+	useMultipleSLBs := az.useStandardLoadBalancer() && az.EnableMultipleStandardLoadBalancers
+
+	// reuse the lb list from reconcileSharedLoadBalancer to reduce the api call
+	if len(existingLBs) == 0 {
+		existingLBs, err = az.ListLB(service)
+		if err != nil {
+			return nil, nil, false, err
+		}
+	}
+
+	// check if the service already has a load balancer
+	for i := range existingLBs {
+		existingLB := existingLBs[i]
+		existingLBNamePrefix := strings.TrimSuffix(to.String(existingLB.Name), consts.InternalLoadBalancerNameSuffix)
+
+		// for the primary standard load balancer (internal or external), when enabled multiple slbs
+		if strings.EqualFold(existingLBNamePrefix, clusterName) && useMultipleSLBs {
+			shouldRemoveVMSetFromSLB := func(vmSetName string) bool {
+				// not removing the vmSet from the primary SLB
+				// if it is supposed to share the primary SLB.
+				if az.getVMSetNamesSharingPrimarySLB().Has(strings.ToLower(vmSetName)) {
+					return false
+				}
+
+				// removing the vmSet from the primary SLB if
+				// it is not the primary vmSet. There are two situations:
+				// 1. when migrating from single SLB to multiple SLBs, we
+				// need to remove all non-primary vmSets from the primary SLB;
+				// 2. when migrating from shared mode to dedicated SLB, we
+				// need to remove the specific vmSet from the primary SLB.
+				return !strings.EqualFold(vmSetName, primaryVMSetName) && vmSetName != ""
+			}
+			cleanedLB, err := az.LoadBalancerBackendPool.CleanupVMSetFromBackendPoolByCondition(&existingLB, service, nodes, clusterName, shouldRemoveVMSetFromSLB)
+			if err != nil {
+				return nil, nil, false, err
+			}
+			existingLB = *cleanedLB
+			existingLBs[i] = *cleanedLB
+		}
+		if strings.EqualFold(*existingLB.Name, defaultLBName) {
+			defaultLB = &existingLB
+		}
+		// internal/external mismatch: this LB cannot host the service
+		if isInternalLoadBalancer(&existingLB) != isInternal {
+			continue
+		}
+		status, fipConfig, err := az.getServiceLoadBalancerStatus(service, &existingLB)
+		if err != nil {
+			return nil, nil, false, err
+		}
+		if status == nil {
+			// service is not on this load balancer
+			continue
+		}
+
+		// select another load balancer instead of returning
+		// the current one if the change is needed
+		if wantLb && az.shouldChangeLoadBalancer(service, to.String(existingLB.Name), clusterName) {
+			if err := az.removeFrontendIPConfigurationFromLoadBalancer(&existingLB, existingLBs, fipConfig, clusterName, service); err != nil {
+				klog.Errorf("getServiceLoadBalancer(%s, %s, %v): failed to remove frontend IP configuration from load balancer: %v", service.Name, clusterName, wantLb, err)
+				return nil, nil, false, err
+			}
+			// fall through to the selection logic below
+			break
+		}
+
+		return &existingLB, status, true, nil
+	}
+
+	// Service does not have a load balancer, select one.
+	// Single standard load balancer doesn't need this because
+	// all backends nodes should be added to same LB.
+	useSingleSLB := az.useStandardLoadBalancer() && !az.EnableMultipleStandardLoadBalancers
+	if wantLb && !useSingleSLB {
+		// select new load balancer for service
+		selectedLB, exists, err := az.selectLoadBalancer(clusterName, service, &existingLBs, nodes)
+		if err != nil {
+			return nil, nil, false, err
+		}
+
+		return selectedLB, nil, exists, err
+	}
+
+	// create a default LB with meta data if not present
+	if defaultLB == nil {
+		defaultLB = &network.LoadBalancer{
+			Name:                         &defaultLBName,
+			Location:                     &az.Location,
+			LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
+		}
+		if az.useStandardLoadBalancer() {
+			defaultLB.Sku = &network.LoadBalancerSku{
+				Name: network.LoadBalancerSkuNameStandard,
+			}
+		}
+		if az.HasExtendedLocation() {
+			defaultLB.ExtendedLocation = &network.ExtendedLocation{
+				Name: &az.ExtendedLocationName,
+				Type: getExtendedLocationTypeFromString(az.ExtendedLocationType),
+			}
+		}
+	}
+
+	return defaultLB, nil, false, nil
+}
+
+// selectLoadBalancer selects load balancer for the service in the cluster.
+// The selection algorithm selects the load balancer which currently has
+// the minimum lb rules. If there are multiple LBs with same number of rules,
+// then selects the first one (sorted based on name).
+// A vmSet without an existing LB wins immediately (zero rules) and a new
+// LB struct with metadata is returned with existsLb=false.
+func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
+	isInternal := requiresInternalLoadBalancer(service)
+	serviceName := getServiceName(service)
+	klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
+	vmSetNames, err := az.VMSet.GetVMSetNames(service, nodes)
+	if err != nil {
+		klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
+		return nil, false, err
+	}
+	klog.V(2).Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames)
+
+	// index existing LBs by name for O(1) lookup per candidate vmSet
+	mapExistingLBs := map[string]network.LoadBalancer{}
+	for _, lb := range *existingLBs {
+		mapExistingLBs[*lb.Name] = lb
+	}
+	selectedLBRuleCount := math.MaxInt32
+	for _, currVMSetName := range *vmSetNames {
+		currLBName := az.getAzureLoadBalancerName(clusterName, currVMSetName, isInternal)
+		lb, exists := mapExistingLBs[currLBName]
+		if !exists {
+			// select this LB as this is a new LB and will have minimum rules
+			// create tmp lb struct to hold metadata for the new load-balancer
+			var loadBalancerSKU network.LoadBalancerSkuName
+			if az.useStandardLoadBalancer() {
+				loadBalancerSKU = network.LoadBalancerSkuNameStandard
+			} else {
+				loadBalancerSKU = network.LoadBalancerSkuNameBasic
+			}
+			selectedLB = &network.LoadBalancer{
+				Name:                         &currLBName,
+				Location:                     &az.Location,
+				Sku:                          &network.LoadBalancerSku{Name: loadBalancerSKU},
+				LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
+			}
+			if az.HasExtendedLocation() {
+				selectedLB.ExtendedLocation = &network.ExtendedLocation{
+					Name: &az.ExtendedLocationName,
+					Type: getExtendedLocationTypeFromString(az.ExtendedLocationType),
+				}
+			}
+
+			return selectedLB, false, nil
+		}
+
+		// NOTE(review): the dereference happens before the nil check below;
+		// if LoadBalancingRules is nil this panics — confirm against upstream.
+		lbRules := *lb.LoadBalancingRules
+		currLBRuleCount := 0
+		if lbRules != nil {
+			currLBRuleCount = len(lbRules)
+		}
+		if currLBRuleCount < selectedLBRuleCount {
+			selectedLBRuleCount = currLBRuleCount
+			selectedLB = &lb
+		}
+	}
+
+	if selectedLB == nil {
+		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected VM sets %v", clusterName, serviceName, isInternal, *vmSetNames)
+		klog.Error(err)
+		return nil, false, err
+	}
+	// validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount
+	if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount {
+		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) -  all available load balancers have exceeded maximum rule limit %d, vmSetNames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *vmSetNames)
+		klog.Error(err)
+		return selectedLB, existsLb, err
+	}
+
+	return selectedLB, existsLb, nil
+}
+
// getServiceLoadBalancerStatus builds the LoadBalancerStatus (ingress IPs) for
// the service from the given load balancer, and also returns the frontend IP
// configuration owned by the service. It returns (nil, nil, nil) when lb is
// nil, has no frontend IP configurations, or none of them is owned by the
// service.
func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, fipConfig *network.FrontendIPConfiguration, err error) {
	if lb == nil {
		klog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
		return nil, nil, nil
	}
	if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
		klog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil")
		return nil, nil, nil
	}
	isInternal := requiresInternalLoadBalancer(service)
	serviceName := getServiceName(service)
	for _, ipConfiguration := range *lb.FrontendIPConfigurations {
		owns, isPrimaryService, err := az.serviceOwnsFrontendIP(ipConfiguration, service)
		if err != nil {
			return nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to filter frontend IP configs with error: %w", serviceName, to.String(lb.Name), err)
		}
		if owns {
			klog.V(2).Infof("get(%s): lb(%s) - found frontend IP config, primary service: %v", serviceName, to.String(lb.Name), isPrimaryService)

			var lbIP *string
			if isInternal {
				// Internal LB: the frontend's private IP is the service IP.
				lbIP = ipConfiguration.PrivateIPAddress
			} else {
				// External LB: resolve the referenced public IP resource to
				// obtain the actual address.
				if ipConfiguration.PublicIPAddress == nil {
					return nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress is Nil", serviceName, *lb.Name)
				}
				pipID := ipConfiguration.PublicIPAddress.ID
				if pipID == nil {
					return nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress ID is Nil", serviceName, *lb.Name)
				}
				// The PIP name is the last segment of its ARM resource ID.
				pipName, err := getLastSegment(*pipID, "/")
				if err != nil {
					return nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID)
				}
				pip, existsPip, err := az.getPublicIPAddress(az.getPublicIPAddressResourceGroup(service), pipName)
				if err != nil {
					return nil, nil, err
				}
				// If the PIP no longer exists, lbIP stays nil and the
				// ingress entry below carries an empty IP.
				if existsPip {
					lbIP = pip.IPAddress
				}
			}

			klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), to.String(ipConfiguration.Name), serviceName)

			// set additional public IPs to LoadBalancerStatus, so that kube-proxy would create their iptables rules.
			lbIngress := []v1.LoadBalancerIngress{{IP: to.String(lbIP)}}
			additionalIPs, err := getServiceAdditionalPublicIPs(service)
			if err != nil {
				// Return the primary ingress even when parsing the
				// additional-IPs annotation fails.
				return &v1.LoadBalancerStatus{Ingress: lbIngress}, &ipConfiguration, err
			}
			if len(additionalIPs) > 0 {
				for _, pip := range additionalIPs {
					lbIngress = append(lbIngress, v1.LoadBalancerIngress{
						IP: pip,
					})
				}
			}

			return &v1.LoadBalancerStatus{Ingress: lbIngress}, &ipConfiguration, nil
		}
	}

	return nil, nil, nil
}
+
+func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) (string, bool, error) {
+	var shouldPIPExisted bool
+	if name, found := service.Annotations[consts.ServiceAnnotationPIPName]; found && name != "" {
+		shouldPIPExisted = true
+		return name, shouldPIPExisted, nil
+	}
+
+	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
+	loadBalancerIP := service.Spec.LoadBalancerIP
+
+	// Assume that the service without loadBalancerIP set is a primary service.
+	// If a secondary service doesn't set the loadBalancerIP, it is not allowed to share the IP.
+	if len(loadBalancerIP) == 0 {
+		return az.getPublicIPName(clusterName, service), shouldPIPExisted, nil
+	}
+
+	// For the services with loadBalancerIP set, an existing public IP is required, primary
+	// or secondary, or a public IP not found error would be reported.
+	pip, err := az.findMatchedPIPByLoadBalancerIP(service, loadBalancerIP, pipResourceGroup)
+	if err != nil {
+		return "", shouldPIPExisted, err
+	}
+
+	if pip != nil && pip.Name != nil {
+		return *pip.Name, shouldPIPExisted, nil
+	}
+
+	return "", shouldPIPExisted, fmt.Errorf("user supplied IP Address %s was not found in resource group %s", loadBalancerIP, pipResourceGroup)
+}
+
+func (az *Cloud) findMatchedPIPByLoadBalancerIP(service *v1.Service, loadBalancerIP, pipResourceGroup string) (*network.PublicIPAddress, error) {
+	pips, err := az.ListPIP(service, pipResourceGroup)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, pip := range pips {
+		if pip.PublicIPAddressPropertiesFormat.IPAddress != nil &&
+			*pip.PublicIPAddressPropertiesFormat.IPAddress == loadBalancerIP {
+			return &pip, nil
+		}
+	}
+
+	return nil, fmt.Errorf("findMatchedPIPByLoadBalancerIP: cannot find public IP with IP address %s in resource group %s", loadBalancerIP, pipResourceGroup)
+}
+
+func flipServiceInternalAnnotation(service *v1.Service) *v1.Service {
+	copyService := service.DeepCopy()
+	if copyService.Annotations == nil {
+		copyService.Annotations = map[string]string{}
+	}
+	if v, ok := copyService.Annotations[consts.ServiceAnnotationLoadBalancerInternal]; ok && v == consts.TrueAnnotationValue {
+		// If it is internal now, we make it external by remove the annotation
+		delete(copyService.Annotations, consts.ServiceAnnotationLoadBalancerInternal)
+	} else {
+		// If it is external now, we make it internal
+		copyService.Annotations[consts.ServiceAnnotationLoadBalancerInternal] = consts.TrueAnnotationValue
+	}
+	return copyService
+}
+
+func updateServiceLoadBalancerIP(service *v1.Service, serviceIP string) *v1.Service {
+	copyService := service.DeepCopy()
+	if len(serviceIP) > 0 && copyService != nil {
+		copyService.Spec.LoadBalancerIP = serviceIP
+	}
+	return copyService
+}
+
+func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, service *v1.Service, isInternalLb bool) (string, error) {
+	if len(service.Spec.LoadBalancerIP) > 0 {
+		return service.Spec.LoadBalancerIP, nil
+	}
+
+	if len(service.Status.LoadBalancer.Ingress) > 0 && len(service.Status.LoadBalancer.Ingress[0].IP) > 0 {
+		return service.Status.LoadBalancer.Ingress[0].IP, nil
+	}
+
+	_, lbStatus, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false, []network.LoadBalancer{})
+	if err != nil {
+		return "", err
+	}
+	if !existsLb {
+		klog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name)
+		return "", nil
+	}
+	if len(lbStatus.Ingress) < 1 {
+		klog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name)
+		return "", nil
+	}
+
+	return lbStatus.Ingress[0].IP, nil
+}
+
// ensurePublicIPExists gets the public IP pipName in the service's PIP
// resource group, creating or updating it as needed, and returns the
// refreshed resource. shouldPIPExisted indicates the PIP was named via the
// azure-pip-name annotation and must therefore already exist;
// foundDNSLabelAnnotation indicates the DNS-label annotation was present so
// DNS settings must be reconciled (domainNameLabel may be empty to clear
// them).
func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel, clusterName string, shouldPIPExisted, foundDNSLabelAnnotation bool) (*network.PublicIPAddress, error) {
	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
	pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
	if err != nil {
		return nil, err
	}

	serviceName := getServiceName(service)

	// changed tracks whether the in-memory pip diverged from Azure and needs
	// a CreateOrUpdate call before returning.
	var changed bool
	if existsPip {
		// ensure that the service tag is good for managed pips
		owns, isUserAssignedPIP := serviceOwnsPublicIP(service, &pip, clusterName)
		if owns && !isUserAssignedPIP {
			changed, err = bindServicesToPIP(&pip, []string{serviceName}, false)
			if err != nil {
				return nil, err
			}
		}

		if pip.Tags == nil {
			pip.Tags = make(map[string]*string)
		}

		// return if pip exist and dns label is the same
		if strings.EqualFold(getDomainNameLabel(&pip), domainNameLabel) {
			if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && strings.EqualFold(existingServiceName, serviceName) {
				klog.V(6).Infof("ensurePublicIPExists for service(%s): pip(%s) - "+
					"the service is using the DNS label on the public IP", serviceName, pipName)

				var rerr *retry.Error
				if changed {
					// The service-tag binding above modified the PIP; push
					// the update and re-read the resource before returning.
					klog.V(2).Infof("ensurePublicIPExists: updating the PIP %s for the incoming service %s", pipName, serviceName)
					err = az.CreateOrUpdatePIP(service, pipResourceGroup, pip)
					if err != nil {
						return nil, err
					}

					ctx, cancel := getContextWithCancel()
					defer cancel()
					pip, rerr = az.PublicIPAddressesClient.Get(ctx, pipResourceGroup, *pip.Name, "")
					if rerr != nil {
						return nil, rerr.Error()
					}
				}

				return &pip, nil
			}
		}

		klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - updating", serviceName, to.String(pip.Name))
		if pip.PublicIPAddressPropertiesFormat == nil {
			pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{
				PublicIPAllocationMethod: network.IPAllocationMethodStatic,
			}
			changed = true
		}
	} else {
		// The PIP does not exist: a user-specified PIP must already exist,
		// otherwise build a new managed PIP from scratch.
		if shouldPIPExisted {
			return nil, fmt.Errorf("PublicIP from annotation azure-pip-name=%s for service %s doesn't exist", pipName, serviceName)
		}

		changed = true

		pip.Name = to.StringPtr(pipName)
		pip.Location = to.StringPtr(az.Location)
		if az.HasExtendedLocation() {
			klog.V(2).Infof("Using extended location with name %s, and type %s for PIP", az.ExtendedLocationName, az.ExtendedLocationType)
			pip.ExtendedLocation = &network.ExtendedLocation{
				Name: &az.ExtendedLocationName,
				Type: getExtendedLocationTypeFromString(az.ExtendedLocationType),
			}
		}
		pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{
			PublicIPAllocationMethod: network.IPAllocationMethodStatic,
			IPTags:                   getServiceIPTagRequestForPublicIP(service).IPTags,
		}
		// Tag the PIP as cluster-managed and bind the owning service.
		pip.Tags = map[string]*string{
			consts.ServiceTagKey:  to.StringPtr(""),
			consts.ClusterNameKey: &clusterName,
		}
		if _, err = bindServicesToPIP(&pip, []string{serviceName}, false); err != nil {
			return nil, err
		}

		if az.useStandardLoadBalancer() {
			pip.Sku = &network.PublicIPAddressSku{
				Name: network.PublicIPAddressSkuNameStandard,
			}

			// skip adding zone info since edge zones doesn't support multiple availability zones.
			if !az.HasExtendedLocation() {
				// only add zone information for the new standard pips
				zones, err := az.getRegionZonesBackoff(to.String(pip.Location))
				if err != nil {
					return nil, err
				}
				if len(zones) > 0 {
					pip.Zones = &zones
				}
			}
		}
		klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
	}

	if foundDNSLabelAnnotation {
		// Reconcile DNS settings only when the annotation was present; an
		// empty domainNameLabel then clears any existing DNS settings.
		updatedDNSSettings, err := reconcileDNSSettings(&pip, domainNameLabel, serviceName, pipName)
		if err != nil {
			return nil, fmt.Errorf("ensurePublicIPExists for service(%s): failed to reconcileDNSSettings: %w", serviceName, err)
		}

		if updatedDNSSettings {
			changed = true
		}
	}

	// use the same family as the clusterIP as we support IPv6 single stack as well
	// as dual-stack clusters
	updatedIPSettings := az.reconcileIPSettings(&pip, service)
	if updatedIPSettings {
		changed = true
	}

	if changed {
		klog.V(2).Infof("CreateOrUpdatePIP(%s, %q): start", pipResourceGroup, *pip.Name)
		err = az.CreateOrUpdatePIP(service, pipResourceGroup, pip)
		if err != nil {
			klog.V(2).Infof("ensure(%s) abort backoff: pip(%s)", serviceName, *pip.Name)
			return nil, err
		}

		klog.V(10).Infof("CreateOrUpdatePIP(%s, %q): end", pipResourceGroup, *pip.Name)
	}

	// Re-read the PIP so the caller gets server-side state (e.g. the
	// allocated IP address).
	ctx, cancel := getContextWithCancel()
	defer cancel()
	pip, rerr := az.PublicIPAddressesClient.Get(ctx, pipResourceGroup, *pip.Name, "")
	if rerr != nil {
		return nil, rerr.Error()
	}
	return &pip, nil
}
+
+func (az *Cloud) reconcileIPSettings(pip *network.PublicIPAddress, service *v1.Service) bool {
+	var changed bool
+
+	serviceName := getServiceName(service)
+	ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
+	if ipv6 {
+		klog.V(2).Infof("service(%s): pip(%s) - creating as ipv6 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
+
+		if !strings.EqualFold(string(pip.PublicIPAddressVersion), string(network.IPVersionIPv6)) {
+			pip.PublicIPAddressVersion = network.IPVersionIPv6
+			changed = true
+		}
+
+		if az.useStandardLoadBalancer() {
+			// standard sku must have static allocation method for ipv6
+			if !strings.EqualFold(string(pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod), string(network.IPAllocationMethodStatic)) {
+				pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.IPAllocationMethodStatic
+				changed = true
+			}
+		} else if !strings.EqualFold(string(pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod), string(network.IPAllocationMethodDynamic)) {
+			pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.IPAllocationMethodDynamic
+			changed = true
+		}
+	} else {
+		klog.V(2).Infof("service(%s): pip(%s) - creating as ipv4 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
+
+		if !strings.EqualFold(string(pip.PublicIPAddressVersion), string(network.IPVersionIPv6)) {
+			pip.PublicIPAddressVersion = network.IPVersionIPv4
+			changed = true
+		}
+	}
+
+	return changed
+}
+
+func reconcileDNSSettings(pip *network.PublicIPAddress, domainNameLabel, serviceName, pipName string) (bool, error) {
+	var changed bool
+
+	if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && !strings.EqualFold(existingServiceName, serviceName) {
+		return false, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing service %s consuming the DNS label on the public IP, so the service cannot set the DNS label annotation with this value", serviceName, pipName, existingServiceName)
+	}
+
+	if len(domainNameLabel) == 0 {
+		if pip.PublicIPAddressPropertiesFormat.DNSSettings != nil {
+			pip.PublicIPAddressPropertiesFormat.DNSSettings = nil
+			changed = true
+		}
+	} else {
+		if pip.PublicIPAddressPropertiesFormat.DNSSettings == nil ||
+			pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel == nil {
+			klog.V(6).Infof("ensurePublicIPExists for service(%s): pip(%s) - no existing DNS label on the public IP, create one", serviceName, pipName)
+			pip.PublicIPAddressPropertiesFormat.DNSSettings = &network.PublicIPAddressDNSSettings{
+				DomainNameLabel: &domainNameLabel,
+			}
+			changed = true
+		} else {
+			existingDNSLabel := pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel
+			if !strings.EqualFold(to.String(existingDNSLabel), domainNameLabel) {
+				return false, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing DNS label %s on the public IP", serviceName, pipName, *existingDNSLabel)
+			}
+		}
+
+		if svc := getServiceFromPIPDNSTags(pip.Tags); svc == "" || !strings.EqualFold(svc, serviceName) {
+			pip.Tags[consts.ServiceUsingDNSKey] = &serviceName
+			changed = true
+		}
+	}
+
+	return changed, nil
+}
+
+func getServiceFromPIPDNSTags(tags map[string]*string) string {
+	v, ok := tags[consts.ServiceUsingDNSKey]
+	if ok && v != nil {
+		return *v
+	}
+
+	v, ok = tags[consts.LegacyServiceUsingDNSKey]
+	if ok && v != nil {
+		return *v
+	}
+
+	return ""
+}
+
+func getServiceFromPIPServiceTags(tags map[string]*string) string {
+	v, ok := tags[consts.ServiceTagKey]
+	if ok && v != nil {
+		return *v
+	}
+
+	v, ok = tags[consts.LegacyServiceTagKey]
+	if ok && v != nil {
+		return *v
+	}
+
+	return ""
+}
+
+func getClusterFromPIPClusterTags(tags map[string]*string) string {
+	v, ok := tags[consts.ClusterNameKey]
+	if ok && v != nil {
+		return *v
+	}
+
+	v, ok = tags[consts.LegacyClusterNameKey]
+	if ok && v != nil {
+		return *v
+	}
+
+	return ""
+}
+
// serviceIPTagRequest carries the IP tags requested for a service's public IP
// via the IP-tags service annotation.
type serviceIPTagRequest struct {
	// IPTagsRequestedByAnnotation is true when the service carries the
	// IP-tags annotation at all (even if it parses to an empty list).
	IPTagsRequestedByAnnotation bool
	// IPTags is the parsed tag list; nil when no annotation was present.
	IPTags                      *[]network.IPTag
}
+
+// Get the ip tag Request for the public ip from service annotations.
+func getServiceIPTagRequestForPublicIP(service *v1.Service) serviceIPTagRequest {
+	if service != nil {
+		if ipTagString, found := service.Annotations[consts.ServiceAnnotationIPTagsForPublicIP]; found {
+			return serviceIPTagRequest{
+				IPTagsRequestedByAnnotation: true,
+				IPTags:                      convertIPTagMapToSlice(getIPTagMap(ipTagString)),
+			}
+		}
+	}
+
+	return serviceIPTagRequest{
+		IPTagsRequestedByAnnotation: false,
+		IPTags:                      nil,
+	}
+}
+
// getIPTagMap parses a comma-delimited "key=value" list into a map. Entries
// that do not contain exactly one '=' are ignored; keys and values are
// trimmed of surrounding whitespace; on duplicate keys the last write wins.
func getIPTagMap(ipTagString string) map[string]string {
	result := make(map[string]string)
	for _, pair := range strings.Split(strings.TrimSpace(ipTagString), ",") {
		kv := strings.Split(pair, "=")
		if len(kv) != 2 {
			continue // skip malformed entries
		}
		result[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
	}
	return result
}
+
+func sortIPTags(ipTags *[]network.IPTag) {
+	if ipTags != nil {
+		sort.Slice(*ipTags, func(i, j int) bool {
+			ipTag := *ipTags
+			return to.String(ipTag[i].IPTagType) < to.String(ipTag[j].IPTagType) ||
+				to.String(ipTag[i].Tag) < to.String(ipTag[j].Tag)
+		})
+	}
+}
+
+func areIPTagsEquivalent(ipTags1 *[]network.IPTag, ipTags2 *[]network.IPTag) bool {
+	sortIPTags(ipTags1)
+	sortIPTags(ipTags2)
+
+	if ipTags1 == nil {
+		ipTags1 = &[]network.IPTag{}
+	}
+
+	if ipTags2 == nil {
+		ipTags2 = &[]network.IPTag{}
+	}
+
+	return reflect.DeepEqual(ipTags1, ipTags2)
+}
+
+func convertIPTagMapToSlice(ipTagMap map[string]string) *[]network.IPTag {
+	if ipTagMap == nil {
+		return nil
+	}
+
+	if len(ipTagMap) == 0 {
+		return &[]network.IPTag{}
+	}
+
+	outputTags := []network.IPTag{}
+	for k, v := range ipTagMap {
+		ipTag := network.IPTag{
+			IPTagType: to.StringPtr(k),
+			Tag:       to.StringPtr(v),
+		}
+		outputTags = append(outputTags, ipTag)
+	}
+
+	return &outputTags
+}
+
+func getDomainNameLabel(pip *network.PublicIPAddress) string {
+	if pip == nil || pip.PublicIPAddressPropertiesFormat == nil || pip.PublicIPAddressPropertiesFormat.DNSSettings == nil {
+		return ""
+	}
+	return to.String(pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel)
+}
+
+func getIdleTimeout(s *v1.Service) (*int32, error) {
+	const (
+		min = 4
+		max = 30
+	)
+
+	val, ok := s.Annotations[consts.ServiceAnnotationLoadBalancerIdleTimeout]
+	if !ok {
+		// Return a nil here as this will set the value to the azure default
+		return nil, nil
+	}
+
+	errInvalidTimeout := fmt.Errorf("idle timeout value must be a whole number representing minutes between %d and %d", min, max)
+	toInt, err := strconv.ParseInt(val, 10, 32)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing idle timeout value: %w: %v", err, errInvalidTimeout)
+	}
+	to32 := int32(toInt)
+
+	if to32 < min || to32 > max {
+		return nil, errInvalidTimeout
+	}
+	return &to32, nil
+}
+
// isFrontendIPChanged reports whether the given frontend IP configuration is
// out of sync with what the service currently requests — subnet and private
// IP allocation/address for internal LBs, or the referenced public IP
// resource for external LBs — meaning it must be recreated.
func (az *Cloud) isFrontendIPChanged(clusterName string, config network.FrontendIPConfiguration, service *v1.Service, lbFrontendIPConfigName string) (bool, error) {
	isServiceOwnsFrontendIP, isPrimaryService, err := az.serviceOwnsFrontendIP(config, service)
	if err != nil {
		return false, err
	}
	// The primary service owns this config but under a different name: the
	// config must change.
	if isServiceOwnsFrontendIP && isPrimaryService && !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) {
		return true, nil
	}
	// Configs with other names are not this service's concern.
	if !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) {
		return false, nil
	}
	loadBalancerIP := service.Spec.LoadBalancerIP
	isInternal := requiresInternalLoadBalancer(service)
	if isInternal {
		// Judge subnet
		subnetName := subnet(service)
		if subnetName != nil {
			subnet, existsSubnet, err := az.getSubnet(az.VnetName, *subnetName)
			if err != nil {
				return false, err
			}
			if !existsSubnet {
				return false, fmt.Errorf("failed to get subnet")
			}
			// The annotated subnet differs from the configured one.
			if config.Subnet != nil && !strings.EqualFold(to.String(config.Subnet.Name), to.String(subnet.Name)) {
				return true, nil
			}
		}
		if loadBalancerIP == "" {
			// No specific IP requested: a static allocation is stale.
			return config.PrivateIPAllocationMethod == network.IPAllocationMethodStatic, nil
		}
		// A specific IP requested: the config must be statically allocated
		// to exactly that address.
		return config.PrivateIPAllocationMethod != network.IPAllocationMethodStatic || !strings.EqualFold(loadBalancerIP, to.String(config.PrivateIPAddress)), nil
	}
	// External LB: compare the configured public IP resource against the one
	// the service should be using.
	pipName, _, err := az.determinePublicIPName(clusterName, service)
	if err != nil {
		return false, err
	}
	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
	pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
	if err != nil {
		return false, err
	}
	if !existsPip {
		// The PIP will be created, so the config must change to point at it.
		return true, nil
	}
	return config.PublicIPAddress != nil && !strings.EqualFold(to.String(pip.ID), to.String(config.PublicIPAddress.ID)), nil
}
+
// isFrontendIPConfigUnsafeToDelete checks if a frontend IP config is safe to be deleted.
// It is safe to be deleted if and only if there is no reference from other
// loadBalancing resources, including loadBalancing rules, outbound rules, inbound NAT rules
// and inbound NAT pools. Returns true when the config is UNSAFE to delete;
// each unsafe reference is also surfaced as a warning event on the service.
func (az *Cloud) isFrontendIPConfigUnsafeToDelete(
	lb *network.LoadBalancer,
	service *v1.Service,
	fipConfigID *string,
) (bool, error) {
	if lb == nil || fipConfigID == nil || *fipConfigID == "" {
		return false, fmt.Errorf("isFrontendIPConfigUnsafeToDelete: incorrect parameters")
	}

	// Snapshot the four kinds of LB sub-resources that can reference a
	// frontend IP configuration; each nil slice stays empty.
	var (
		lbRules         []network.LoadBalancingRule
		outboundRules   []network.OutboundRule
		inboundNatRules []network.InboundNatRule
		inboundNatPools []network.InboundNatPool
		unsafe          bool
	)

	if lb.LoadBalancerPropertiesFormat != nil {
		if lb.LoadBalancingRules != nil {
			lbRules = *lb.LoadBalancingRules
		}
		if lb.OutboundRules != nil {
			outboundRules = *lb.OutboundRules
		}
		if lb.InboundNatRules != nil {
			inboundNatRules = *lb.InboundNatRules
		}
		if lb.InboundNatPools != nil {
			inboundNatPools = *lb.InboundNatPools
		}
	}

	// check if there are load balancing rules from other services
	// referencing this frontend IP configuration
	for _, lbRule := range lbRules {
		if lbRule.LoadBalancingRulePropertiesFormat != nil &&
			lbRule.FrontendIPConfiguration != nil &&
			lbRule.FrontendIPConfiguration.ID != nil &&
			strings.EqualFold(*lbRule.FrontendIPConfiguration.ID, *fipConfigID) {
			// Rules owned by this service do not block deletion.
			if !az.serviceOwnsRule(service, *lbRule.Name) {
				warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by load balancing rules of other services", *fipConfigID, *lb.Name)
				klog.Warning(warningMsg)
				az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg)
				unsafe = true
				break
			}
		}
	}

	// check if there are outbound rules
	// referencing this frontend IP configuration
	for _, outboundRule := range outboundRules {
		if outboundRule.OutboundRulePropertiesFormat != nil && outboundRule.FrontendIPConfigurations != nil {
			outboundRuleFIPConfigs := *outboundRule.FrontendIPConfigurations
			if found := findMatchedOutboundRuleFIPConfig(fipConfigID, outboundRuleFIPConfigs); found {
				warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the outbound rule %s", *fipConfigID, *lb.Name, *outboundRule.Name)
				klog.Warning(warningMsg)
				az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg)
				unsafe = true
				break
			}
		}
	}

	// check if there are inbound NAT rules
	// referencing this frontend IP configuration
	for _, inboundNatRule := range inboundNatRules {
		if inboundNatRule.InboundNatRulePropertiesFormat != nil &&
			inboundNatRule.FrontendIPConfiguration != nil &&
			inboundNatRule.FrontendIPConfiguration.ID != nil &&
			strings.EqualFold(*inboundNatRule.FrontendIPConfiguration.ID, *fipConfigID) {
			warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the inbound NAT rule %s", *fipConfigID, *lb.Name, *inboundNatRule.Name)
			klog.Warning(warningMsg)
			az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg)
			unsafe = true
			break
		}
	}

	// check if there are inbound NAT pools
	// referencing this frontend IP configuration
	for _, inboundNatPool := range inboundNatPools {
		if inboundNatPool.InboundNatPoolPropertiesFormat != nil &&
			inboundNatPool.FrontendIPConfiguration != nil &&
			inboundNatPool.FrontendIPConfiguration.ID != nil &&
			strings.EqualFold(*inboundNatPool.FrontendIPConfiguration.ID, *fipConfigID) {
			warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the inbound NAT pool %s", *fipConfigID, *lb.Name, *inboundNatPool.Name)
			klog.Warning(warningMsg)
			az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg)
			unsafe = true
			break
		}
	}

	return unsafe, nil
}
+
+func findMatchedOutboundRuleFIPConfig(fipConfigID *string, outboundRuleFIPConfigs []network.SubResource) bool {
+	var found bool
+	for _, config := range outboundRuleFIPConfigs {
+		if config.ID != nil && strings.EqualFold(*config.ID, *fipConfigID) {
+			found = true
+		}
+	}
+	return found
+}
+
+func (az *Cloud) findFrontendIPConfigOfService(
+	fipConfigs *[]network.FrontendIPConfiguration,
+	service *v1.Service,
+) (*network.FrontendIPConfiguration, bool, error) {
+	for _, config := range *fipConfigs {
+		owns, isPrimaryService, err := az.serviceOwnsFrontendIP(config, service)
+		if err != nil {
+			return nil, false, err
+		}
+		if owns {
+			return &config, isPrimaryService, nil
+		}
+	}
+
+	return nil, false, nil
+}
+
// reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports  with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
// nodes only used if wantLb is true
func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) {
	isInternal := requiresInternalLoadBalancer(service)
	isBackendPoolPreConfigured := az.isBackendPoolPreConfigured(service)
	serviceName := getServiceName(service)
	klog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb)

	// Reconcile any shared load balancers first and work against the
	// resulting list of existing LBs.
	existingLBs, err := az.reconcileSharedLoadBalancer(service, clusterName, nodes)
	if err != nil {
		klog.Errorf("reconcileLoadBalancer: failed to reconcile shared load balancer: %v", err)
		return nil, err
	}

	lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb, existingLBs)
	if err != nil {
		klog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
		return nil, err
	}
	lbName := *lb.Name
	lbResourceGroup := az.getLoadBalancerResourceGroup()
	lbBackendPoolID := az.getBackendPoolID(lbName, az.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service))
	klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s/%s) wantLb(%t) resolved load balancer name", serviceName, lbResourceGroup, lbName, wantLb)
	defaultLBFrontendIPConfigName := az.getDefaultFrontendIPConfigName(service)
	defaultLBFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbResourceGroup, defaultLBFrontendIPConfigName)
	// dirtyLb tracks whether any reconciliation below modified lb, requiring
	// a CreateOrUpdate (or cleanup) call.
	dirtyLb := false

	// The idle-timeout annotation only matters when we actually want an LB.
	lbIdleTimeout, err := getIdleTimeout(service)
	if wantLb && err != nil {
		return nil, err
	}

	// reconcile the load balancer's backend pool configuration.
	if wantLb {
		preConfig, changed, err := az.LoadBalancerBackendPool.ReconcileBackendPools(clusterName, service, lb)
		if err != nil {
			return lb, err
		}
		if changed {
			dirtyLb = true
		}
		isBackendPoolPreConfigured = preConfig
	}

	// reconcile the load balancer's frontend IP configurations.
	ownedFIPConfig, changed, err := az.reconcileFrontendIPConfigs(clusterName, service, lb, wantLb, defaultLBFrontendIPConfigName)
	if err != nil {
		return lb, err
	}
	if changed {
		dirtyLb = true
	}

	// update probes/rules
	if ownedFIPConfig != nil {
		if ownedFIPConfig.ID != nil {
			defaultLBFrontendIPConfigID = *ownedFIPConfig.ID
		} else {
			return nil, fmt.Errorf("reconcileLoadBalancer for service (%s)(%t): nil ID for frontend IP config", serviceName, wantLb)
		}
	}

	if wantLb {
		// Refuse to proceed if other LB resources would conflict with the
		// frontend IP config we are about to use.
		err = az.checkLoadBalancerResourcesConflicts(lb, defaultLBFrontendIPConfigID, service)
		if err != nil {
			return nil, err
		}
	}

	expectedProbes, expectedRules, err := az.getExpectedLBRules(service, wantLb, defaultLBFrontendIPConfigID, lbBackendPoolID, lbName, lbIdleTimeout)
	if err != nil {
		return nil, err
	}

	if changed := az.reconcileLBProbes(lb, service, serviceName, wantLb, expectedProbes); changed {
		dirtyLb = true
	}

	if changed := az.reconcileLBRules(lb, service, serviceName, wantLb, expectedRules); changed {
		dirtyLb = true
	}

	if changed := az.ensureLoadBalancerTagged(lb); changed {
		dirtyLb = true
	}

	// We don't care if the LB exists or not
	// We only care about if there is any change in the LB, which means dirtyLB
	// If it is not exist, and no change to that, we don't CreateOrUpdate LB
	if dirtyLb {
		if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 {
			// No frontend IP configs left: the LB is orphaned; clean it up
			// instead of updating it.
			err := az.cleanOrphanedLoadBalancer(lb, existingLBs, service, clusterName)
			if err != nil {
				klog.Errorf("reconcileLoadBalancer for service(%s): lb(%s) - failed to cleanOrphanedLoadBalancer: %v", serviceName, lbName, err)
				return nil, err
			}
		} else {
			klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
			err := az.CreateOrUpdateLB(service, *lb)
			if err != nil {
				klog.Errorf("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
				return nil, err
			}

			if isInternal {
				// Refresh updated lb which will be used later in other places.
				newLB, exist, err := az.getAzureLoadBalancer(lbName, azcache.CacheReadTypeDefault)
				if err != nil {
					klog.Errorf("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
					return nil, err
				}
				if !exist {
					return nil, fmt.Errorf("load balancer %q not found", lbName)
				}
				lb = &newLB
			}
		}
	}

	if wantLb && nodes != nil && !isBackendPoolPreConfigured {
		// Add the machines to the backend pool if they're not already
		vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
		// Etag would be changed when updating backend pools, so invalidate lbCache after it.
		defer func() {
			_ = az.lbCache.Delete(lbName)
		}()

		if lb.LoadBalancerPropertiesFormat != nil && lb.BackendAddressPools != nil {
			backendPools := *lb.BackendAddressPools
			for _, backendPool := range backendPools {
				if strings.EqualFold(to.String(backendPool.Name), getBackendPoolName(clusterName, service)) {
					if err := az.LoadBalancerBackendPool.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, clusterName, lbName, backendPool); err != nil {
						return nil, err
					}
				}
			}
		}
	}

	klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName)
	return lb, nil
}
+
+func (az *Cloud) reconcileLBProbes(lb *network.LoadBalancer, service *v1.Service, serviceName string, wantLb bool, expectedProbes []network.Probe) bool {
+	// remove unwanted probes
+	dirtyProbes := false
+	var updatedProbes []network.Probe
+	if lb.Probes != nil {
+		updatedProbes = *lb.Probes
+	}
+	for i := len(updatedProbes) - 1; i >= 0; i-- {
+		existingProbe := updatedProbes[i]
+		if az.serviceOwnsRule(service, *existingProbe.Name) {
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
+			keepProbe := false
+			if findProbe(expectedProbes, existingProbe) {
+				klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
+				keepProbe = true
+			}
+			if !keepProbe {
+				updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...)
+				klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
+				dirtyProbes = true
+			}
+		}
+	}
+	// add missing, wanted probes
+	for _, expectedProbe := range expectedProbes {
+		foundProbe := false
+		if findProbe(updatedProbes, expectedProbe) {
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
+			foundProbe = true
+		}
+		if !foundProbe {
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
+			updatedProbes = append(updatedProbes, expectedProbe)
+			dirtyProbes = true
+		}
+	}
+	if dirtyProbes {
+		probesJSON, _ := json.Marshal(expectedProbes)
+		klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb probes updated: %s", serviceName, wantLb, string(probesJSON))
+		lb.Probes = &updatedProbes
+	}
+	return dirtyProbes
+}
+
+func (az *Cloud) reconcileLBRules(lb *network.LoadBalancer, service *v1.Service, serviceName string, wantLb bool, expectedRules []network.LoadBalancingRule) bool {
+	// update rules
+	dirtyRules := false
+	var updatedRules []network.LoadBalancingRule
+	if lb.LoadBalancingRules != nil {
+		updatedRules = *lb.LoadBalancingRules
+	}
+
+	// update rules: remove unwanted
+	for i := len(updatedRules) - 1; i >= 0; i-- {
+		existingRule := updatedRules[i]
+		if az.serviceOwnsRule(service, *existingRule.Name) {
+			keepRule := false
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
+			if findRule(expectedRules, existingRule, wantLb) {
+				klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
+				keepRule = true
+			}
+			if !keepRule {
+				klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
+				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
+				dirtyRules = true
+			}
+		}
+	}
+	// update rules: add needed
+	for _, expectedRule := range expectedRules {
+		foundRule := false
+		if findRule(updatedRules, expectedRule, wantLb) {
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
+			foundRule = true
+		}
+		if !foundRule {
+			klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
+			updatedRules = append(updatedRules, expectedRule)
+			dirtyRules = true
+		}
+	}
+	if dirtyRules {
+		ruleJSON, _ := json.Marshal(expectedRules)
+		klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rules updated: %s", serviceName, wantLb, string(ruleJSON))
+		lb.LoadBalancingRules = &updatedRules
+	}
+	return dirtyRules
+}
+
// reconcileFrontendIPConfigs reconciles the frontend IP configurations of the
// load balancer for the given service.
//
// When wantLb is false, every frontend config owned by the service that is
// safe to delete is removed. When wantLb is true, configs whose settings have
// changed are dropped and, if the service owns no config afterwards, a new one
// is created: for internal load balancers from the (annotated or default)
// subnet plus an optional static private IP, for public ones from the
// service's public IP resource.
//
// Returns the frontend config owned by the service (nil when wantLb is false
// or none is found), whether lb.FrontendIPConfigurations was modified, and
// any error encountered.
func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Service, lb *network.LoadBalancer, wantLb bool, defaultLBFrontendIPConfigName string) (*network.FrontendIPConfiguration, bool, error) {
	var err error
	lbName := *lb.Name
	serviceName := getServiceName(service)
	isInternal := requiresInternalLoadBalancer(service)
	dirtyConfigs := false
	var newConfigs []network.FrontendIPConfiguration
	if lb.FrontendIPConfigurations != nil {
		newConfigs = *lb.FrontendIPConfigurations
	}

	var ownedFIPConfig *network.FrontendIPConfiguration
	if !wantLb {
		// Tear-down path: delete the service's frontend configs where safe.
		// Iterate backwards because elements are removed in place.
		for i := len(newConfigs) - 1; i >= 0; i-- {
			config := newConfigs[i]
			isServiceOwnsFrontendIP, _, err := az.serviceOwnsFrontendIP(config, service)
			if err != nil {
				return nil, false, err
			}
			if isServiceOwnsFrontendIP {
				unsafe, err := az.isFrontendIPConfigUnsafeToDelete(lb, service, config.ID)
				if err != nil {
					return nil, false, err
				}

				// If the frontend IP configuration is not being referenced by:
				// 1. loadBalancing rules of other services with different ports;
				// 2. outbound rules;
				// 3. inbound NAT rules;
				// 4. inbound NAT pools,
				// do the deletion, or skip it.
				if !unsafe {
					var configNameToBeDeleted string
					if newConfigs[i].Name != nil {
						configNameToBeDeleted = *newConfigs[i].Name
						klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, configNameToBeDeleted)
					} else {
						klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): nil name of lb frontendconfig", serviceName, wantLb)
					}

					newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
					dirtyConfigs = true
				}
			}
		}
	} else {
		// Wanted path: first drop any config whose frontend IP settings no
		// longer match the service (subnet, IP, name, etc.).
		for i := len(newConfigs) - 1; i >= 0; i-- {
			config := newConfigs[i]
			isFipChanged, err := az.isFrontendIPChanged(clusterName, config, service, defaultLBFrontendIPConfigName)
			if err != nil {
				return nil, false, err
			}
			if isFipChanged {
				klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
				newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
				dirtyConfigs = true
			}
		}

		ownedFIPConfig, _, err = az.findFrontendIPConfigOfService(&newConfigs, service)
		if err != nil {
			return nil, false, err
		}

		if ownedFIPConfig == nil {
			klog.V(4).Infof("ensure(%s): lb(%s) - creating a new frontend IP config", serviceName, lbName)

			// construct FrontendIPConfigurationPropertiesFormat
			var fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat
			if isInternal {
				// Internal LB: bind to the annotated subnet, falling back to
				// the cluster's configured subnet.
				subnetName := subnet(service)
				if subnetName == nil {
					subnetName = &az.SubnetName
				}
				subnet, existsSubnet, err := az.getSubnet(az.VnetName, *subnetName)
				if err != nil {
					return nil, false, err
				}

				if !existsSubnet {
					return nil, false, fmt.Errorf("ensure(%s): lb(%s) - failed to get subnet: %s/%s", serviceName, lbName, az.VnetName, az.SubnetName)
				}

				configProperties := network.FrontendIPConfigurationPropertiesFormat{
					Subnet: &subnet,
				}

				if utilnet.IsIPv6String(service.Spec.ClusterIP) {
					configProperties.PrivateIPAddressVersion = network.IPVersionIPv6
				}

				loadBalancerIP := service.Spec.LoadBalancerIP
				if loadBalancerIP != "" {
					// Service asked for a specific private IP.
					configProperties.PrivateIPAllocationMethod = network.IPAllocationMethodStatic
					configProperties.PrivateIPAddress = &loadBalancerIP
				} else {
					// We'll need to call GetLoadBalancer later to retrieve allocated IP.
					configProperties.PrivateIPAllocationMethod = network.IPAllocationMethodDynamic
				}

				fipConfigurationProperties = &configProperties
			} else {
				// Public LB: ensure the public IP resource exists and point
				// the frontend config at it.
				pipName, shouldPIPExisted, err := az.determinePublicIPName(clusterName, service)
				if err != nil {
					return nil, false, err
				}
				domainNameLabel, found := getPublicIPDomainNameLabel(service)
				pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel, clusterName, shouldPIPExisted, found)
				if err != nil {
					return nil, false, err
				}
				fipConfigurationProperties = &network.FrontendIPConfigurationPropertiesFormat{
					PublicIPAddress: &network.PublicIPAddress{ID: pip.ID},
				}
			}

			newConfig := network.FrontendIPConfiguration{
				Name:                                    to.StringPtr(defaultLBFrontendIPConfigName),
				ID:                                      to.StringPtr(fmt.Sprintf(consts.FrontendIPConfigIDTemplate, az.SubscriptionID, az.ResourceGroup, *lb.Name, defaultLBFrontendIPConfigName)),
				FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties,
			}

			// only add zone information for new internal frontend IP configurations for standard load balancer not deployed to an edge zone.
			location := az.Location
			zones, err := az.getRegionZonesBackoff(location)
			if err != nil {
				return nil, false, err
			}
			if isInternal && az.useStandardLoadBalancer() && len(zones) > 0 && !az.HasExtendedLocation() {
				newConfig.Zones = &zones
			}
			newConfigs = append(newConfigs, newConfig)
			klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, defaultLBFrontendIPConfigName)
			dirtyConfigs = true
		}
	}

	if dirtyConfigs {
		lb.FrontendIPConfigurations = &newConfigs
	}

	return ownedFIPConfig, dirtyConfigs, err
}
+
+// checkLoadBalancerResourcesConflicts checks if the service is consuming
+// ports which conflict with the existing loadBalancer resources,
+// including inbound NAT rule, inbound NAT pools and loadBalancing rules
+func (az *Cloud) checkLoadBalancerResourcesConflicts(
+	lb *network.LoadBalancer,
+	frontendIPConfigID string,
+	service *v1.Service,
+) error {
+	if service.Spec.Ports == nil {
+		return nil
+	}
+	ports := service.Spec.Ports
+
+	for _, port := range ports {
+		if lb.LoadBalancingRules != nil {
+			for _, rule := range *lb.LoadBalancingRules {
+				if lbRuleConflictsWithPort(rule, frontendIPConfigID, port) {
+					// ignore self-owned rules for unit test
+					if rule.Name != nil && az.serviceOwnsRule(service, *rule.Name) {
+						continue
+					}
+					return fmt.Errorf("checkLoadBalancerResourcesConflicts: service port %s is trying to "+
+						"consume the port %d which is being referenced by an existing loadBalancing rule %s with "+
+						"the same protocol %s and frontend IP config with ID %s",
+						port.Name,
+						*rule.FrontendPort,
+						*rule.Name,
+						rule.Protocol,
+						*rule.FrontendIPConfiguration.ID)
+				}
+			}
+		}
+
+		if lb.InboundNatRules != nil {
+			for _, inboundNatRule := range *lb.InboundNatRules {
+				if inboundNatRuleConflictsWithPort(inboundNatRule, frontendIPConfigID, port) {
+					return fmt.Errorf("checkLoadBalancerResourcesConflicts: service port %s is trying to "+
+						"consume the port %d which is being referenced by an existing inbound NAT rule %s with "+
+						"the same protocol %s and frontend IP config with ID %s",
+						port.Name,
+						*inboundNatRule.FrontendPort,
+						*inboundNatRule.Name,
+						inboundNatRule.Protocol,
+						*inboundNatRule.FrontendIPConfiguration.ID)
+				}
+			}
+		}
+
+		if lb.InboundNatPools != nil {
+			for _, pool := range *lb.InboundNatPools {
+				if inboundNatPoolConflictsWithPort(pool, frontendIPConfigID, port) {
+					return fmt.Errorf("checkLoadBalancerResourcesConflicts: service port %s is trying to "+
+						"consume the port %d which is being in the range (%d-%d) of an existing "+
+						"inbound NAT pool %s with the same protocol %s and frontend IP config with ID %s",
+						port.Name,
+						port.Port,
+						*pool.FrontendPortRangeStart,
+						*pool.FrontendPortRangeEnd,
+						*pool.Name,
+						pool.Protocol,
+						*pool.FrontendIPConfiguration.ID)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func inboundNatPoolConflictsWithPort(pool network.InboundNatPool, frontendIPConfigID string, port v1.ServicePort) bool {
+	return pool.InboundNatPoolPropertiesFormat != nil &&
+		pool.FrontendIPConfiguration != nil &&
+		pool.FrontendIPConfiguration.ID != nil &&
+		strings.EqualFold(*pool.FrontendIPConfiguration.ID, frontendIPConfigID) &&
+		strings.EqualFold(string(pool.Protocol), string(port.Protocol)) &&
+		pool.FrontendPortRangeStart != nil &&
+		pool.FrontendPortRangeEnd != nil &&
+		*pool.FrontendPortRangeStart <= port.Port &&
+		*pool.FrontendPortRangeEnd >= port.Port
+}
+
+func inboundNatRuleConflictsWithPort(inboundNatRule network.InboundNatRule, frontendIPConfigID string, port v1.ServicePort) bool {
+	return inboundNatRule.InboundNatRulePropertiesFormat != nil &&
+		inboundNatRule.FrontendIPConfiguration != nil &&
+		inboundNatRule.FrontendIPConfiguration.ID != nil &&
+		strings.EqualFold(*inboundNatRule.FrontendIPConfiguration.ID, frontendIPConfigID) &&
+		strings.EqualFold(string(inboundNatRule.Protocol), string(port.Protocol)) &&
+		inboundNatRule.FrontendPort != nil &&
+		*inboundNatRule.FrontendPort == port.Port
+}
+
+func lbRuleConflictsWithPort(rule network.LoadBalancingRule, frontendIPConfigID string, port v1.ServicePort) bool {
+	return rule.LoadBalancingRulePropertiesFormat != nil &&
+		rule.FrontendIPConfiguration != nil &&
+		rule.FrontendIPConfiguration.ID != nil &&
+		strings.EqualFold(*rule.FrontendIPConfiguration.ID, frontendIPConfigID) &&
+		strings.EqualFold(string(rule.Protocol), string(port.Protocol)) &&
+		rule.FrontendPort != nil &&
+		*rule.FrontendPort == port.Port
+}
+
+func parseHealthProbeProtocolAndPath(service *v1.Service) (string, string) {
+	var protocol, path string
+	if v, ok := service.Annotations[consts.ServiceAnnotationLoadBalancerHealthProbeProtocol]; ok {
+		protocol = v
+	} else {
+		return protocol, path
+	}
+	// ignore the request path if using TCP
+	if strings.EqualFold(protocol, string(network.ProbeProtocolHTTP)) ||
+		strings.EqualFold(protocol, string(network.ProbeProtocolHTTPS)) {
+		if v, ok := service.Annotations[consts.ServiceAnnotationLoadBalancerHealthProbeRequestPath]; ok {
+			path = v
+		}
+	}
+	return protocol, path
+}
+
// getExpectedLBRules builds the desired health probes and load balancing
// rules for the service's ports. When wantLb is false the port list is
// treated as empty, so both results are empty. TCP reset is enabled on
// standard-SKU load balancers. An internal standard-SKU service with the
// HA-ports annotation (or an SCTP port) collapses to a single all-protocol
// rule on port 0, after which remaining ports are skipped.
func (az *Cloud) getExpectedLBRules(
	service *v1.Service,
	wantLb bool,
	lbFrontendIPConfigID string,
	lbBackendPoolID string,
	lbName string,
	lbIdleTimeout *int32) ([]network.Probe, []network.LoadBalancingRule, error) {

	var ports []v1.ServicePort
	if wantLb {
		ports = service.Spec.Ports
	} else {
		ports = []v1.ServicePort{}
	}

	// TCP reset is only available (and always enabled) on standard SKU.
	var enableTCPReset *bool
	if az.useStandardLoadBalancer() {
		enableTCPReset = to.BoolPtr(true)
	}

	var expectedProbes []network.Probe
	var expectedRules []network.LoadBalancingRule
	highAvailabilityPortsEnabled := false
	for _, port := range ports {
		if !requiresInternalLoadBalancer(service) && port.Protocol == v1.ProtocolSCTP {
			return nil, nil, fmt.Errorf("SCTP is only supported on internal LoadBalancer")
		}

		if highAvailabilityPortsEnabled {
			// Since the port is always 0 when enabling HA, only one rule should be configured.
			break
		}

		lbRuleName := az.getLoadBalancerRuleName(service, port.Protocol, port.Port)
		klog.V(2).Infof("getExpectedLBRules lb name (%s) rule name (%s)", lbName, lbRuleName)

		transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return expectedProbes, expectedRules, err
		}

		// Annotations may override the probe protocol and request path.
		probeProtocol, requestPath := parseHealthProbeProtocolAndPath(service)
		if servicehelpers.NeedsHealthCheck(service) {
			// externalTrafficPolicy: Local — probe the kube-proxy health
			// check endpoint instead of the service's node port.
			podPresencePath, podPresencePort := servicehelpers.GetServiceHealthCheckPathPort(service)
			if probeProtocol == "" {
				probeProtocol = string(network.ProbeProtocolHTTP)
			}

			needRequestPath := strings.EqualFold(probeProtocol, string(network.ProbeProtocolHTTP)) || strings.EqualFold(probeProtocol, string(network.ProbeProtocolHTTPS))
			if requestPath == "" && needRequestPath {
				requestPath = podPresencePath
			}

			expectedProbes = append(expectedProbes, network.Probe{
				Name: &lbRuleName,
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					RequestPath:       to.StringPtr(requestPath),
					Protocol:          network.ProbeProtocol(probeProtocol),
					Port:              to.Int32Ptr(podPresencePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			})
		} else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
			// we only add the expected probe if we're doing TCP
			if probeProtocol == "" {
				probeProtocol = string(*probeProto)
			}
			var actualPath *string
			if !strings.EqualFold(probeProtocol, string(network.ProbeProtocolTCP)) {
				if requestPath != "" {
					actualPath = to.StringPtr(requestPath)
				} else {
					actualPath = to.StringPtr("/healthz")
				}
			}
			expectedProbes = append(expectedProbes, network.Probe{
				Name: &lbRuleName,
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Protocol:          network.ProbeProtocol(probeProtocol),
					RequestPath:       actualPath,
					Port:              to.Int32Ptr(port.NodePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			})
		}

		loadDistribution := network.LoadDistributionDefault
		if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
			loadDistribution = network.LoadDistributionSourceIP
		}

		expectedRule := network.LoadBalancingRule{
			Name: &lbRuleName,
			LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
				Protocol: *transportProto,
				FrontendIPConfiguration: &network.SubResource{
					ID: to.StringPtr(lbFrontendIPConfigID),
				},
				BackendAddressPool: &network.SubResource{
					ID: to.StringPtr(lbBackendPoolID),
				},
				LoadDistribution:    loadDistribution,
				FrontendPort:        to.Int32Ptr(port.Port),
				BackendPort:         to.Int32Ptr(port.Port),
				DisableOutboundSnat: to.BoolPtr(az.disableLoadBalancerOutboundSNAT()),
				EnableTCPReset:      enableTCPReset,
				EnableFloatingIP:    to.BoolPtr(true),
			},
		}

		// Idle timeout is only configurable for TCP rules.
		if port.Protocol == v1.ProtocolTCP {
			expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout
		}

		if requiresInternalLoadBalancer(service) &&
			strings.EqualFold(az.LoadBalancerSku, consts.LoadBalancerSkuStandard) &&
			(strings.EqualFold(service.Annotations[consts.ServiceAnnotationLoadBalancerEnableHighAvailabilityPorts], consts.TrueAnnotationValue) || port.Protocol == v1.ProtocolSCTP) {
			expectedRule.FrontendPort = to.Int32Ptr(0)
			expectedRule.BackendPort = to.Int32Ptr(0)
			expectedRule.Protocol = network.TransportProtocolAll
			highAvailabilityPortsEnabled = true
		}

		// we didn't construct the probe objects for UDP or SCTP because they're not allowed on Azure.
		// However, when externalTrafficPolicy is Local, Kubernetes HTTP health check would be used for probing.
		if servicehelpers.NeedsHealthCheck(service) || (port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP) {
			expectedRule.Probe = &network.SubResource{
				ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), lbRuleName)),
			}
		}

		expectedRules = append(expectedRules, expectedRule)
	}

	return expectedProbes, expectedRules, nil
}
+
// This reconciles the Network Security Group similar to how the LB is reconciled.
// This entails adding required, missing SecurityRules and removing stale rules.
//
// The destination is the load balancer IP (plus any additional public IPs
// from the service annotation), or "*" when no IP is known. Source prefixes
// come from the service's LoadBalancerSourceRanges and service tags; when
// neither restricts access, "Internet" is used for external services. The
// updated NSG is written back to ARM only when something changed, and the
// NSG cache entry is invalidated afterwards.
func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {
	serviceName := getServiceName(service)
	klog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)

	ports := service.Spec.Ports
	if ports == nil {
		if useSharedSecurityRule(service) {
			klog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name)
			return nil, fmt.Errorf("no port info for reconciling shared rule for service %s", service.Name)
		}
		ports = []v1.ServicePort{}
	}

	sg, err := az.getSecurityGroup(azcache.CacheReadTypeDefault)
	if err != nil {
		return nil, err
	}

	destinationIPAddress := ""
	if wantLb && lbIP == nil {
		// Cannot build destination-scoped rules without the LB IP.
		return nil, fmt.Errorf("no load balancer IP for setting up security rules for service %s", service.Name)
	}
	if lbIP != nil {
		destinationIPAddress = *lbIP
	}

	if destinationIPAddress == "" {
		destinationIPAddress = "*"
	}

	additionalIPs, err := getServiceAdditionalPublicIPs(service)
	if err != nil {
		return nil, fmt.Errorf("unable to get additional public IPs, error=%v", err)
	}

	destinationIPAddresses := []string{destinationIPAddress}
	if destinationIPAddress != "*" {
		destinationIPAddresses = append(destinationIPAddresses, additionalIPs...)
	}

	sourceRanges, err := servicehelpers.GetLoadBalancerSourceRanges(service)
	if err != nil {
		return nil, err
	}
	// When service tags are present they take over from the default
	// allow-all source range.
	serviceTags := getServiceTags(service)
	if len(serviceTags) != 0 {
		delete(sourceRanges, consts.DefaultLoadBalancerSourceRanges)
	}

	var sourceAddressPrefixes []string
	if (sourceRanges == nil || servicehelpers.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 {
		if !requiresInternalLoadBalancer(service) {
			sourceAddressPrefixes = []string{"Internet"}
		}
	} else {
		for _, ip := range sourceRanges {
			sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
		}
		sourceAddressPrefixes = append(sourceAddressPrefixes, serviceTags...)
	}

	expectedSecurityRules, err := az.getExpectedSecurityRules(wantLb, ports, sourceAddressPrefixes, service, destinationIPAddresses, sourceRanges)
	if err != nil {
		return nil, err
	}

	// update security rules
	dirtySg, updatedRules, err := az.reconcileSecurityRules(sg, service, serviceName, wantLb, expectedSecurityRules, ports, sourceAddressPrefixes, destinationIPAddresses)
	if err != nil {
		return nil, err
	}

	// Tag reconciliation can also dirty the NSG even when rules are unchanged.
	changed := az.ensureSecurityGroupTagged(&sg)
	if changed {
		dirtySg = true
	}

	if dirtySg {
		sg.SecurityRules = &updatedRules
		klog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
		klog.V(10).Infof("CreateOrUpdateSecurityGroup(%q): start", *sg.Name)
		err := az.CreateOrUpdateSecurityGroup(sg)
		if err != nil {
			klog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name)
			return nil, err
		}
		klog.V(10).Infof("CreateOrUpdateSecurityGroup(%q): end", *sg.Name)
		// Drop the cached NSG so the next read observes the update.
		_ = az.nsgCache.Delete(to.String(sg.Name))
	}
	return &sg, nil
}
+
// reconcileSecurityRules computes the updated rule set for the security group:
// it removes service-owned rules that are no longer expected, prunes this
// service's destinations from shared rules on deletion, consolidates shared
// rules, and appends missing expected rules with the next free priority.
// It returns whether anything changed, the resulting rule list, and an error
// (only when no free priority remains).
func (az *Cloud) reconcileSecurityRules(sg network.SecurityGroup, service *v1.Service, serviceName string, wantLb bool, expectedSecurityRules []network.SecurityRule, ports []v1.ServicePort, sourceAddressPrefixes []string, destinationIPAddresses []string) (bool, []network.SecurityRule, error) {
	dirtySg := false
	var updatedRules []network.SecurityRule
	if sg.SecurityGroupPropertiesFormat != nil && sg.SecurityGroupPropertiesFormat.SecurityRules != nil {
		updatedRules = *sg.SecurityGroupPropertiesFormat.SecurityRules
	}

	for _, r := range updatedRules {
		klog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange))
	}

	// update security rules: remove unwanted rules that belong privately
	// to this service
	// (iterate backwards because elements are removed in place)
	for i := len(updatedRules) - 1; i >= 0; i-- {
		existingRule := updatedRules[i]
		if az.serviceOwnsRule(service, *existingRule.Name) {
			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
			keepRule := false
			if findSecurityRule(expectedSecurityRules, existingRule) {
				klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
				keepRule = true
			}
			if !keepRule {
				klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
				dirtySg = true
			}
		}
	}

	// update security rules: if the service uses a shared rule and is being deleted,
	// then remove it from the shared rule
	if useSharedSecurityRule(service) && !wantLb {
		for _, port := range ports {
			for _, sourceAddressPrefix := range sourceAddressPrefixes {
				sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix)
				sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName)
				if !sharedRuleFound {
					klog.V(4).Infof("Didn't find shared rule %s for service %s", sharedRuleName, service.Name)
					continue
				}
				if sharedRule.DestinationAddressPrefixes == nil {
					klog.V(4).Infof("Didn't find DestinationAddressPrefixes in shared rule for service %s", service.Name)
					continue
				}
				existingPrefixes := *sharedRule.DestinationAddressPrefixes
				for _, destinationIPAddress := range destinationIPAddresses {
					addressIndex, found := findIndex(existingPrefixes, destinationIPAddress)
					if !found {
						klog.Warningf("Didn't find destination address %v in shared rule %s for service %s", destinationIPAddress, sharedRuleName, service.Name)
						continue
					}
					if len(existingPrefixes) == 1 {
						// Last destination: the whole shared rule goes away.
						updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...)
					} else {
						// NOTE(review): this append writes into existingPrefixes'
						// backing array, so later iterations of this loop observe
						// the mutated slice — confirm this aliasing is intended.
						newDestinations := append(existingPrefixes[:addressIndex], existingPrefixes[addressIndex+1:]...)
						sharedRule.DestinationAddressPrefixes = &newDestinations
						updatedRules[sharedIndex] = sharedRule
					}
					dirtySg = true
				}

			}
		}
	}

	// update security rules: prepare rules for consolidation
	for index, rule := range updatedRules {
		if allowsConsolidation(rule) {
			updatedRules[index] = makeConsolidatable(rule)
		}
	}
	for index, rule := range expectedSecurityRules {
		if allowsConsolidation(rule) {
			expectedSecurityRules[index] = makeConsolidatable(rule)
		}
	}
	// update security rules: add needed
	for _, expectedRule := range expectedSecurityRules {
		foundRule := false
		if findSecurityRule(updatedRules, expectedRule) {
			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
			foundRule = true
		}
		if foundRule && allowsConsolidation(expectedRule) {
			// Shared rule already present: merge this service's destinations
			// into the existing consolidatable rule.
			index, _ := findConsolidationCandidate(updatedRules, expectedRule)
			updatedRules[index] = consolidate(updatedRules[index], expectedRule)
			dirtySg = true
		}
		if !foundRule {
			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name)

			nextAvailablePriority, err := getNextAvailablePriority(updatedRules)
			if err != nil {
				return false, nil, err
			}

			expectedRule.Priority = to.Int32Ptr(nextAvailablePriority)
			updatedRules = append(updatedRules, expectedRule)
			dirtySg = true
		}
	}

	for _, r := range updatedRules {
		klog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange))
	}
	return dirtySg, updatedRules, nil
}
+
// getExpectedSecurityRules builds the desired inbound NSG rules for the
// service: one Allow rule per (port, source prefix) pair, plus — when the
// deny-all annotation is set and source ranges restrict access — one Deny
// rule per port with source "*". Returns an empty slice when wantLb is false.
func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, sourceAddressPrefixes []string, service *v1.Service, destinationIPAddresses []string, sourceRanges utilnet.IPNetSet) ([]network.SecurityRule, error) {
	expectedSecurityRules := []network.SecurityRule{}

	if wantLb {
		// Pre-size: one allow rule per (port, source prefix) combination,
		// filled by flat index below.
		expectedSecurityRules = make([]network.SecurityRule, len(ports)*len(sourceAddressPrefixes))

		for i, port := range ports {
			_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol)
			if err != nil {
				return nil, err
			}
			for j := range sourceAddressPrefixes {
				ix := i*len(sourceAddressPrefixes) + j
				securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j])
				nsgRule := network.SecurityRule{
					Name: to.StringPtr(securityRuleName),
					SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
						Protocol:             *securityProto,
						SourcePortRange:      to.StringPtr("*"),
						DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))),
						SourceAddressPrefix:  to.StringPtr(sourceAddressPrefixes[j]),
						Access:               network.SecurityRuleAccessAllow,
						Direction:            network.SecurityRuleDirectionInbound,
					},
				}
				if len(destinationIPAddresses) == 1 {
					// continue to use DestinationAddressPrefix to avoid NSG updates for existing rules.
					nsgRule.DestinationAddressPrefix = to.StringPtr(destinationIPAddresses[0])
				} else {
					nsgRule.DestinationAddressPrefixes = to.StringSlicePtr(destinationIPAddresses)
				}
				expectedSecurityRules[ix] = nsgRule
			}
		}

		// The deny-all rule only makes sense when the allow rules are scoped
		// to specific source ranges and the annotation opts in.
		shouldAddDenyRule := false
		if len(sourceRanges) > 0 && !servicehelpers.IsAllowAll(sourceRanges) {
			if v, ok := service.Annotations[consts.ServiceAnnotationDenyAllExceptLoadBalancerSourceRanges]; ok && strings.EqualFold(v, consts.TrueAnnotationValue) {
				shouldAddDenyRule = true
			}
		}
		if shouldAddDenyRule {
			for _, port := range ports {
				_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol)
				if err != nil {
					return nil, err
				}
				securityRuleName := az.getSecurityRuleName(service, port, "deny_all")
				nsgRule := network.SecurityRule{
					Name: to.StringPtr(securityRuleName),
					SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
						Protocol:             *securityProto,
						SourcePortRange:      to.StringPtr("*"),
						DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))),
						SourceAddressPrefix:  to.StringPtr("*"),
						Access:               network.SecurityRuleAccessDeny,
						Direction:            network.SecurityRuleDirectionInbound,
					},
				}
				if len(destinationIPAddresses) == 1 {
					// continue to use DestinationAddressPrefix to avoid NSG updates for existing rules.
					nsgRule.DestinationAddressPrefix = to.StringPtr(destinationIPAddresses[0])
				} else {
					nsgRule.DestinationAddressPrefixes = to.StringSlicePtr(destinationIPAddresses)
				}
				expectedSecurityRules = append(expectedSecurityRules, nsgRule)
			}
		}
	}

	for _, r := range expectedSecurityRules {
		klog.V(10).Infof("Expecting security rule for %s: %s:%s -> %v %v :%s", service.Name, to.String(r.SourceAddressPrefix), to.String(r.SourcePortRange), to.String(r.DestinationAddressPrefix), to.StringSlice(r.DestinationAddressPrefixes), to.String(r.DestinationPortRange))
	}
	return expectedSecurityRules, nil
}
+
+func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (bool, error) {
+	existingManagedLBs, err := az.ListManagedLBs(service, nodes, clusterName)
+	if err != nil {
+		return false, fmt.Errorf("shouldUpdateLoadBalancer: failed to list managed load balancers: %w", err)
+	}
+
+	_, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nodes, false, existingManagedLBs)
+	return existsLb && service.ObjectMeta.DeletionTimestamp == nil, nil
+}
+
// logSafe renders a possibly-nil string pointer for logging, substituting
// "(nil)" when the pointer is nil.
func logSafe(s *string) string {
	if s != nil {
		return *s
	}
	return "(nil)"
}
+
// logSafeCollection renders either a single prefix or a prefix collection for
// logging. The single value wins when non-nil; otherwise the collection is
// joined as "[a,b,...]"; "(nil)" is returned when both are nil.
func logSafeCollection(s *string, strs *[]string) string {
	if s != nil {
		return *s
	}
	if strs != nil {
		return "[" + strings.Join(*strs, ",") + "]"
	}
	return "(nil)"
}
+
+func findSecurityRuleByName(rules []network.SecurityRule, ruleName string) (int, network.SecurityRule, bool) {
+	for index, rule := range rules {
+		if rule.Name != nil && strings.EqualFold(*rule.Name, ruleName) {
+			return index, rule, true
+		}
+	}
+	return 0, network.SecurityRule{}, false
+}
+
// findIndex returns the position of the first case-insensitive match of s in
// strs, and whether a match was found (0, false otherwise).
func findIndex(strs []string, s string) (int, bool) {
	for i := range strs {
		if strings.EqualFold(strs[i], s) {
			return i, true
		}
	}
	return 0, false
}
+
+func allowsConsolidation(rule network.SecurityRule) bool {
+	return strings.HasPrefix(to.String(rule.Name), "shared")
+}
+
+func findConsolidationCandidate(rules []network.SecurityRule, rule network.SecurityRule) (int, bool) {
+	for index, r := range rules {
+		if allowsConsolidation(r) {
+			if strings.EqualFold(to.String(r.Name), to.String(rule.Name)) {
+				return index, true
+			}
+		}
+	}
+
+	return 0, false
+}
+
// makeConsolidatable returns a copy of rule whose destination is always carried
// in DestinationAddressPrefixes: a single DestinationAddressPrefix is folded into
// a one-element collection via collectionOrSingle. This normalized form is what
// consolidate() merges into.
func makeConsolidatable(rule network.SecurityRule) network.SecurityRule {
	return network.SecurityRule{
		Name: rule.Name,
		SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
			Priority:                   rule.Priority,
			Protocol:                   rule.Protocol,
			SourcePortRange:            rule.SourcePortRange,
			SourcePortRanges:           rule.SourcePortRanges,
			DestinationPortRange:       rule.DestinationPortRange,
			DestinationPortRanges:      rule.DestinationPortRanges,
			SourceAddressPrefix:        rule.SourceAddressPrefix,
			SourceAddressPrefixes:      rule.SourceAddressPrefixes,
			// Normalize single prefix -> collection so later merges are uniform.
			DestinationAddressPrefixes: collectionOrSingle(rule.DestinationAddressPrefixes, rule.DestinationAddressPrefix),
			Access:                     rule.Access,
			Direction:                  rule.Direction,
		},
	}
}
+
// consolidate merges newRule's destination prefixes (single and collection forms)
// into existingRule's DestinationAddressPrefixes, keeping every other property of
// existingRule unchanged, and returns the merged rule.
func consolidate(existingRule network.SecurityRule, newRule network.SecurityRule) network.SecurityRule {
	destinations := appendElements(existingRule.SecurityRulePropertiesFormat.DestinationAddressPrefixes, newRule.DestinationAddressPrefix, newRule.DestinationAddressPrefixes)
	destinations = deduplicate(destinations) // there are transient conditions during controller startup where it tries to add a service that is already added

	return network.SecurityRule{
		Name: existingRule.Name,
		SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
			Priority:                   existingRule.Priority,
			Protocol:                   existingRule.Protocol,
			SourcePortRange:            existingRule.SourcePortRange,
			SourcePortRanges:           existingRule.SourcePortRanges,
			DestinationPortRange:       existingRule.DestinationPortRange,
			DestinationPortRanges:      existingRule.DestinationPortRanges,
			SourceAddressPrefix:        existingRule.SourceAddressPrefix,
			SourceAddressPrefixes:      existingRule.SourceAddressPrefixes,
			DestinationAddressPrefixes: destinations,
			Access:                     existingRule.Access,
			Direction:                  existingRule.Direction,
		},
	}
}
+
// collectionOrSingle prefers the collection form of a destination: it returns
// collection when it has elements, otherwise wraps s in a one-element slice
// (or an empty slice when s is nil). The result is never nil.
func collectionOrSingle(collection *[]string, s *string) *[]string {
	if collection != nil && len(*collection) > 0 {
		return collection
	}
	result := []string{}
	if s != nil {
		result = append(result, *s)
	}
	return &result
}
+
+func appendElements(collection *[]string, appendString *string, appendStrings *[]string) *[]string {
+	newCollection := []string{}
+
+	if collection != nil {
+		newCollection = append(newCollection, *collection...)
+	}
+	if appendString != nil {
+		newCollection = append(newCollection, *appendString)
+	}
+	if appendStrings != nil {
+		newCollection = append(newCollection, *appendStrings...)
+	}
+
+	return &newCollection
+}
+
+func deduplicate(collection *[]string) *[]string {
+	if collection == nil {
+		return nil
+	}
+
+	seen := map[string]bool{}
+	result := make([]string, 0, len(*collection))
+
+	for _, v := range *collection {
+		if seen[v] {
+			// skip this element
+		} else {
+			seen[v] = true
+			result = append(result, v)
+		}
+	}
+
+	return &result
+}
+
// shouldReleaseExistingOwnedPublicIP determines whether an owned public IP can be
// released. User-assigned PIPs are never released, and a PIP still referenced by
// other services (per its service tags) is kept; otherwise the release criteria
// #1-#4 below apply.
func shouldReleaseExistingOwnedPublicIP(existingPip *network.PublicIPAddress, lbShouldExist, lbIsInternal, isUserAssignedPIP bool, desiredPipName string, ipTagRequest serviceIPTagRequest) bool {
	// skip deleting user created pip
	if isUserAssignedPIP {
		return false
	}

	// Latch some variables for readability purposes.
	pipName := *(*existingPip).Name

	// Assume the current IP Tags are empty by default unless properties specify otherwise.
	currentIPTags := &[]network.IPTag{}
	pipPropertiesFormat := (*existingPip).PublicIPAddressPropertiesFormat
	if pipPropertiesFormat != nil {
		currentIPTags = (*pipPropertiesFormat).IPTags
	}

	// Check whether the public IP is being referenced by other service.
	// The owned public IP can be released only when there is not other service using it.
	if serviceTag := getServiceFromPIPServiceTags(existingPip.Tags); serviceTag != "" {
		// case 1: there is at least one reference when deleting the PIP
		if !lbShouldExist && len(parsePIPServiceTag(&serviceTag)) > 0 {
			return false
		}

		// case 2: there is at least one reference from other service
		if lbShouldExist && len(parsePIPServiceTag(&serviceTag)) > 1 {
			return false
		}
	}

	// Release the ip under the following criteria -
	// #1 - If we don't actually want a load balancer,
	return !lbShouldExist ||
		// #2 - If the load balancer is internal, and thus doesn't require public exposure
		lbIsInternal ||
		// #3 - If the name of this public ip does not match the desired name,
		(pipName != desiredPipName) ||
		// #4 If the service annotations have specified the ip tags that the public ip must have, but they do not match the ip tags of the existing instance
		(ipTagRequest.IPTagsRequestedByAnnotation && !areIPTagsEquivalent(currentIPTags, ipTagRequest.IPTags))
}
+
// ensurePIPTagged ensures the public IP of the service is tagged as configured:
// it merges the cloud-config tags (az.Tags / az.TagsMap) with any tags from the
// ServiceAnnotationAzurePIPTags annotation (annotation values win, matched
// case-insensitively), preserves the PIP's existing cluster-name and
// service-names tags, and reconciles the result onto pip.Tags.
// Returns true when pip.Tags actually changed.
func (az *Cloud) ensurePIPTagged(service *v1.Service, pip *network.PublicIPAddress) bool {
	configTags := parseTags(az.Tags, az.TagsMap)
	annotationTags := make(map[string]*string)
	if _, ok := service.Annotations[consts.ServiceAnnotationAzurePIPTags]; ok {
		annotationTags = parseTags(service.Annotations[consts.ServiceAnnotationAzurePIPTags], map[string]string{})
	}

	// Annotation tags override config tags; keys are matched case-insensitively
	// so an annotation can update a config tag that differs only in case.
	for k, v := range annotationTags {
		found, key := findKeyInMapCaseInsensitive(configTags, k)
		if !found {
			configTags[k] = v
		} else if !strings.EqualFold(to.String(v), to.String(configTags[key])) {
			configTags[key] = v
		}
	}

	// include the cluster name and service names tags when comparing
	var clusterName, serviceNames *string
	if v := getClusterFromPIPClusterTags(pip.Tags); v != "" {
		clusterName = &v
	}
	if v := getServiceFromPIPServiceTags(pip.Tags); v != "" {
		serviceNames = &v
	}
	if clusterName != nil {
		configTags[consts.ClusterNameKey] = clusterName
	}
	if serviceNames != nil {
		configTags[consts.ServiceTagKey] = serviceNames
	}

	tags, changed := az.reconcileTags(pip.Tags, configTags)
	pip.Tags = tags

	return changed
}
+
// reconcilePublicIP reconciles the PublicIP resources similar to how the LB is
// reconciled: it lists the PIPs in the service's PIP resource group, computes
// which must be updated or deleted (getPublicIPUpdates), applies updates then
// deletions concurrently, and — for an external LB that is wanted — ensures the
// desired PIP exists and returns it. For internal LBs or wantLb=false it
// returns (nil, nil) on success.
func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbName string, wantLb bool) (*network.PublicIPAddress, error) {
	isInternal := requiresInternalLoadBalancer(service)
	serviceName := getServiceName(service)
	serviceIPTagRequest := getServiceIPTagRequestForPublicIP(service)

	var (
		lb               *network.LoadBalancer
		desiredPipName   string
		err              error
		shouldPIPExisted bool
	)

	// The desired PIP name only matters for an external LB we actually want.
	if !isInternal && wantLb {
		desiredPipName, shouldPIPExisted, err = az.determinePublicIPName(clusterName, service)
		if err != nil {
			return nil, err
		}
	}

	// Fetch the LB (when known) so safeDeletePublicIP can strip references to
	// the PIP from its frontend configs and rules before deletion.
	if lbName != "" {
		loadBalancer, _, err := az.getAzureLoadBalancer(lbName, azcache.CacheReadTypeDefault)
		if err != nil {
			return nil, err
		}
		lb = &loadBalancer
	}

	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)

	pips, err := az.ListPIP(service, pipResourceGroup)
	if err != nil {
		return nil, err
	}

	discoveredDesiredPublicIP, pipsToBeDeleted, deletedDesiredPublicIP, pipsToBeUpdated, err := az.getPublicIPUpdates(clusterName, service, pips, wantLb, isInternal, desiredPipName, serviceName, serviceIPTagRequest, shouldPIPExisted)
	if err != nil {
		return nil, err
	}

	// Updates first, then deletions; each batch runs its PIP operations in
	// parallel goroutines. pipCopy is captured per iteration so each closure
	// sees its own PIP value.
	var deleteFuncs, updateFuncs []func() error
	for _, pip := range pipsToBeUpdated {
		pipCopy := *pip
		updateFuncs = append(updateFuncs, func() error {
			klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - updating", serviceName, *pip.Name)
			return az.CreateOrUpdatePIP(service, pipResourceGroup, pipCopy)
		})
	}
	errs := utilerrors.AggregateGoroutines(updateFuncs...)
	if errs != nil {
		return nil, utilerrors.Flatten(errs)
	}

	for _, pip := range pipsToBeDeleted {
		pipCopy := *pip
		deleteFuncs = append(deleteFuncs, func() error {
			klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, *pip.Name)
			return az.safeDeletePublicIP(service, pipResourceGroup, &pipCopy, lb)
		})
	}
	errs = utilerrors.AggregateGoroutines(deleteFuncs...)
	if errs != nil {
		return nil, utilerrors.Flatten(errs)
	}

	if !isInternal && wantLb {
		// Confirm desired public ip resource exists
		var pip *network.PublicIPAddress
		domainNameLabel, found := getPublicIPDomainNameLabel(service)
		// Only error on a missing PIP when the annotation named one that was
		// discovered earlier and not deleted above.
		errorIfPublicIPDoesNotExist := shouldPIPExisted && discoveredDesiredPublicIP && !deletedDesiredPublicIP
		if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel, clusterName, errorIfPublicIPDoesNotExist, found); err != nil {
			return nil, err
		}
		return pip, nil
	}
	return nil, nil
}
+
// getPublicIPUpdates inspects the listed PIPs and classifies the ones owned by
// this service. Return values, in order:
//  1. discoveredDesiredPublicIP — whether the desired PIP name was seen in pips;
//  2. pipsToBeDeleted — owned PIPs meeting the release criteria;
//  3. deletedDesiredPublicIP — whether the desired PIP is among those deleted;
//  4. pipsToBeUpdated — owned PIPs whose tags changed but are not being deleted;
//  5. error — e.g. when an annotation-named PIP was not found for a wanted external LB.
func (az *Cloud) getPublicIPUpdates(clusterName string, service *v1.Service, pips []network.PublicIPAddress, wantLb bool, isInternal bool, desiredPipName string, serviceName string, serviceIPTagRequest serviceIPTagRequest, serviceAnnotationRequestsNamedPublicIP bool) (bool, []*network.PublicIPAddress, bool, []*network.PublicIPAddress, error) {
	var (
		err                       error
		discoveredDesiredPublicIP bool
		deletedDesiredPublicIP    bool
		pipsToBeDeleted           []*network.PublicIPAddress
		pipsToBeUpdated           []*network.PublicIPAddress
	)
	for i := range pips {
		// Take a per-iteration copy so the pointers appended below do not all
		// alias the same loop variable.
		pip := pips[i]
		pipName := *pip.Name

		// If we've been told to use a specific public ip by the client, let's track whether or not it actually existed
		// when we inspect the set in Azure.
		discoveredDesiredPublicIP = discoveredDesiredPublicIP || wantLb && !isInternal && pipName == desiredPipName

		// Now, let's perform additional analysis to determine if we should release the public ips we have found.
		// We can only let them go if (a) they are owned by this service and (b) they meet the criteria for deletion.
		owns, isUserAssignedPIP := serviceOwnsPublicIP(service, &pip, clusterName)
		if owns {
			var dirtyPIP, toBeDeleted bool
			if !wantLb && !isUserAssignedPIP {
				klog.V(2).Infof("reconcilePublicIP for service(%s): unbinding the service from pip %s", serviceName, *pip.Name)
				err = unbindServiceFromPIP(&pip, service, serviceName, clusterName)
				if err != nil {
					return false, nil, false, nil, err
				}
				dirtyPIP = true
			}
			if !isUserAssignedPIP {
				changed := az.ensurePIPTagged(service, &pip)
				if changed {
					dirtyPIP = true
				}
			}
			if shouldReleaseExistingOwnedPublicIP(&pip, wantLb, isInternal, isUserAssignedPIP, desiredPipName, serviceIPTagRequest) {
				// Then, release the public ip
				pipsToBeDeleted = append(pipsToBeDeleted, &pip)

				// Flag if we deleted the desired public ip
				deletedDesiredPublicIP = deletedDesiredPublicIP || pipName == desiredPipName

				// An aside: It would be unusual, but possible, for us to delete a public ip referred to explicitly by name
				// in Service annotations (which is usually reserved for non-service-owned externals), if that IP is tagged as
				// having been owned by a particular Kubernetes cluster.

				// If the pip is going to be deleted, we do not need to update it
				toBeDeleted = true
			}

			// Update tags of PIP only instead of deleting it.
			if !toBeDeleted && dirtyPIP {
				pipsToBeUpdated = append(pipsToBeUpdated, &pip)
			}
		}
	}

	// A named PIP requested via annotation must exist for a wanted external LB.
	if !isInternal && serviceAnnotationRequestsNamedPublicIP && !discoveredDesiredPublicIP && wantLb {
		return false, nil, false, nil, fmt.Errorf("reconcilePublicIP for service(%s): pip(%s) not found", serviceName, desiredPipName)
	}
	return discoveredDesiredPublicIP, pipsToBeDeleted, deletedDesiredPublicIP, pipsToBeUpdated, err
}
+
// safeDeletePublicIP deletes a public IP by removing its references first: when
// the PIP is bound to an IP configuration of the given LB, the matching frontend
// IP configuration and any load-balancing rules referencing it are stripped from
// the LB (and the LB updated) before the PIP itself is deleted.
func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string, pip *network.PublicIPAddress, lb *network.LoadBalancer) error {
	// Remove references if pip.IPConfiguration is not nil.
	if pip.PublicIPAddressPropertiesFormat != nil &&
		pip.PublicIPAddressPropertiesFormat.IPConfiguration != nil &&
		lb != nil && lb.LoadBalancerPropertiesFormat != nil &&
		lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations != nil {
		referencedLBRules := []network.SubResource{}
		frontendIPConfigUpdated := false
		loadBalancerRuleUpdated := false

		// Check whether there are still frontend IP configurations referring to it.
		ipConfigurationID := to.String(pip.PublicIPAddressPropertiesFormat.IPConfiguration.ID)
		if ipConfigurationID != "" {
			lbFrontendIPConfigs := *lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations
			// Iterate backwards so removing an element does not disturb the
			// indices still to be visited.
			for i := len(lbFrontendIPConfigs) - 1; i >= 0; i-- {
				config := lbFrontendIPConfigs[i]
				if strings.EqualFold(ipConfigurationID, to.String(config.ID)) {
					// Remember the rules referencing this frontend config so
					// they can be removed below.
					if config.FrontendIPConfigurationPropertiesFormat != nil &&
						config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules != nil {
						referencedLBRules = *config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules
					}

					frontendIPConfigUpdated = true
					lbFrontendIPConfigs = append(lbFrontendIPConfigs[:i], lbFrontendIPConfigs[i+1:]...)
					break
				}
			}

			if frontendIPConfigUpdated {
				lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations = &lbFrontendIPConfigs
			}
		}

		// Check whether there are still load balancer rules referring to it.
		if len(referencedLBRules) > 0 {
			referencedLBRuleIDs := sets.NewString()
			for _, refer := range referencedLBRules {
				referencedLBRuleIDs.Insert(to.String(refer.ID))
			}

			if lb.LoadBalancerPropertiesFormat.LoadBalancingRules != nil {
				lbRules := *lb.LoadBalancerPropertiesFormat.LoadBalancingRules
				for i := len(lbRules) - 1; i >= 0; i-- {
					ruleID := to.String(lbRules[i].ID)
					if ruleID != "" && referencedLBRuleIDs.Has(ruleID) {
						loadBalancerRuleUpdated = true
						lbRules = append(lbRules[:i], lbRules[i+1:]...)
					}
				}

				if loadBalancerRuleUpdated {
					lb.LoadBalancerPropertiesFormat.LoadBalancingRules = &lbRules
				}
			}
		}

		// Update load balancer when frontendIPConfigUpdated or loadBalancerRuleUpdated.
		if frontendIPConfigUpdated || loadBalancerRuleUpdated {
			err := az.CreateOrUpdateLB(service, *lb)
			if err != nil {
				klog.Errorf("safeDeletePublicIP for service(%s) failed with error: %v", getServiceName(service), err)
				return err
			}
		}
	}

	pipName := to.String(pip.Name)
	klog.V(10).Infof("DeletePublicIP(%s, %q): start", pipResourceGroup, pipName)
	err := az.DeletePublicIP(service, pipResourceGroup, pipName)
	if err != nil {
		return err
	}
	klog.V(10).Infof("DeletePublicIP(%s, %q): end", pipResourceGroup, pipName)

	return nil
}
+
+func findProbe(probes []network.Probe, probe network.Probe) bool {
+	for _, existingProbe := range probes {
+		if strings.EqualFold(to.String(existingProbe.Name), to.String(probe.Name)) && to.Int32(existingProbe.Port) == to.Int32(probe.Port) {
+			return true
+		}
+	}
+	return false
+}
+
+func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule, wantLB bool) bool {
+	for _, existingRule := range rules {
+		if strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) &&
+			equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat, wantLB) {
+			return true
+		}
+	}
+	return false
+}
+
// equalLoadBalancingRulePropertiesFormat checks whether the provided LoadBalancingRulePropertiesFormat are equal.
// Note: only fields used in reconcileLoadBalancer are considered.
// EnableTCPReset and DisableOutboundSnat are compared via to.Bool, so a nil
// pointer is treated the same as an explicit false. IdleTimeoutInMinutes is
// compared only when wantLB is true and both sides set it; otherwise it is ignored.
func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRulePropertiesFormat, t *network.LoadBalancingRulePropertiesFormat, wantLB bool) bool {
	// Either side nil means not comparable — treated as unequal.
	if s == nil || t == nil {
		return false
	}

	properties := reflect.DeepEqual(s.Protocol, t.Protocol) &&
		reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) &&
		reflect.DeepEqual(s.BackendAddressPool, t.BackendAddressPool) &&
		reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) &&
		reflect.DeepEqual(s.FrontendPort, t.FrontendPort) &&
		reflect.DeepEqual(s.BackendPort, t.BackendPort) &&
		reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP) &&
		reflect.DeepEqual(to.Bool(s.EnableTCPReset), to.Bool(t.EnableTCPReset)) &&
		reflect.DeepEqual(to.Bool(s.DisableOutboundSnat), to.Bool(t.DisableOutboundSnat))

	if wantLB && s.IdleTimeoutInMinutes != nil && t.IdleTimeoutInMinutes != nil {
		return properties && reflect.DeepEqual(s.IdleTimeoutInMinutes, t.IdleTimeoutInMinutes)
	}
	return properties
}
+
// findSecurityRule reports whether rule already exists in rules.
// This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction.
// Note that it compares rule's DestinationAddressPrefix only when it's not consolidated rule as such rule does not have DestinationAddressPrefix defined.
// We intentionally do not compare DestinationAddressPrefixes in consolidated case because reconcileSecurityRule has to consider the two rules equal,
// despite different DestinationAddressPrefixes, in order to give it a chance to consolidate the two rules.
func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) bool {
	for _, existingRule := range rules {
		// All string comparisons are case-insensitive; any mismatch moves on
		// to the next candidate rule.
		if !strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) {
			continue
		}
		if !strings.EqualFold(string(existingRule.Protocol), string(rule.Protocol)) {
			continue
		}
		if !strings.EqualFold(to.String(existingRule.SourcePortRange), to.String(rule.SourcePortRange)) {
			continue
		}
		if !strings.EqualFold(to.String(existingRule.DestinationPortRange), to.String(rule.DestinationPortRange)) {
			continue
		}
		if !strings.EqualFold(to.String(existingRule.SourceAddressPrefix), to.String(rule.SourceAddressPrefix)) {
			continue
		}
		// Destination prefixes participate only when neither rule is a shared
		// ("consolidatable") rule — see the function comment.
		if !allowsConsolidation(existingRule) && !allowsConsolidation(rule) {
			if !strings.EqualFold(to.String(existingRule.DestinationAddressPrefix), to.String(rule.DestinationAddressPrefix)) {
				continue
			}
			if !reflect.DeepEqual(to.StringSlice(existingRule.DestinationAddressPrefixes), to.StringSlice(rule.DestinationAddressPrefixes)) {
				continue
			}
		}
		if !strings.EqualFold(string(existingRule.Access), string(rule.Access)) {
			continue
		}
		if !strings.EqualFold(string(existingRule.Direction), string(rule.Direction)) {
			continue
		}
		return true
	}
	return false
}
+
+func (az *Cloud) getPublicIPAddressResourceGroup(service *v1.Service) string {
+	if resourceGroup, found := service.Annotations[consts.ServiceAnnotationLoadBalancerResourceGroup]; found {
+		resourceGroupName := strings.TrimSpace(resourceGroup)
+		if len(resourceGroupName) > 0 {
+			return resourceGroupName
+		}
+	}
+
+	return az.ResourceGroup
+}
+
+func (az *Cloud) isBackendPoolPreConfigured(service *v1.Service) bool {
+	preConfigured := false
+	isInternal := requiresInternalLoadBalancer(service)
+
+	if az.PreConfiguredBackendPoolLoadBalancerTypes == consts.PreConfiguredBackendPoolLoadBalancerTypesAll {
+		preConfigured = true
+	}
+	if (az.PreConfiguredBackendPoolLoadBalancerTypes == consts.PreConfiguredBackendPoolLoadBalancerTypesInternal) && isInternal {
+		preConfigured = true
+	}
+	if (az.PreConfiguredBackendPoolLoadBalancerTypes == consts.PreConfiguredBackendPoolLoadBalancerTypesExternal) && !isInternal {
+		preConfigured = true
+	}
+
+	return preConfigured
+}
+
+// Check if service requires an internal load balancer.
+func requiresInternalLoadBalancer(service *v1.Service) bool {
+	if l, found := service.Annotations[consts.ServiceAnnotationLoadBalancerInternal]; found {
+		return l == consts.TrueAnnotationValue
+	}
+
+	return false
+}
+
+func subnet(service *v1.Service) *string {
+	if requiresInternalLoadBalancer(service) {
+		if l, found := service.Annotations[consts.ServiceAnnotationLoadBalancerInternalSubnet]; found && strings.TrimSpace(l) != "" {
+			return &l
+		}
+	}
+
+	return nil
+}
+
+// getServiceLoadBalancerMode parses the mode value.
+// if the value is __auto__ it returns isAuto = TRUE.
+// if anything else it returns the unique VM set names after trimming spaces.
+func (az *Cloud) getServiceLoadBalancerMode(service *v1.Service) (bool, bool, string) {
+	mode, hasMode := service.Annotations[consts.ServiceAnnotationLoadBalancerMode]
+	useSingleSLB := az.useStandardLoadBalancer() && !az.EnableMultipleStandardLoadBalancers
+	if useSingleSLB && hasMode {
+		klog.Warningf("single standard load balancer doesn't work with annotation %q, would ignore it", consts.ServiceAnnotationLoadBalancerMode)
+	}
+	mode = strings.TrimSpace(mode)
+	isAuto := strings.EqualFold(mode, consts.ServiceAnnotationLoadBalancerAutoModeValue)
+
+	return hasMode, isAuto, mode
+}
+
+func useSharedSecurityRule(service *v1.Service) bool {
+	if l, ok := service.Annotations[consts.ServiceAnnotationSharedSecurityRule]; ok {
+		return l == consts.TrueAnnotationValue
+	}
+
+	return false
+}
+
+func getServiceTags(service *v1.Service) []string {
+	if service == nil {
+		return nil
+	}
+
+	if serviceTags, found := service.Annotations[consts.ServiceAnnotationAllowedServiceTag]; found {
+		result := []string{}
+		tags := strings.Split(strings.TrimSpace(serviceTags), ",")
+		for _, tag := range tags {
+			serviceTag := strings.TrimSpace(tag)
+			if serviceTag != "" {
+				result = append(result, serviceTag)
+			}
+		}
+
+		return result
+	}
+
+	return nil
+}
+
// serviceOwnsPublicIP checks if the service owns the pip and if the pip is user-created.
// The pip is user-created if and only if there is no service tags.
// The service owns the pip if:
// 1. The serviceName is included in the service tags of a system-created pip.
// 2. The service.Spec.LoadBalancerIP matches the IP address of a user-created pip.
// Return values are (owns, isUserAssignedPIP).
func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clusterName string) (bool, bool) {
	if service == nil || pip == nil {
		klog.Warningf("serviceOwnsPublicIP: nil service or public IP")
		return false, false
	}

	// A PIP without an allocated address can't be matched against LoadBalancerIP.
	if pip.PublicIPAddressPropertiesFormat == nil || to.String(pip.IPAddress) == "" {
		klog.Warningf("serviceOwnsPublicIP: empty pip.IPAddress")
		return false, false
	}

	serviceName := getServiceName(service)

	if pip.Tags != nil {
		serviceTag := getServiceFromPIPServiceTags(pip.Tags)
		clusterTag := getClusterFromPIPClusterTags(pip.Tags)

		// if there is no service tag on the pip, it is user-created pip
		if serviceTag == "" {
			return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), true
		}

		// if there is service tag on the pip, it is system-created pip
		if isSVCNameInPIPTag(serviceTag, serviceName) {
			// Backward compatible for clusters upgraded from old releases.
			// In such case, only "service" tag is set.
			if clusterTag == "" {
				return true, false
			}

			// If cluster name tag is set, then return true if it matches.
			if clusterTag == clusterName {
				return true, false
			}
		} else {
			// if the service is not included in the tags of the system-created pip, check the ip address
			// this could happen for secondary services
			return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), false
		}
	}

	// No tags at all: neither owned nor user-assigned.
	return false, false
}
+
+func isSVCNameInPIPTag(tag, svcName string) bool {
+	svcNames := parsePIPServiceTag(&tag)
+
+	for _, name := range svcNames {
+		if strings.EqualFold(name, svcName) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func parsePIPServiceTag(serviceTag *string) []string {
+	if serviceTag == nil || len(*serviceTag) == 0 {
+		return []string{}
+	}
+
+	serviceNames := strings.FieldsFunc(*serviceTag, func(r rune) bool {
+		return r == ','
+	})
+	for i, name := range serviceNames {
+		serviceNames[i] = strings.TrimSpace(name)
+	}
+
+	return serviceNames
+}
+
// bindServicesToPIP add the incoming service name to the PIP's tag
// parameters: public IP address to be updated and incoming service names
// return values:
// 1. a bool flag to indicate if there is a new service added
// 2. an error when the pip is nil
// example:
// "ns1/svc1" + ["ns1/svc1", "ns2/svc2"] = "ns1/svc1,ns2/svc2"
// When replace is true the incoming names overwrite the tag entirely and
// addedNew is always false (used by unbindServiceFromPIP).
func bindServicesToPIP(pip *network.PublicIPAddress, incomingServiceNames []string, replace bool) (bool, error) {
	if pip == nil {
		return false, fmt.Errorf("nil public IP")
	}

	// Make sure the service tag key exists so the writes below always land.
	if pip.Tags == nil {
		pip.Tags = map[string]*string{consts.ServiceTagKey: to.StringPtr("")}
	}

	serviceTagValue := to.StringPtr(getServiceFromPIPServiceTags(pip.Tags))
	serviceTagValueSet := make(map[string]struct{})
	existingServiceNames := parsePIPServiceTag(serviceTagValue)
	addedNew := false

	// replace is used when unbinding the service from PIP so addedNew remains false all the time
	if replace {
		serviceTagValue = to.StringPtr(strings.Join(incomingServiceNames, ","))
		pip.Tags[consts.ServiceTagKey] = serviceTagValue

		return false, nil
	}

	// Index the existing names so duplicates in the incoming list are skipped.
	for _, name := range existingServiceNames {
		if _, ok := serviceTagValueSet[name]; !ok {
			serviceTagValueSet[name] = struct{}{}
		}
	}

	for _, serviceName := range incomingServiceNames {
		if serviceTagValue == nil || *serviceTagValue == "" {
			// First entry: the tag becomes the bare service name.
			serviceTagValue = to.StringPtr(serviceName)
			addedNew = true
		} else {
			// detect duplicates
			if _, ok := serviceTagValueSet[serviceName]; !ok {
				*serviceTagValue += fmt.Sprintf(",%s", serviceName)
				addedNew = true
			} else {
				klog.V(10).Infof("service %s has been bound to the pip already", serviceName)
			}
		}
	}
	pip.Tags[consts.ServiceTagKey] = serviceTagValue

	return addedNew, nil
}
+
// unbindServiceFromPIP removes serviceName from the PIP's service tag (writing
// the remaining names back via bindServicesToPIP in replace mode) and clears the
// PIP's DNS-usage tag when it records this same service. Returns an error when
// the PIP or its tags are nil.
func unbindServiceFromPIP(pip *network.PublicIPAddress, service *v1.Service, serviceName, clusterName string) error {
	if pip == nil || pip.Tags == nil {
		return fmt.Errorf("nil public IP or tags")
	}

	// skip removing tags for user assigned pips
	serviceTagValue := to.StringPtr(getServiceFromPIPServiceTags(pip.Tags))
	existingServiceNames := parsePIPServiceTag(serviceTagValue)
	var found bool
	// Iterate backwards so in-place removal doesn't skip elements.
	for i := len(existingServiceNames) - 1; i >= 0; i-- {
		if strings.EqualFold(existingServiceNames[i], serviceName) {
			existingServiceNames = append(existingServiceNames[:i], existingServiceNames[i+1:]...)
			found = true
		}
	}
	if !found {
		klog.Warningf("cannot find the service %s in the corresponding PIP", serviceName)
	}

	// replace=true rewrites the tag to exactly the remaining names.
	_, err := bindServicesToPIP(pip, existingServiceNames, true)
	if err != nil {
		return err
	}

	// Clear the DNS-usage marker when it points at this service.
	if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && strings.EqualFold(existingServiceName, serviceName) {
		pip.Tags[consts.ServiceUsingDNSKey] = to.StringPtr("")
	}

	return nil
}
+
+// ensureLoadBalancerTagged ensures every load balancer in the resource group is tagged as configured
+func (az *Cloud) ensureLoadBalancerTagged(lb *network.LoadBalancer) bool {
+	if az.Tags == "" && (az.TagsMap == nil || len(az.TagsMap) == 0) {
+		return false
+	}
+	tags := parseTags(az.Tags, az.TagsMap)
+	if lb.Tags == nil {
+		lb.Tags = make(map[string]*string)
+	}
+
+	tags, changed := az.reconcileTags(lb.Tags, tags)
+	lb.Tags = tags
+
+	return changed
+}
+
+// ensureSecurityGroupTagged ensures the security group is tagged as configured
+func (az *Cloud) ensureSecurityGroupTagged(sg *network.SecurityGroup) bool {
+	if az.Tags == "" && (az.TagsMap == nil || len(az.TagsMap) == 0) {
+		return false
+	}
+	tags := parseTags(az.Tags, az.TagsMap)
+	if sg.Tags == nil {
+		sg.Tags = make(map[string]*string)
+	}
+
+	tags, changed := az.reconcileTags(sg.Tags, tags)
+	sg.Tags = tags
+
+	return changed
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go
new file mode 100644
index 0000000000000000000000000000000000000000..2d3c6f86c88ce0cb083509dfc3fc2092742c374f
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go
@@ -0,0 +1,445 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+//go:generate sh -c "mockgen -destination=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go -source=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go -package=provider BackendPool"
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+	utilnet "k8s.io/utils/net"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+// BackendPool abstracts how nodes are managed as members of a load balancer
+// backend pool. Two implementations exist in this file: one keyed on the
+// nodes' NIC IP configurations and one keyed on the nodes' IP addresses.
+type BackendPool interface {
+	// EnsureHostsInPool ensures the nodes join the backend pool of the load balancer
+	EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName, clusterName, lbName string, backendPool network.BackendAddressPool) error
+
+	// CleanupVMSetFromBackendPoolByCondition removes nodes of the unwanted vmSet from the lb backend pool.
+	// This is needed in two scenarios:
+	// 1. When migrating from single SLB to multiple SLBs, the existing
+	// SLB's backend pool contains nodes from different agent pools, while we only want the
+	// nodes from the primary agent pool to join the backend pool.
+	// 2. When migrating from dedicated SLB to shared SLB (or vice versa), we should move the vmSet from
+	// one SLB to another one.
+	CleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalancer, service *v1.Service, nodes []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*network.LoadBalancer, error)
+
+	// ReconcileBackendPools creates the inbound backend pool if it does not exist, and removes nodes that are supposed to be
+	// excluded from the load balancers.
+	ReconcileBackendPools(clusterName string, service *v1.Service, lb *network.LoadBalancer) (bool, bool, error)
+}
+
+// backendPoolTypeNodeIPConfig manages backend pool membership through the
+// nodes' NIC IP configurations, delegating the heavy lifting to the Cloud's
+// VMSet implementation.
+type backendPoolTypeNodeIPConfig struct {
+	*Cloud
+}
+
+// newBackendPoolTypeNodeIPConfig returns the IP-configuration-based
+// BackendPool implementation backed by c.
+func newBackendPoolTypeNodeIPConfig(c *Cloud) BackendPool {
+	return &backendPoolTypeNodeIPConfig{c}
+}
+
+// EnsureHostsInPool joins the nodes' NIC IP configurations to the backend
+// pool identified by backendPoolID by delegating to the VMSet. The
+// clusterName, lbName and backendPool arguments are unused by this
+// implementation.
+func (bc *backendPoolTypeNodeIPConfig) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName, clusterName, lbName string, backendPool network.BackendAddressPool) error {
+	return bc.VMSet.EnsureHostsInPool(service, nodes, backendPoolID, vmSetName)
+}
+
+// CleanupVMSetFromBackendPoolByCondition removes, from the cluster backend
+// pool of slb, every IP configuration whose vmSet matches
+// shouldRemoveVMSetFromSLB, then decouples those vmSets from the pool via the
+// VMSet implementation. The nodes argument is unused by this implementation.
+// It returns the (possibly mutated) slb.
+func (bc *backendPoolTypeNodeIPConfig) CleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalancer, service *v1.Service, nodes []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*network.LoadBalancer, error) {
+	lbBackendPoolName := getBackendPoolName(clusterName, service)
+	lbResourceGroup := bc.getLoadBalancerResourceGroup()
+	lbBackendPoolID := bc.getBackendPoolID(to.String(slb.Name), lbResourceGroup, lbBackendPoolName)
+	newBackendPools := make([]network.BackendAddressPool, 0)
+	if slb.LoadBalancerPropertiesFormat != nil && slb.BackendAddressPools != nil {
+		newBackendPools = *slb.BackendAddressPools
+	}
+	// vmSet name -> IP configurations to decouple from that vmSet.
+	vmSetNameToBackendIPConfigurationsToBeDeleted := make(map[string][]network.InterfaceIPConfiguration)
+
+	for j, bp := range newBackendPools {
+		if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) {
+			klog.V(2).Infof("bc.CleanupVMSetFromBackendPoolByCondition: checking the backend pool %s from standard load balancer %s", to.String(bp.Name), to.String(slb.Name))
+			if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil {
+				// Iterate backwards so in-place removal does not skip elements.
+				for i := len(*bp.BackendIPConfigurations) - 1; i >= 0; i-- {
+					ipConf := (*bp.BackendIPConfigurations)[i]
+					ipConfigID := to.String(ipConf.ID)
+					_, vmSetName, err := bc.VMSet.GetNodeNameByIPConfigurationID(ipConfigID)
+					if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) {
+						return nil, err
+					}
+
+					if shouldRemoveVMSetFromSLB(vmSetName) {
+						klog.V(2).Infof("bc.CleanupVMSetFromBackendPoolByCondition: found unwanted vmSet %s, decouple it from the LB", vmSetName)
+						// construct a backendPool that only contains the IP config of the node to be deleted
+						interfaceIPConfigToBeDeleted := network.InterfaceIPConfiguration{
+							ID: to.StringPtr(ipConfigID),
+						}
+						vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName] = append(vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName], interfaceIPConfigToBeDeleted)
+						*bp.BackendIPConfigurations = append((*bp.BackendIPConfigurations)[:i], (*bp.BackendIPConfigurations)[i+1:]...)
+					}
+				}
+			}
+
+			newBackendPools[j] = bp
+			// Only the cluster backend pool is managed; stop after handling it.
+			break
+		}
+	}
+
+	for vmSetName := range vmSetNameToBackendIPConfigurationsToBeDeleted {
+		backendIPConfigurationsToBeDeleted := vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName]
+		backendpoolToBeDeleted := &[]network.BackendAddressPool{
+			{
+				ID: to.StringPtr(lbBackendPoolID),
+				BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
+					BackendIPConfigurations: &backendIPConfigurationsToBeDeleted,
+				},
+			},
+		}
+		// decouple the backendPool from the node
+		err := bc.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, true)
+		if err != nil {
+			return nil, err
+		}
+		slb.BackendAddressPools = &newBackendPools
+		// Proactively disable the etag to prevent etag mismatch error when putting lb later.
+		// This could happen because when we remove the hosts from the lb, the nrp
+		// would put the lb to remove the backend references as well.
+		slb.Etag = nil
+	}
+
+	return slb, nil
+}
+
+// ReconcileBackendPools ensures the cluster backend pool exists on lb and
+// decouples the IP configurations of nodes that must be excluded from load
+// balancing. It returns (isBackendPoolPreConfigured, changed, error):
+// the first reports whether the pool is considered pre-configured (managed
+// outside this provider), the second whether lb was modified (a new pool was
+// appended).
+func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string, service *v1.Service, lb *network.LoadBalancer) (bool, bool, error) {
+	var newBackendPools []network.BackendAddressPool
+	var err error
+	if lb.BackendAddressPools != nil {
+		newBackendPools = *lb.BackendAddressPools
+	}
+
+	foundBackendPool := false
+	wantLb := true
+	changed := false
+	lbName := *lb.Name
+
+	serviceName := getServiceName(service)
+	lbBackendPoolName := getBackendPoolName(clusterName, service)
+	lbBackendPoolID := bc.getBackendPoolID(lbName, bc.getLoadBalancerResourceGroup(), lbBackendPoolName)
+	vmSetName := bc.mapLoadBalancerNameToVMSet(lbName, clusterName)
+
+	for _, bp := range newBackendPools {
+		if strings.EqualFold(*bp.Name, lbBackendPoolName) {
+			klog.V(10).Infof("bc.ReconcileBackendPools for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
+			foundBackendPool = true
+
+			var backendIPConfigurationsToBeDeleted []network.InterfaceIPConfiguration
+			if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil {
+				for _, ipConf := range *bp.BackendIPConfigurations {
+					ipConfID := to.String(ipConf.ID)
+					nodeName, _, err := bc.VMSet.GetNodeNameByIPConfigurationID(ipConfID)
+					if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) {
+						return false, false, err
+					}
+
+					// If a node is not supposed to be included in the LB, it
+					// would not be in the `nodes` slice. We need to check the nodes that
+					// have been added to the LB's backendpool, find the unwanted ones and
+					// delete them from the pool.
+					shouldExcludeLoadBalancer, err := bc.ShouldNodeExcludedFromLoadBalancer(nodeName)
+					if err != nil {
+						klog.Errorf("bc.ReconcileBackendPools: ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
+						return false, false, err
+					}
+					if shouldExcludeLoadBalancer {
+						klog.V(2).Infof("bc.ReconcileBackendPools for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB %s", serviceName, wantLb, nodeName, lbName)
+						// construct a backendPool that only contains the IP config of the node to be deleted
+						backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)})
+					}
+				}
+			}
+			if len(backendIPConfigurationsToBeDeleted) > 0 {
+				backendpoolToBeDeleted := &[]network.BackendAddressPool{
+					{
+						ID: to.StringPtr(lbBackendPoolID),
+						BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
+							BackendIPConfigurations: &backendIPConfigurationsToBeDeleted,
+						},
+					},
+				}
+				// decouple the backendPool from the node
+				err = bc.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)
+				if err != nil {
+					return false, false, err
+				}
+			}
+			// Only the cluster backend pool is managed; stop after handling it.
+			break
+		} else {
+			klog.V(10).Infof("bc.ReconcileBackendPools for service (%s)(%t): lb backendpool - found unmanaged backendpool %s", serviceName, wantLb, *bp.Name)
+		}
+	}
+
+	isBackendPoolPreConfigured := bc.isBackendPoolPreConfigured(service)
+	if !foundBackendPool {
+		isBackendPoolPreConfigured = newBackendPool(lb, isBackendPoolPreConfigured, bc.PreConfiguredBackendPoolLoadBalancerTypes, getServiceName(service), getBackendPoolName(clusterName, service))
+		changed = true
+	}
+
+	return isBackendPoolPreConfigured, changed, err
+}
+
+// backendPoolTypeNodeIP manages backend pool membership through the nodes'
+// private IP addresses rather than their NIC IP configurations.
+type backendPoolTypeNodeIP struct {
+	*Cloud
+}
+
+// newBackendPoolTypeNodeIP returns the node-IP-based BackendPool
+// implementation backed by c.
+func newBackendPoolTypeNodeIP(c *Cloud) BackendPool {
+	return &backendPoolTypeNodeIP{c}
+}
+
+// EnsureHostsInPool adds the private IP addresses of the eligible nodes to
+// the cluster backend pool and, when anything was added, PUTs the updated
+// pool to the load balancer. Control plane nodes are skipped, as are nodes
+// whose vmSet does not map to this LB (unless the vmSet shares the primary
+// SLB). The backendPoolID parameter is unused by this implementation.
+func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName, clusterName, lbName string, backendPool network.BackendAddressPool) error {
+	vnetResourceGroup := bi.ResourceGroup
+	if len(bi.VnetResourceGroup) > 0 {
+		vnetResourceGroup = bi.VnetResourceGroup
+	}
+	// Backend addresses must reference the vnet they belong to.
+	vnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s", bi.SubscriptionID, vnetResourceGroup, bi.VnetName)
+
+	changed := false
+	lbBackendPoolName := getBackendPoolName(clusterName, service)
+	if strings.EqualFold(to.String(backendPool.Name), lbBackendPoolName) &&
+		backendPool.BackendAddressPoolPropertiesFormat != nil {
+		if backendPool.LoadBalancerBackendAddresses == nil {
+			lbBackendPoolAddresses := make([]network.LoadBalancerBackendAddress, 0)
+			backendPool.LoadBalancerBackendAddresses = &lbBackendPoolAddresses
+		}
+
+		// Collect the IPs already present so existing members are not re-added.
+		existingIPs := sets.NewString()
+		for _, loadBalancerBackendAddress := range *backendPool.LoadBalancerBackendAddresses {
+			if loadBalancerBackendAddress.LoadBalancerBackendAddressPropertiesFormat != nil &&
+				loadBalancerBackendAddress.IPAddress != nil {
+				klog.V(4).Infof("bi.EnsureHostsInPool: found existing IP %s in the backend pool %s", to.String(loadBalancerBackendAddress.IPAddress), lbBackendPoolName)
+				existingIPs.Insert(to.String(loadBalancerBackendAddress.IPAddress))
+			}
+		}
+
+		for _, node := range nodes {
+			if isControlPlaneNode(node) {
+				klog.V(4).Infof("bi.EnsureHostsInPool: skipping control plane node %s", node.Name)
+				continue
+			}
+
+			var err error
+			shouldSkip := false
+			// With a single standard LB every vmSet shares the LB, so the
+			// per-node vmSet check below is unnecessary.
+			useSingleSLB := strings.EqualFold(bi.LoadBalancerSku, consts.LoadBalancerSkuStandard) && !bi.EnableMultipleStandardLoadBalancers
+			if !useSingleSLB {
+				// NOTE: reassigns the vmSetName parameter for this and later iterations.
+				vmSetName, err = bi.VMSet.GetNodeVMSetName(node)
+				if err != nil {
+					klog.Errorf("bi.EnsureHostsInPool: failed to get vmSet name by node name: %s", err.Error())
+					return err
+				}
+
+				if !strings.EqualFold(vmSetName, bi.mapLoadBalancerNameToVMSet(lbName, clusterName)) {
+					shouldSkip = true
+
+					// A vmSet configured to share the primary SLB may still join
+					// the cluster-named LB's pool.
+					lbNamePrefix := strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix)
+					if strings.EqualFold(lbNamePrefix, clusterName) &&
+						strings.EqualFold(bi.LoadBalancerSku, consts.LoadBalancerSkuStandard) &&
+						bi.getVMSetNamesSharingPrimarySLB().Has(vmSetName) {
+						shouldSkip = false
+					}
+				}
+			}
+			if shouldSkip {
+				klog.V(4).Infof("bi.EnsureHostsInPool: skipping attaching node %s to lb %s, because the vmSet of the node is %s", node.Name, lbName, vmSetName)
+				continue
+			}
+
+			privateIP := getNodePrivateIPAddress(service, node)
+			if !existingIPs.Has(privateIP) {
+				name := node.Name
+				if utilnet.IsIPv6String(privateIP) {
+					name = fmt.Sprintf("%s-ipv6", name)
+				}
+
+				klog.V(4).Infof("bi.EnsureHostsInPool: adding %s with ip address %s", name, privateIP)
+				*backendPool.LoadBalancerBackendAddresses = append(*backendPool.LoadBalancerBackendAddresses, network.LoadBalancerBackendAddress{
+					Name: to.StringPtr(name),
+					LoadBalancerBackendAddressPropertiesFormat: &network.LoadBalancerBackendAddressPropertiesFormat{
+						IPAddress:      to.StringPtr(privateIP),
+						VirtualNetwork: &network.SubResource{ID: to.StringPtr(vnetID)},
+					},
+				})
+				changed = true
+			}
+		}
+	}
+	if changed {
+		klog.V(2).Infof("bi.EnsureHostsInPool: updating backend pool %s of load balancer %s", lbBackendPoolName, lbName)
+		if err := bi.CreateOrUpdateLBBackendPool(lbName, backendPool); err != nil {
+			return fmt.Errorf("bi.EnsureHostsInPool: failed to update backend pool %s: %w", lbBackendPoolName, err)
+		}
+	}
+
+	return nil
+}
+
+// CleanupVMSetFromBackendPoolByCondition removes, from the cluster backend
+// pool of slb, the private IPs of the given nodes whose vmSet matches
+// shouldRemoveVMSetFromSLB, and PUTs the updated pool when anything was
+// removed. It returns the (possibly mutated) slb.
+func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalancer, service *v1.Service, nodes []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*network.LoadBalancer, error) {
+	lbBackendPoolName := getBackendPoolName(clusterName, service)
+	newBackendPools := make([]network.BackendAddressPool, 0)
+	if slb.LoadBalancerPropertiesFormat != nil && slb.BackendAddressPools != nil {
+		newBackendPools = *slb.BackendAddressPools
+	}
+
+	var updatedPrivateIPs bool
+	for j, bp := range newBackendPools {
+		if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) {
+			klog.V(2).Infof("bi.CleanupVMSetFromBackendPoolByCondition: checking the backend pool %s from standard load balancer %s", to.String(bp.Name), to.String(slb.Name))
+			// Gather the private IPs of all nodes in unwanted vmSets.
+			vmIPsToBeDeleted := sets.NewString()
+			for _, node := range nodes {
+				vmSetName, err := bi.VMSet.GetNodeVMSetName(node)
+				if err != nil {
+					return nil, err
+				}
+
+				if shouldRemoveVMSetFromSLB(vmSetName) {
+					privateIP := getNodePrivateIPAddress(service, node)
+					klog.V(4).Infof("bi.CleanupVMSetFromBackendPoolByCondition: removing ip %s from the backend pool %s", privateIP, lbBackendPoolName)
+					vmIPsToBeDeleted.Insert(privateIP)
+				}
+			}
+
+			if bp.BackendAddressPoolPropertiesFormat != nil && bp.LoadBalancerBackendAddresses != nil {
+				// Iterate backwards so in-place removal does not skip elements.
+				for i := len(*bp.LoadBalancerBackendAddresses) - 1; i >= 0; i-- {
+					if (*bp.LoadBalancerBackendAddresses)[i].LoadBalancerBackendAddressPropertiesFormat != nil &&
+						vmIPsToBeDeleted.Has(to.String((*bp.LoadBalancerBackendAddresses)[i].IPAddress)) {
+						*bp.LoadBalancerBackendAddresses = append((*bp.LoadBalancerBackendAddresses)[:i], (*bp.LoadBalancerBackendAddresses)[i+1:]...)
+						updatedPrivateIPs = true
+					}
+				}
+			}
+
+			newBackendPools[j] = bp
+			// Only the cluster backend pool is managed; stop after handling it.
+			break
+		}
+	}
+	if updatedPrivateIPs {
+		klog.V(2).Infof("bi.CleanupVMSetFromBackendPoolByCondition: updating lb %s since there are private IP updates", to.String(slb.Name))
+		slb.BackendAddressPools = &newBackendPools
+
+		for _, backendAddressPool := range *slb.BackendAddressPools {
+			if strings.EqualFold(lbBackendPoolName, to.String(backendAddressPool.Name)) {
+				if err := bi.CreateOrUpdateLBBackendPool(to.String(slb.Name), backendAddressPool); err != nil {
+					return nil, fmt.Errorf("bi.CleanupVMSetFromBackendPoolByCondition: failed to create or update backend pool %s: %w", lbBackendPoolName, err)
+				}
+			}
+		}
+	}
+
+	return slb, nil
+}
+
+// ReconcileBackendPools ensures the cluster backend pool exists on lb and
+// removes the private IPs of nodes listed in bi.excludeLoadBalancerNodes,
+// PUTting the pool immediately when addresses were removed. It returns
+// (isBackendPoolPreConfigured, changed, error): the first reports whether the
+// pool is considered pre-configured, the second whether lb was modified (a
+// new pool was appended).
+func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, service *v1.Service, lb *network.LoadBalancer) (bool, bool, error) {
+	var newBackendPools []network.BackendAddressPool
+	var err error
+	if lb.BackendAddressPools != nil {
+		newBackendPools = *lb.BackendAddressPools
+	}
+
+	foundBackendPool := false
+	wantLb := true
+	changed := false
+	lbName := *lb.Name
+	serviceName := getServiceName(service)
+	lbBackendPoolName := getBackendPoolName(clusterName, service)
+
+	for i, bp := range newBackendPools {
+		if strings.EqualFold(*bp.Name, lbBackendPoolName) {
+			klog.V(10).Infof("bi.ReconcileBackendPools for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
+			foundBackendPool = true
+
+			// Every private IP of every excluded node is a candidate for removal.
+			var nodeIPAddressesToBeDeleted []string
+			for nodeName := range bi.excludeLoadBalancerNodes {
+				for ip := range bi.nodePrivateIPs[nodeName] {
+					klog.V(2).Infof("bi.ReconcileBackendPools for service (%s)(%t): lb backendpool - found unwanted node private IP %s, decouple it from the LB %s", serviceName, wantLb, ip, lbName)
+					nodeIPAddressesToBeDeleted = append(nodeIPAddressesToBeDeleted, ip)
+				}
+			}
+			if len(nodeIPAddressesToBeDeleted) > 0 {
+				updated := removeNodeIPAddressesFromBackendPool(bp, nodeIPAddressesToBeDeleted)
+				if updated {
+					(*lb.BackendAddressPools)[i] = bp
+					if err := bi.CreateOrUpdateLBBackendPool(lbName, bp); err != nil {
+						return false, false, fmt.Errorf("bi.ReconcileBackendPools for service (%s)(%t): lb backendpool - failed to update backend pool %s for load balancer %s: %w", serviceName, wantLb, lbBackendPoolName, lbName, err)
+					}
+				}
+			}
+			// Only the cluster backend pool is managed; stop after handling it.
+			break
+		} else {
+			klog.V(10).Infof("bi.ReconcileBackendPools for service (%s)(%t): lb backendpool - found unmanaged backendpool %s", serviceName, wantLb, *bp.Name)
+		}
+	}
+
+	isBackendPoolPreConfigured := bi.isBackendPoolPreConfigured(service)
+	if !foundBackendPool {
+		isBackendPoolPreConfigured = newBackendPool(lb, isBackendPoolPreConfigured, bi.PreConfiguredBackendPoolLoadBalancerTypes, getServiceName(service), getBackendPoolName(clusterName, service))
+		changed = true
+	}
+
+	return isBackendPoolPreConfigured, changed, err
+}
+
+// newBackendPool appends an empty backend pool named lbBackendPoolName to lb.
+// When the pool was expected to be pre-configured but was not found, it logs
+// a warning and returns false so the caller stops treating the pool as
+// pre-configured; otherwise it returns isBackendPoolPreConfigured unchanged.
+func newBackendPool(lb *network.LoadBalancer, isBackendPoolPreConfigured bool, preConfiguredBackendPoolLoadBalancerTypes, serviceName, lbBackendPoolName string) bool {
+	preConfigured := isBackendPoolPreConfigured
+	if preConfigured {
+		klog.V(2).Infof("newBackendPool for service (%s)(true): lb backendpool - PreConfiguredBackendPoolLoadBalancerTypes %s has been set but can not find corresponding backend pool, ignoring it", serviceName, preConfiguredBackendPoolLoadBalancerTypes)
+		preConfigured = false
+	}
+
+	pools := lb.BackendAddressPools
+	if pools == nil {
+		pools = &[]network.BackendAddressPool{}
+		lb.BackendAddressPools = pools
+	}
+	*pools = append(*pools, network.BackendAddressPool{
+		Name:                               to.StringPtr(lbBackendPoolName),
+		BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{},
+	})
+
+	return preConfigured
+}
+
+// removeNodeIPAddressesFromBackendPool deletes every backend address whose IP
+// appears in nodeIPAddresses from backendPool, reporting whether any address
+// was removed.
+func removeNodeIPAddressesFromBackendPool(backendPool network.BackendAddressPool, nodeIPAddresses []string) bool {
+	unwanted := sets.NewString(nodeIPAddresses...)
+	if backendPool.BackendAddressPoolPropertiesFormat == nil ||
+		backendPool.LoadBalancerBackendAddresses == nil {
+		return false
+	}
+
+	addresses := backendPool.LoadBalancerBackendAddresses
+	removed := false
+	// Walk backwards so in-place removal does not skip elements.
+	for i := len(*addresses) - 1; i >= 0; i-- {
+		if (*addresses)[i].LoadBalancerBackendAddressPropertiesFormat == nil {
+			continue
+		}
+		ip := to.String((*addresses)[i].IPAddress)
+		if unwanted.Has(ip) {
+			klog.V(4).Infof("removeNodeIPAddressFromBackendPool: removing %s from the backend pool %s", ip, to.String(backendPool.Name))
+			*addresses = append((*addresses)[:i], (*addresses)[i+1:]...)
+			removed = true
+		}
+	}
+
+	return removed
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go
new file mode 100644
index 0000000000000000000000000000000000000000..09e439a646874eaa04aef487f6f9313e5ad727d2
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go
@@ -0,0 +1,431 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"path"
+	"strconv"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	kwait "k8s.io/apimachinery/pkg/util/wait"
+	cloudvolume "k8s.io/cloud-provider/volume"
+	volumehelpers "k8s.io/cloud-provider/volume/helpers"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+// ManagedDiskController handles the lifecycle of Azure managed disks
+// (create, delete, get, resize), sharing plumbing through controllerCommon.
+type ManagedDiskController struct {
+	common *controllerCommon
+}
+
+// ManagedDiskOptions specifies the options of managed disks.
+type ManagedDiskOptions struct {
+	// The SKU of storage account.
+	StorageAccountType compute.DiskStorageAccountTypes
+	// The name of the disk.
+	DiskName string
+	// The name of PVC.
+	PVCName string
+	// The name of resource group.
+	ResourceGroup string
+	// The AvailabilityZone to create the disk.
+	AvailabilityZone string
+	// The tags of the disk.
+	Tags map[string]string
+	// IOPS Caps for UltraSSD disk
+	DiskIOPSReadWrite string
+	// Throughput Cap (MBps) for UltraSSD disk
+	DiskMBpsReadWrite string
+	// if SourceResourceID is not empty, then it's a disk copy operation (for snapshots)
+	SourceResourceID string
+	// The type of source
+	SourceType string
+	// ResourceId of the disk encryption set to use for enabling encryption at rest.
+	DiskEncryptionSetID string
+	// The size in GB.
+	SizeGB int
+	// The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
+	MaxShares int32
+	// Logical sector size in bytes for Ultra disks
+	LogicalSectorSize int32
+	// SkipGetDiskOperation indicates whether to skip the GetDisk polling after creation (mainly due to throttling)
+	SkipGetDiskOperation bool
+	// NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll'
+	NetworkAccessPolicy compute.NetworkAccessPolicy
+	// DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
+	DiskAccessID *string
+	// BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk.
+	BurstingEnabled *bool
+}
+
+// CreateManagedDisk creates a managed disk described by options and returns
+// the ARM resource ID of the new disk. Unless options.SkipGetDiskOperation is
+// set, it polls the disk with exponential backoff until provisioningState is
+// Succeeded (a polling failure is logged but not returned).
+func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) {
+	var err error
+	klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
+
+	// Resolve the requested availability zone (if any) to a zone ID.
+	var createZones []string
+	if len(options.AvailabilityZone) > 0 {
+		requestedZone := c.common.cloud.GetZoneID(options.AvailabilityZone)
+		if requestedZone != "" {
+			createZones = append(createZones, requestedZone)
+		}
+	}
+
+	// insert original tags to newTags
+	newTags := make(map[string]*string)
+	azureDDTag := "kubernetes-azure-dd"
+	newTags[consts.CreatedByTag] = &azureDDTag
+	if options.Tags != nil {
+		for k, v := range options.Tags {
+			// Azure won't allow / (forward slash) in tags
+			newKey := strings.Replace(k, "/", "-", -1)
+			newValue := strings.Replace(v, "/", "-", -1)
+			newTags[newKey] = &newValue
+		}
+	}
+
+	diskSizeGB := int32(options.SizeGB)
+	diskSku := options.StorageAccountType
+
+	creationData, err := getValidCreationData(c.common.subscriptionID, options.ResourceGroup, options.SourceResourceID, options.SourceType)
+	if err != nil {
+		return "", err
+	}
+	diskProperties := compute.DiskProperties{
+		DiskSizeGB:      &diskSizeGB,
+		CreationData:    &creationData,
+		BurstingEnabled: options.BurstingEnabled,
+	}
+
+	// DiskAccessID is required for AllowPrivate and forbidden otherwise.
+	if options.NetworkAccessPolicy != "" {
+		diskProperties.NetworkAccessPolicy = options.NetworkAccessPolicy
+		if options.NetworkAccessPolicy == compute.AllowPrivate {
+			if options.DiskAccessID == nil {
+				return "", fmt.Errorf("DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate")
+			}
+			diskProperties.DiskAccessID = options.DiskAccessID
+		} else {
+			if options.DiskAccessID != nil {
+				return "", fmt.Errorf("DiskAccessID(%s) must be empty when NetworkAccessPolicy(%s) is not AllowPrivate", *options.DiskAccessID, options.NetworkAccessPolicy)
+			}
+		}
+	}
+
+	// IOPS/throughput caps and logical sector size are UltraSSD-only settings.
+	if diskSku == compute.UltraSSDLRS {
+		diskIOPSReadWrite := int64(consts.DefaultDiskIOPSReadWrite)
+		if options.DiskIOPSReadWrite != "" {
+			v, err := strconv.Atoi(options.DiskIOPSReadWrite)
+			if err != nil {
+				return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %w", err)
+			}
+			diskIOPSReadWrite = int64(v)
+		}
+		diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite)
+
+		diskMBpsReadWrite := int64(consts.DefaultDiskMBpsReadWrite)
+		if options.DiskMBpsReadWrite != "" {
+			v, err := strconv.Atoi(options.DiskMBpsReadWrite)
+			if err != nil {
+				return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %w", err)
+			}
+			diskMBpsReadWrite = int64(v)
+		}
+		diskProperties.DiskMBpsReadWrite = to.Int64Ptr(diskMBpsReadWrite)
+
+		if options.LogicalSectorSize != 0 {
+			klog.V(2).Infof("AzureDisk - requested LogicalSectorSize: %v", options.LogicalSectorSize)
+			diskProperties.CreationData.LogicalSectorSize = to.Int32Ptr(options.LogicalSectorSize)
+		}
+	} else {
+		if options.DiskIOPSReadWrite != "" {
+			return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type")
+		}
+		if options.DiskMBpsReadWrite != "" {
+			return "", fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type")
+		}
+		if options.LogicalSectorSize != 0 {
+			return "", fmt.Errorf("AzureDisk - LogicalSectorSize parameter is only applicable in UltraSSD_LRS disk type")
+		}
+	}
+
+	if options.DiskEncryptionSetID != "" {
+		// A disk encryption set ID must be a full ARM ID starting with /subscriptions/.
+		if strings.Index(strings.ToLower(options.DiskEncryptionSetID), "/subscriptions/") != 0 {
+			return "", fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", options.DiskEncryptionSetID, consts.DiskEncryptionSetIDFormat)
+		}
+		diskProperties.Encryption = &compute.Encryption{
+			DiskEncryptionSetID: &options.DiskEncryptionSetID,
+			Type:                compute.EncryptionTypeEncryptionAtRestWithCustomerKey,
+		}
+	}
+
+	if options.MaxShares > 1 {
+		diskProperties.MaxShares = &options.MaxShares
+	}
+
+	model := compute.Disk{
+		Location: &c.common.location,
+		Tags:     newTags,
+		Sku: &compute.DiskSku{
+			Name: diskSku,
+		},
+		DiskProperties: &diskProperties,
+	}
+
+	if el := c.common.extendedLocation; el != nil {
+		model.ExtendedLocation = &compute.ExtendedLocation{
+			Name: to.StringPtr(el.Name),
+			Type: compute.ExtendedLocationTypes(el.Type),
+		}
+	}
+
+	if len(createZones) > 0 {
+		model.Zones = &createZones
+	}
+
+	if options.ResourceGroup == "" {
+		options.ResourceGroup = c.common.resourceGroup
+	}
+
+	cloud := c.common.cloud
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+	rerr := cloud.DisksClient.CreateOrUpdate(ctx, options.ResourceGroup, options.DiskName, model)
+	if rerr != nil {
+		return "", rerr.Error()
+	}
+
+	diskID := fmt.Sprintf(managedDiskPath, cloud.subscriptionID, options.ResourceGroup, options.DiskName)
+
+	if options.SkipGetDiskOperation {
+		klog.Warningf("azureDisk - GetDisk(%s, StorageAccountType:%s) is throttled, unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType)
+	} else {
+		// Poll until the disk reports provisioningState == Succeeded, keeping
+		// the ARM-reported ID when available.
+		err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
+			provisionState, id, err := c.GetDisk(options.ResourceGroup, options.DiskName)
+			if err == nil {
+				if id != "" {
+					diskID = id
+				}
+			} else {
+				// We are waiting for provisioningState==Succeeded
+				// We don't want to hand-off managed disks to k8s while they are
+				// still being provisioned, this is to avoid some race conditions
+				return false, err
+			}
+			if strings.ToLower(provisionState) == "succeeded" {
+				return true, nil
+			}
+			return false, nil
+		})
+
+		if err != nil {
+			klog.Warningf("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB)
+		}
+	}
+
+	klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
+	return diskID, nil
+}
+
+// DeleteManagedDisk deletes the managed disk identified by diskURI. It
+// refuses to delete a disk that is currently attaching/detaching (tracked in
+// diskStateMap) or still attached to a VM (ManagedBy set). A disk that no
+// longer exists (404) is treated as success. Deletion is fire-and-forget:
+// the method does not poll for completion.
+func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI string) error {
+	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
+	if err != nil {
+		return err
+	}
+
+	if _, ok := c.common.diskStateMap.Load(strings.ToLower(diskURI)); ok {
+		return fmt.Errorf("failed to delete disk(%s) since it's in attaching or detaching state", diskURI)
+	}
+
+	diskName := path.Base(diskURI)
+	disk, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
+	if rerr != nil {
+		if rerr.HTTPStatusCode == http.StatusNotFound {
+			klog.V(2).Infof("azureDisk - disk(%s) is already deleted", diskURI)
+			return nil
+		}
+		// ignore GetDisk throttling
+		if !rerr.IsThrottled() && !strings.Contains(rerr.RawError.Error(), consts.RateLimited) {
+			return rerr.Error()
+		}
+	}
+	if disk.ManagedBy != nil {
+		return fmt.Errorf("disk(%s) already attached to node(%s), could not be deleted", diskURI, *disk.ManagedBy)
+	}
+
+	if rerr := c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName); rerr != nil {
+		return rerr.Error()
+	}
+	// We don't need poll here, k8s will immediately stop referencing the disk
+	// the disk will be eventually deleted - cleanly - by ARM
+
+	klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
+
+	return nil
+}
+
+// GetDisk fetches the managed disk and returns its provisioning state and
+// ARM resource ID. Both strings are empty (with a nil error) when the disk
+// carries no properties or no provisioning state.
+func (c *ManagedDiskController) GetDisk(resourceGroup, diskName string) (string, string, error) {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	disk, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
+	if rerr != nil {
+		return "", "", rerr.Error()
+	}
+
+	if disk.DiskProperties == nil || disk.DiskProperties.ProvisioningState == nil {
+		return "", "", nil
+	}
+	return *disk.DiskProperties.ProvisioningState, *disk.ID, nil
+}
+
+// ResizeDisk expands the disk identified by diskURI to at least newSize and
+// returns the resulting size. Azure sizes disks in whole GiB, so newSize is
+// rounded up to GiB first. If the disk is already large enough, the rounded
+// size is returned without an API call. Unless supportOnlineResize is true,
+// the disk must be in the Unattached state.
+func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity, supportOnlineResize bool) (resource.Quantity, error) {
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+
+	diskName := path.Base(diskURI)
+	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
+	if err != nil {
+		return oldSize, err
+	}
+
+	result, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
+	if rerr != nil {
+		return oldSize, rerr.Error()
+	}
+
+	if result.DiskProperties == nil || result.DiskProperties.DiskSizeGB == nil {
+		return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName)
+	}
+
+	// Azure resizes in chunks of GiB (not GB)
+	requestGiB, err := volumehelpers.RoundUpToGiBInt32(newSize)
+	if err != nil {
+		return oldSize, err
+	}
+
+	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
+
+	klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
+	// If disk already of greater or equal size than requested we return
+	if *result.DiskProperties.DiskSizeGB >= requestGiB {
+		return newSizeQuant, nil
+	}
+
+	if !supportOnlineResize && result.DiskProperties.DiskState != compute.Unattached {
+		return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: %s, already attached to %s", result.DiskProperties.DiskState, to.String(result.ManagedBy))
+	}
+
+	diskParameter := compute.DiskUpdate{
+		DiskUpdateProperties: &compute.DiskUpdateProperties{
+			DiskSizeGB: &requestGiB,
+		},
+	}
+
+	// Fresh context for the update call; the Get above may have consumed time.
+	ctx, cancel = getContextWithCancel()
+	defer cancel()
+	if rerr := c.common.cloud.DisksClient.Update(ctx, resourceGroup, diskName, diskParameter); rerr != nil {
+		return oldSize, rerr.Error()
+	}
+
+	klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)
+
+	return newSizeQuant, nil
+}
+
+// get resource group name from a managed disk URI, e.g. return {group-name} according to
+// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}
+// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get
+func getResourceGroupFromDiskURI(diskURI string) (string, error) {
+	fields := strings.Split(diskURI, "/")
+	if len(fields) != 9 || strings.ToLower(fields[3]) != "resourcegroups" {
+		return "", fmt.Errorf("invalid disk URI: %s", diskURI)
+	}
+	return fields[4], nil
+}
+
+// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
+func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
+	// Ignore if not AzureDisk.
+	if pv.Spec.AzureDisk == nil {
+		return nil, nil
+	}
+
+	// Ignore any volumes that are being provisioned
+	if pv.Spec.AzureDisk.DiskName == cloudvolume.ProvisionedVolumeName {
+		return nil, nil
+	}
+
+	return c.GetAzureDiskLabels(pv.Spec.AzureDisk.DataDiskURI)
+}
+
+// GetAzureDiskLabels gets availability zone labels for Azuredisk.
+func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
+	// Get disk's resource group.
+	diskName := path.Base(diskURI)
+	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
+	if err != nil {
+		klog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err)
+		return nil, err
+	}
+
+	labels := map[string]string{
+		consts.LabelFailureDomainBetaRegion: c.Location,
+	}
+	// no azure credential is set, return nil
+	if c.DisksClient == nil {
+		return labels, nil
+	}
+	// Get information of the disk.
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+	disk, rerr := c.DisksClient.Get(ctx, resourceGroup, diskName)
+	if rerr != nil {
+		klog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, rerr)
+		return nil, rerr.Error()
+	}
+
+	// Check whether availability zone is specified.
+	if disk.Zones == nil || len(*disk.Zones) == 0 {
+		klog.V(4).Infof("Azure disk %q is not zoned", diskName)
+		return labels, nil
+	}
+
+	zones := *disk.Zones
+	zoneID, err := strconv.Atoi(zones[0])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse zone %v for AzureDisk %v: %w", zones, diskName, err)
+	}
+
+	zone := c.makeZone(c.Location, zoneID)
+	klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName)
+	labels[consts.LabelFailureDomainBetaZone] = zone
+	return labels, nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go
new file mode 100644
index 0000000000000000000000000000000000000000..d0611a98a510d59afb9f0ae0e0313170b99c0c29
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	reflect "reflect"
+
+	network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	gomock "github.com/golang/mock/gomock"
+
+	v1 "k8s.io/api/core/v1"
+)
+
+// MockBackendPool is a mock of BackendPool interface
+type MockBackendPool struct {
+	ctrl     *gomock.Controller
+	recorder *MockBackendPoolMockRecorder
+}
+
+// MockBackendPoolMockRecorder is the mock recorder for MockBackendPool
+type MockBackendPoolMockRecorder struct {
+	mock *MockBackendPool
+}
+
+// NewMockBackendPool creates a new mock instance
+func NewMockBackendPool(ctrl *gomock.Controller) *MockBackendPool {
+	mock := &MockBackendPool{ctrl: ctrl}
+	mock.recorder = &MockBackendPoolMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockBackendPool) EXPECT() *MockBackendPoolMockRecorder {
+	return m.recorder
+}
+
+// EnsureHostsInPool mocks base method
+func (m *MockBackendPool) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName, clusterName, lbName string, backendPool network.BackendAddressPool) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "EnsureHostsInPool", service, nodes, backendPoolID, vmSetName, clusterName, lbName, backendPool)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// EnsureHostsInPool indicates an expected call of EnsureHostsInPool
+func (mr *MockBackendPoolMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName, clusterName, lbName, backendPool interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostsInPool", reflect.TypeOf((*MockBackendPool)(nil).EnsureHostsInPool), service, nodes, backendPoolID, vmSetName, clusterName, lbName, backendPool)
+}
+
+// CleanupVMSetFromBackendPoolByCondition mocks base method
+func (m *MockBackendPool) CleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalancer, service *v1.Service, nodes []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*network.LoadBalancer, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CleanupVMSetFromBackendPoolByCondition", slb, service, nodes, clusterName, shouldRemoveVMSetFromSLB)
+	ret0, _ := ret[0].(*network.LoadBalancer)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// CleanupVMSetFromBackendPoolByCondition indicates an expected call of CleanupVMSetFromBackendPoolByCondition
+func (mr *MockBackendPoolMockRecorder) CleanupVMSetFromBackendPoolByCondition(slb, service, nodes, clusterName, shouldRemoveVMSetFromSLB interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupVMSetFromBackendPoolByCondition", reflect.TypeOf((*MockBackendPool)(nil).CleanupVMSetFromBackendPoolByCondition), slb, service, nodes, clusterName, shouldRemoveVMSetFromSLB)
+}
+
+// ReconcileBackendPools mocks base method
+func (m *MockBackendPool) ReconcileBackendPools(clusterName string, service *v1.Service, lb *network.LoadBalancer) (bool, bool, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ReconcileBackendPools", clusterName, service, lb)
+	ret0, _ := ret[0].(bool)
+	ret1, _ := ret[1].(bool)
+	ret2, _ := ret[2].(error)
+	return ret0, ret1, ret2
+}
+
+// ReconcileBackendPools indicates an expected call of ReconcileBackendPools
+func (mr *MockBackendPoolMockRecorder) ReconcileBackendPools(clusterName, service, lb interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileBackendPools", reflect.TypeOf((*MockBackendPool)(nil).ReconcileBackendPools), clusterName, service, lb)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go
new file mode 100644
index 0000000000000000000000000000000000000000..e606b554a95da2e9becf48667aedafd6661a374c
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go
@@ -0,0 +1,414 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	context "context"
+	reflect "reflect"
+
+	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	azure "github.com/Azure/go-autorest/autorest/azure"
+	gomock "github.com/golang/mock/gomock"
+	v1 "k8s.io/api/core/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	cloud_provider "k8s.io/cloud-provider"
+	cache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+)
+
+// MockVMSet is a mock of VMSet interface
+type MockVMSet struct {
+	ctrl     *gomock.Controller
+	recorder *MockVMSetMockRecorder
+}
+
+// MockVMSetMockRecorder is the mock recorder for MockVMSet
+type MockVMSetMockRecorder struct {
+	mock *MockVMSet
+}
+
+// NewMockVMSet creates a new mock instance
+func NewMockVMSet(ctrl *gomock.Controller) *MockVMSet {
+	mock := &MockVMSet{ctrl: ctrl}
+	mock.recorder = &MockVMSetMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockVMSet) EXPECT() *MockVMSetMockRecorder {
+	return m.recorder
+}
+
+// GetInstanceIDByNodeName mocks base method
+func (m *MockVMSet) GetInstanceIDByNodeName(name string) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetInstanceIDByNodeName", name)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetInstanceIDByNodeName indicates an expected call of GetInstanceIDByNodeName
+func (mr *MockVMSetMockRecorder) GetInstanceIDByNodeName(name interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceIDByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceIDByNodeName), name)
+}
+
+// GetInstanceTypeByNodeName mocks base method
+func (m *MockVMSet) GetInstanceTypeByNodeName(name string) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetInstanceTypeByNodeName", name)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetInstanceTypeByNodeName indicates an expected call of GetInstanceTypeByNodeName
+func (mr *MockVMSetMockRecorder) GetInstanceTypeByNodeName(name interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceTypeByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceTypeByNodeName), name)
+}
+
+// GetIPByNodeName mocks base method
+func (m *MockVMSet) GetIPByNodeName(name string) (string, string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetIPByNodeName", name)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(string)
+	ret2, _ := ret[2].(error)
+	return ret0, ret1, ret2
+}
+
+// GetIPByNodeName indicates an expected call of GetIPByNodeName
+func (mr *MockVMSetMockRecorder) GetIPByNodeName(name interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetIPByNodeName), name)
+}
+
+// GetPrimaryInterface mocks base method
+func (m *MockVMSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetPrimaryInterface", nodeName)
+	ret0, _ := ret[0].(network.Interface)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetPrimaryInterface indicates an expected call of GetPrimaryInterface
+func (mr *MockVMSetMockRecorder) GetPrimaryInterface(nodeName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryInterface", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryInterface), nodeName)
+}
+
+// GetNodeNameByProviderID mocks base method
+func (m *MockVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetNodeNameByProviderID", providerID)
+	ret0, _ := ret[0].(types.NodeName)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetNodeNameByProviderID indicates an expected call of GetNodeNameByProviderID
+func (mr *MockVMSetMockRecorder) GetNodeNameByProviderID(providerID interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByProviderID), providerID)
+}
+
+// GetZoneByNodeName mocks base method
+func (m *MockVMSet) GetZoneByNodeName(name string) (cloud_provider.Zone, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetZoneByNodeName", name)
+	ret0, _ := ret[0].(cloud_provider.Zone)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetZoneByNodeName indicates an expected call of GetZoneByNodeName
+func (mr *MockVMSetMockRecorder) GetZoneByNodeName(name interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetZoneByNodeName), name)
+}
+
+// GetPrimaryVMSetName mocks base method
+func (m *MockVMSet) GetPrimaryVMSetName() string {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetPrimaryVMSetName")
+	ret0, _ := ret[0].(string)
+	return ret0
+}
+
+// GetPrimaryVMSetName indicates an expected call of GetPrimaryVMSetName
+func (mr *MockVMSetMockRecorder) GetPrimaryVMSetName() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryVMSetName))
+}
+
+// GetVMSetNames mocks base method
+func (m *MockVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetVMSetNames", service, nodes)
+	ret0, _ := ret[0].(*[]string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetVMSetNames indicates an expected call of GetVMSetNames
+func (mr *MockVMSetMockRecorder) GetVMSetNames(service, nodes interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetVMSetNames), service, nodes)
+}
+
+// GetNodeVMSetName mocks base method
+func (m *MockVMSet) GetNodeVMSetName(node *v1.Node) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetNodeVMSetName", node)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetNodeVMSetName indicates an expected call of GetNodeVMSetName
+func (mr *MockVMSetMockRecorder) GetNodeVMSetName(node interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetNodeVMSetName), node)
+}
+
+// EnsureHostsInPool mocks base method
+func (m *MockVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "EnsureHostsInPool", service, nodes, backendPoolID, vmSetName)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// EnsureHostsInPool indicates an expected call of EnsureHostsInPool
+func (mr *MockVMSetMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostsInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostsInPool), service, nodes, backendPoolID, vmSetName)
+}
+
+// EnsureHostInPool mocks base method
+func (m *MockVMSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID, vmSetName string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "EnsureHostInPool", service, nodeName, backendPoolID, vmSetName)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(string)
+	ret2, _ := ret[2].(string)
+	ret3, _ := ret[3].(*compute.VirtualMachineScaleSetVM)
+	ret4, _ := ret[4].(error)
+	return ret0, ret1, ret2, ret3, ret4
+}
+
+// EnsureHostInPool indicates an expected call of EnsureHostInPool
+func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPoolID, vmSetName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostInPool), service, nodeName, backendPoolID, vmSetName)
+}
+
+// EnsureBackendPoolDeleted mocks base method
+func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "EnsureBackendPoolDeleted", service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// EnsureBackendPoolDeleted indicates an expected call of EnsureBackendPoolDeleted
+func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
+}
+
+// EnsureBackendPoolDeletedFromVMSets mocks base method
+func (m *MockVMSet) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolID string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "EnsureBackendPoolDeletedFromVMSets", vmSetNamesMap, backendPoolID)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// EnsureBackendPoolDeletedFromVMSets indicates an expected call of EnsureBackendPoolDeletedFromVMSets
+func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap, backendPoolID interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeletedFromVMSets", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeletedFromVMSets), vmSetNamesMap, backendPoolID)
+}
+
+// AttachDisk mocks base method
+func (m *MockVMSet) AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "AttachDisk", nodeName, diskMap)
+	ret0, _ := ret[0].(*azure.Future)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// AttachDisk indicates an expected call of AttachDisk
+func (mr *MockVMSetMockRecorder) AttachDisk(nodeName, diskMap interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachDisk", reflect.TypeOf((*MockVMSet)(nil).AttachDisk), nodeName, diskMap)
+}
+
+// DetachDisk mocks base method
+func (m *MockVMSet) DetachDisk(nodeName types.NodeName, diskMap map[string]string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DetachDisk", nodeName, diskMap)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// DetachDisk indicates an expected call of DetachDisk
+func (mr *MockVMSetMockRecorder) DetachDisk(nodeName, diskMap interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachDisk", reflect.TypeOf((*MockVMSet)(nil).DetachDisk), nodeName, diskMap)
+}
+
+// WaitForUpdateResult mocks base method
+func (m *MockVMSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, resourceGroupName, source)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// WaitForUpdateResult indicates an expected call of WaitForUpdateResult
+func (mr *MockVMSetMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockVMSet)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source)
+}
+
+// GetDataDisks mocks base method
+func (m *MockVMSet) GetDataDisks(nodeName types.NodeName, crt cache.AzureCacheReadType) ([]compute.DataDisk, *string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetDataDisks", nodeName, crt)
+	ret0, _ := ret[0].([]compute.DataDisk)
+	ret1, _ := ret[1].(*string)
+	ret2, _ := ret[2].(error)
+	return ret0, ret1, ret2
+}
+
+// GetDataDisks indicates an expected call of GetDataDisks
+func (mr *MockVMSetMockRecorder) GetDataDisks(nodeName, crt interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataDisks", reflect.TypeOf((*MockVMSet)(nil).GetDataDisks), nodeName, crt)
+}
+
+// UpdateVM mocks base method
+func (m *MockVMSet) UpdateVM(nodeName types.NodeName) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "UpdateVM", nodeName)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// UpdateVM indicates an expected call of UpdateVM
+func (mr *MockVMSetMockRecorder) UpdateVM(nodeName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVM", reflect.TypeOf((*MockVMSet)(nil).UpdateVM), nodeName)
+}
+
+// GetPowerStatusByNodeName mocks base method
+func (m *MockVMSet) GetPowerStatusByNodeName(name string) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetPowerStatusByNodeName", name)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetPowerStatusByNodeName indicates an expected call of GetPowerStatusByNodeName
+func (mr *MockVMSetMockRecorder) GetPowerStatusByNodeName(name interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerStatusByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPowerStatusByNodeName), name)
+}
+
+// GetProvisioningStateByNodeName mocks base method
+func (m *MockVMSet) GetProvisioningStateByNodeName(name string) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetProvisioningStateByNodeName", name)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetProvisioningStateByNodeName indicates an expected call of GetProvisioningStateByNodeName
+func (mr *MockVMSetMockRecorder) GetProvisioningStateByNodeName(name interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProvisioningStateByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetProvisioningStateByNodeName), name)
+}
+
+// GetPrivateIPsByNodeName mocks base method
+func (m *MockVMSet) GetPrivateIPsByNodeName(name string) ([]string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetPrivateIPsByNodeName", name)
+	ret0, _ := ret[0].([]string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetPrivateIPsByNodeName indicates an expected call of GetPrivateIPsByNodeName
+func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateIPsByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPrivateIPsByNodeName), name)
+}
+
+// GetNodeNameByIPConfigurationID mocks base method
+func (m *MockVMSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetNodeNameByIPConfigurationID", ipConfigurationID)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(string)
+	ret2, _ := ret[2].(error)
+	return ret0, ret1, ret2
+}
+
+// GetNodeNameByIPConfigurationID indicates an expected call of GetNodeNameByIPConfigurationID
+func (mr *MockVMSetMockRecorder) GetNodeNameByIPConfigurationID(ipConfigurationID interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByIPConfigurationID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByIPConfigurationID), ipConfigurationID)
+}
+
+// GetNodeCIDRMasksByProviderID mocks base method
+func (m *MockVMSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetNodeCIDRMasksByProviderID", providerID)
+	ret0, _ := ret[0].(int)
+	ret1, _ := ret[1].(int)
+	ret2, _ := ret[2].(error)
+	return ret0, ret1, ret2
+}
+
+// GetNodeCIDRMasksByProviderID indicates an expected call of GetNodeCIDRMasksByProviderID
+func (mr *MockVMSetMockRecorder) GetNodeCIDRMasksByProviderID(providerID interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeCIDRMasksByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeCIDRMasksByProviderID), providerID)
+}
+
+// GetAgentPoolVMSetNames mocks base method
+func (m *MockVMSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetAgentPoolVMSetNames", nodes)
+	ret0, _ := ret[0].(*[]string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetAgentPoolVMSetNames indicates an expected call of GetAgentPoolVMSetNames
+func (mr *MockVMSetMockRecorder) GetAgentPoolVMSetNames(nodes interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAgentPoolVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetAgentPoolVMSetNames), nodes)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go
new file mode 100644
index 0000000000000000000000000000000000000000..b9f92064717d8b88b98c4d98bb0fed943dd09e6d
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+const (
+	defaultAtachDetachDiskQPS    = 6.0
+	defaultAtachDetachDiskBucket = 10
+)
+
+// CloudProviderRateLimitConfig indicates the rate limit config for each clients.
+type CloudProviderRateLimitConfig struct {
+	// The default rate limit config options.
+	azclients.RateLimitConfig
+
+	// Rate limit config for each clients. Values would override default settings above.
+	RouteRateLimit                  *azclients.RateLimitConfig `json:"routeRateLimit,omitempty" yaml:"routeRateLimit,omitempty"`
+	SubnetsRateLimit                *azclients.RateLimitConfig `json:"subnetsRateLimit,omitempty" yaml:"subnetsRateLimit,omitempty"`
+	InterfaceRateLimit              *azclients.RateLimitConfig `json:"interfaceRateLimit,omitempty" yaml:"interfaceRateLimit,omitempty"`
+	RouteTableRateLimit             *azclients.RateLimitConfig `json:"routeTableRateLimit,omitempty" yaml:"routeTableRateLimit,omitempty"`
+	LoadBalancerRateLimit           *azclients.RateLimitConfig `json:"loadBalancerRateLimit,omitempty" yaml:"loadBalancerRateLimit,omitempty"`
+	PublicIPAddressRateLimit        *azclients.RateLimitConfig `json:"publicIPAddressRateLimit,omitempty" yaml:"publicIPAddressRateLimit,omitempty"`
+	SecurityGroupRateLimit          *azclients.RateLimitConfig `json:"securityGroupRateLimit,omitempty" yaml:"securityGroupRateLimit,omitempty"`
+	VirtualMachineRateLimit         *azclients.RateLimitConfig `json:"virtualMachineRateLimit,omitempty" yaml:"virtualMachineRateLimit,omitempty"`
+	StorageAccountRateLimit         *azclients.RateLimitConfig `json:"storageAccountRateLimit,omitempty" yaml:"storageAccountRateLimit,omitempty"`
+	DiskRateLimit                   *azclients.RateLimitConfig `json:"diskRateLimit,omitempty" yaml:"diskRateLimit,omitempty"`
+	SnapshotRateLimit               *azclients.RateLimitConfig `json:"snapshotRateLimit,omitempty" yaml:"snapshotRateLimit,omitempty"`
+	VirtualMachineScaleSetRateLimit *azclients.RateLimitConfig `json:"virtualMachineScaleSetRateLimit,omitempty" yaml:"virtualMachineScaleSetRateLimit,omitempty"`
+	VirtualMachineSizeRateLimit     *azclients.RateLimitConfig `json:"virtualMachineSizesRateLimit,omitempty" yaml:"virtualMachineSizesRateLimit,omitempty"`
+	AvailabilitySetRateLimit        *azclients.RateLimitConfig `json:"availabilitySetRateLimit,omitempty" yaml:"availabilitySetRateLimit,omitempty"`
+	AttachDetachDiskRateLimit       *azclients.RateLimitConfig `json:"attachDetachDiskRateLimit,omitempty" yaml:"attachDetachDiskRateLimit,omitempty"`
+}
+
+// InitializeCloudProviderRateLimitConfig initializes rate limit configs.
+func InitializeCloudProviderRateLimitConfig(config *CloudProviderRateLimitConfig) {
+	if config == nil {
+		return
+	}
+
+	// Assign read rate limit defaults if no configuration was passed in.
+	if config.CloudProviderRateLimitQPS == 0 {
+		config.CloudProviderRateLimitQPS = consts.RateLimitQPSDefault
+	}
+	if config.CloudProviderRateLimitBucket == 0 {
+		config.CloudProviderRateLimitBucket = consts.RateLimitBucketDefault
+	}
+	// Assign write rate limit defaults if no configuration was passed in.
+	if config.CloudProviderRateLimitQPSWrite == 0 {
+		config.CloudProviderRateLimitQPSWrite = config.CloudProviderRateLimitQPS
+	}
+	if config.CloudProviderRateLimitBucketWrite == 0 {
+		config.CloudProviderRateLimitBucketWrite = config.CloudProviderRateLimitBucket
+	}
+
+	config.RouteRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.RouteRateLimit)
+	config.SubnetsRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.SubnetsRateLimit)
+	config.InterfaceRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.InterfaceRateLimit)
+	config.RouteTableRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.RouteTableRateLimit)
+	config.LoadBalancerRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.LoadBalancerRateLimit)
+	config.PublicIPAddressRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.PublicIPAddressRateLimit)
+	config.SecurityGroupRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.SecurityGroupRateLimit)
+	config.VirtualMachineRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineRateLimit)
+	config.StorageAccountRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.StorageAccountRateLimit)
+	config.DiskRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.DiskRateLimit)
+	config.SnapshotRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.SnapshotRateLimit)
+	config.VirtualMachineScaleSetRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineScaleSetRateLimit)
+	config.VirtualMachineSizeRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineSizeRateLimit)
+	config.AvailabilitySetRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.AvailabilitySetRateLimit)
+
+	atachDetachDiskRateLimitConfig := azclients.RateLimitConfig{
+		CloudProviderRateLimit:            true,
+		CloudProviderRateLimitQPSWrite:    defaultAtachDetachDiskQPS,
+		CloudProviderRateLimitBucketWrite: defaultAtachDetachDiskBucket,
+	}
+	config.AttachDetachDiskRateLimit = overrideDefaultRateLimitConfig(&atachDetachDiskRateLimitConfig, config.AttachDetachDiskRateLimit)
+}
+
+// overrideDefaultRateLimitConfig overrides the default CloudProviderRateLimitConfig.
+func overrideDefaultRateLimitConfig(defaults, config *azclients.RateLimitConfig) *azclients.RateLimitConfig {
+	// If config not set, apply defaults.
+	if config == nil {
+		return defaults
+	}
+
+	// Remain disabled if it's set explicitly.
+	if !config.CloudProviderRateLimit {
+		return &azclients.RateLimitConfig{CloudProviderRateLimit: false}
+	}
+
+	// Apply default values.
+	if config.CloudProviderRateLimitQPS == 0 {
+		config.CloudProviderRateLimitQPS = defaults.CloudProviderRateLimitQPS
+	}
+	if config.CloudProviderRateLimitBucket == 0 {
+		config.CloudProviderRateLimitBucket = defaults.CloudProviderRateLimitBucket
+	}
+	if config.CloudProviderRateLimitQPSWrite == 0 {
+		config.CloudProviderRateLimitQPSWrite = defaults.CloudProviderRateLimitQPSWrite
+	}
+	if config.CloudProviderRateLimitBucketWrite == 0 {
+		config.CloudProviderRateLimitBucketWrite = defaults.CloudProviderRateLimitBucketWrite
+	}
+
+	return config
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go
new file mode 100644
index 0000000000000000000000000000000000000000..92a93d73c1e29f0f0d96a271f9d9c8e8957b2181
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go
@@ -0,0 +1,578 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+	utilnet "k8s.io/utils/net"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+)
+
+var (
+	// routeUpdateInterval defines the route reconciling interval.
+	routeUpdateInterval = 30 * time.Second
+)
+
+// routeOperation defines the allowed operations for route updating.
+type routeOperation string
+
+// copied to minimize the number of cross references
+// and exceptions in publishing and allowed imports.
+const (
+	// Route operations.
+	routeOperationAdd             routeOperation = "add"
+	routeOperationDelete          routeOperation = "delete"
+	routeTableOperationUpdateTags routeOperation = "updateRouteTableTags"
+)
+
+// delayedRouteOperation defines a delayed route operation which is used in delayedRouteUpdater.
+type delayedRouteOperation struct {
+	route          network.Route
+	routeTableTags map[string]*string
+	operation      routeOperation
+	result         chan error
+}
+
+// wait waits for the operation completion and returns the result.
+func (op *delayedRouteOperation) wait() error {
+	return <-op.result
+}
+
+// delayedRouteUpdater defines a delayed route updater, which batches all the
+// route updating operations within "interval" period.
+// Example usage:
+//   op, err := updater.addRouteOperation(routeOperationAdd, route)
+//   err = op.wait()
+type delayedRouteUpdater struct {
+	az       *Cloud
+	interval time.Duration
+
+	lock           sync.Mutex
+	routesToUpdate []*delayedRouteOperation
+}
+
+// newDelayedRouteUpdater creates a new delayedRouteUpdater.
+func newDelayedRouteUpdater(az *Cloud, interval time.Duration) *delayedRouteUpdater {
+	return &delayedRouteUpdater{
+		az:             az,
+		interval:       interval,
+		routesToUpdate: make([]*delayedRouteOperation, 0),
+	}
+}
+
+// run starts the updater reconciling loop.
+func (d *delayedRouteUpdater) run() {
+	err := wait.PollImmediateInfinite(d.interval, func() (bool, error) {
+		d.updateRoutes()
+		return false, nil
+	})
+	if err != nil { // this should never happen, if it does, panic
+		panic(err)
+	}
+}
+
+// updateRoutes invokes route table client to update all routes.
+func (d *delayedRouteUpdater) updateRoutes() {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	// No need to do any updating.
+	if len(d.routesToUpdate) == 0 {
+		klog.V(4).Info("updateRoutes: nothing to update, returning")
+		return
+	}
+
+	var err error
+	defer func() {
+		// Notify all the goroutines.
+		for _, rt := range d.routesToUpdate {
+			rt.result <- err
+		}
+		// Clear all the jobs.
+		d.routesToUpdate = make([]*delayedRouteOperation, 0)
+	}()
+
+	var (
+		routeTable       network.RouteTable
+		existsRouteTable bool
+	)
+	routeTable, existsRouteTable, err = d.az.getRouteTable(azcache.CacheReadTypeDefault)
+	if err != nil {
+		klog.Errorf("getRouteTable() failed with error: %v", err)
+		return
+	}
+
+	// Create the route table if it doesn't exist yet.
+	if !existsRouteTable {
+		err = d.az.createRouteTable()
+		if err != nil {
+			klog.Errorf("createRouteTable() failed with error: %v", err)
+			return
+		}
+
+		routeTable, _, err = d.az.getRouteTable(azcache.CacheReadTypeDefault)
+		if err != nil {
+			klog.Errorf("getRouteTable() failed with error: %v", err)
+			return
+		}
+	}
+
+	// reconcile routes.
+	dirty, onlyUpdateTags := false, true
+	routes := []network.Route{}
+	if routeTable.RouteTablePropertiesFormat != nil && routeTable.RouteTablePropertiesFormat.Routes != nil {
+		routes = *routeTable.Routes
+	}
+
+	routes, dirty = d.cleanupOutdatedRoutes(routes)
+	if dirty {
+		onlyUpdateTags = false
+	}
+
+	for _, rt := range d.routesToUpdate {
+		if rt.operation == routeTableOperationUpdateTags {
+			routeTable.Tags = rt.routeTableTags
+			dirty = true
+			continue
+		}
+
+		routeMatch := false
+		onlyUpdateTags = false
+		for i, existingRoute := range routes {
+			if strings.EqualFold(to.String(existingRoute.Name), to.String(rt.route.Name)) {
+				// delete the name-matched routes here (missing routes would be added later if the operation is add).
+				routes = append(routes[:i], routes[i+1:]...)
+				if existingRoute.RoutePropertiesFormat != nil &&
+					rt.route.RoutePropertiesFormat != nil &&
+					strings.EqualFold(to.String(existingRoute.AddressPrefix), to.String(rt.route.AddressPrefix)) &&
+					strings.EqualFold(to.String(existingRoute.NextHopIPAddress), to.String(rt.route.NextHopIPAddress)) {
+					routeMatch = true
+				}
+				if rt.operation == routeOperationDelete {
+					dirty = true
+				}
+				break
+			}
+		}
+
+		// Add missing routes if the operation is add.
+		if rt.operation == routeOperationAdd {
+			routes = append(routes, rt.route)
+			if !routeMatch {
+				dirty = true
+			}
+			continue
+		}
+	}
+
+	if dirty {
+		if !onlyUpdateTags {
+			klog.V(2).Infof("updateRoutes: updating routes")
+			routeTable.Routes = &routes
+		}
+		err = d.az.CreateOrUpdateRouteTable(routeTable)
+		if err != nil {
+			klog.Errorf("CreateOrUpdateRouteTable() failed with error: %v", err)
+			return
+		}
+
+		// wait a while for route updates to take effect.
+		time.Sleep(time.Duration(d.az.Config.RouteUpdateWaitingInSeconds) * time.Second)
+	}
+}
+
+// cleanupOutdatedRoutes deletes all non-dualstack routes when dualstack is enabled,
+// and deletes all dualstack routes when dualstack is not enabled.
+func (d *delayedRouteUpdater) cleanupOutdatedRoutes(existingRoutes []network.Route) (routes []network.Route, changed bool) {
+	for i := len(existingRoutes) - 1; i >= 0; i-- {
+		existingRouteName := to.String(existingRoutes[i].Name)
+		split := strings.Split(existingRouteName, consts.RouteNameSeparator)
+
+		klog.V(4).Infof("cleanupOutdatedRoutes: checking route %s", existingRouteName)
+
+		// filter out unmanaged routes
+		deleteRoute := false
+		if d.az.nodeNames.Has(split[0]) {
+			if d.az.ipv6DualStackEnabled && len(split) == 1 {
+				klog.V(2).Infof("cleanupOutdatedRoutes: deleting outdated non-dualstack route %s", existingRouteName)
+				deleteRoute = true
+			} else if !d.az.ipv6DualStackEnabled && len(split) == 2 {
+				klog.V(2).Infof("cleanupOutdatedRoutes: deleting outdated dualstack route %s", existingRouteName)
+				deleteRoute = true
+			}
+
+			if deleteRoute {
+				existingRoutes = append(existingRoutes[:i], existingRoutes[i+1:]...)
+				changed = true
+			}
+		}
+	}
+
+	return existingRoutes, changed
+}
+
+// addRouteOperation adds the routeOperation to delayedRouteUpdater and returns a delayedRouteOperation.
+func (d *delayedRouteUpdater) addRouteOperation(operation routeOperation, route network.Route) (*delayedRouteOperation, error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	op := &delayedRouteOperation{
+		route:     route,
+		operation: operation,
+		result:    make(chan error),
+	}
+	d.routesToUpdate = append(d.routesToUpdate, op)
+	return op, nil
+}
+
+// addUpdateRouteTableTagsOperation adds a update route table tags operation to delayedRouteUpdater and returns a delayedRouteOperation.
+func (d *delayedRouteUpdater) addUpdateRouteTableTagsOperation(operation routeOperation, tags map[string]*string) (*delayedRouteOperation, error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	op := &delayedRouteOperation{
+		routeTableTags: tags,
+		operation:      operation,
+		result:         make(chan error),
+	}
+	d.routesToUpdate = append(d.routesToUpdate, op)
+	return op, nil
+}
+
+// ListRoutes lists all managed routes that belong to the specified clusterName
+func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
+	klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
+	routeTable, existsRouteTable, err := az.getRouteTable(azcache.CacheReadTypeDefault)
+	routes, err := processRoutes(az.ipv6DualStackEnabled, routeTable, existsRouteTable, err)
+	if err != nil {
+		return nil, err
+	}
+
+	// Compose routes for unmanaged nodes so that the node controller won't retry creating routes for them.
+	unmanagedNodes, err := az.GetUnmanagedNodes()
+	if err != nil {
+		return nil, err
+	}
+	az.routeCIDRsLock.Lock()
+	defer az.routeCIDRsLock.Unlock()
+	for _, nodeName := range unmanagedNodes.List() {
+		if cidr, ok := az.routeCIDRs[nodeName]; ok {
+			routes = append(routes, &cloudprovider.Route{
+				Name:            nodeName,
+				TargetNode:      MapRouteNameToNodeName(az.ipv6DualStackEnabled, nodeName),
+				DestinationCIDR: cidr,
+			})
+		}
+	}
+
+	// ensure the route table is tagged as configured
+	tags, changed := az.ensureRouteTableTagged(&routeTable)
+	if changed {
+		klog.V(2).Infof("ListRoutes: updating tags on route table %s", to.String(routeTable.Name))
+		op, err := az.routeUpdater.addUpdateRouteTableTagsOperation(routeTableOperationUpdateTags, tags)
+		if err != nil {
+			klog.Errorf("ListRoutes: failed to add route table operation with error: %v", err)
+			return nil, err
+		}
+
+		// Wait for operation complete.
+		err = op.wait()
+		if err != nil {
+			klog.Errorf("ListRoutes: failed to update route table tags with error: %v", err)
+			return nil, err
+		}
+	}
+
+	return routes, nil
+}
+
+// Injectable for testing
+func processRoutes(ipv6DualStackEnabled bool, routeTable network.RouteTable, exists bool, err error) ([]*cloudprovider.Route, error) {
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return []*cloudprovider.Route{}, nil
+	}
+
+	var kubeRoutes []*cloudprovider.Route
+	if routeTable.RouteTablePropertiesFormat != nil && routeTable.Routes != nil {
+		kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes))
+		for i, route := range *routeTable.Routes {
+			instance := MapRouteNameToNodeName(ipv6DualStackEnabled, *route.Name)
+			cidr := *route.AddressPrefix
+			klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
+
+			kubeRoutes[i] = &cloudprovider.Route{
+				Name:            *route.Name,
+				TargetNode:      instance,
+				DestinationCIDR: cidr,
+			}
+		}
+	}
+
+	klog.V(10).Info("ListRoutes: FINISH")
+	return kubeRoutes, nil
+}
+
+func (az *Cloud) createRouteTable() error {
+	routeTable := network.RouteTable{
+		Name:                       to.StringPtr(az.RouteTableName),
+		Location:                   to.StringPtr(az.Location),
+		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
+	}
+
+	klog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
+	err := az.CreateOrUpdateRouteTable(routeTable)
+	if err != nil {
+		return err
+	}
+
+	// Invalidate the cache right after updating
+	_ = az.rtCache.Delete(az.RouteTableName)
+	return nil
+}
+
+// CreateRoute creates the described managed route
+// route.Name will be ignored, although the cloud-provider may use nameHint
+// to create a more user-meaningful name.
+func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
+	mc := metrics.NewMetricContext("routes", "create_route", az.ResourceGroup, az.SubscriptionID, "")
+	isOperationSucceeded := false
+	defer func() {
+		mc.ObserveOperationWithResult(isOperationSucceeded)
+	}()
+
+	// Returns nil for unmanaged nodes because the azure cloud provider couldn't fetch information for them.
+	var targetIP string
+	nodeName := string(kubeRoute.TargetNode)
+	unmanaged, err := az.IsNodeUnmanaged(nodeName)
+	if err != nil {
+		return err
+	}
+	if unmanaged {
+		if az.ipv6DualStackEnabled {
+			//TODO (khenidak) add support for unmanaged nodes when the feature reaches beta
+			return fmt.Errorf("unmanaged nodes are not supported in dual stack mode")
+		}
+		klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
+		az.routeCIDRsLock.Lock()
+		defer az.routeCIDRsLock.Unlock()
+		az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR
+		return nil
+	}
+
+	CIDRv6 := utilnet.IsIPv6CIDRString(kubeRoute.DestinationCIDR)
+	// if single stack IPv4 then get the IP for the primary ip config
+	// single stack IPv6 is supported on dual stack host. So the IPv6 IP is secondary IP for both single stack IPv6 and dual stack
+	// Get all private IPs for the machine and find the first one that matches the IPv6 family
+	if !az.ipv6DualStackEnabled && !CIDRv6 {
+		targetIP, _, err = az.getIPForMachine(kubeRoute.TargetNode)
+		if err != nil {
+			return err
+		}
+	} else {
+		// for dual stack and single stack IPv6 we need to select
+		// a private ip that matches family of the cidr
+		klog.V(4).Infof("CreateRoute: create route instance=%q cidr=%q is in dual stack mode", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+		nodePrivateIPs, err := az.getPrivateIPsForMachine(kubeRoute.TargetNode)
+		if nil != err {
+			klog.V(3).Infof("CreateRoute: create route: failed(GetPrivateIPsByNodeName) instance=%q cidr=%q with error=%v", kubeRoute.TargetNode, kubeRoute.DestinationCIDR, err)
+			return err
+		}
+
+		targetIP, err = findFirstIPByFamily(nodePrivateIPs, CIDRv6)
+		if nil != err {
+			klog.V(3).Infof("CreateRoute: create route: failed(findFirstIpByFamily) instance=%q cidr=%q with error=%v", kubeRoute.TargetNode, kubeRoute.DestinationCIDR, err)
+			return err
+		}
+	}
+	routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	route := network.Route{
+		Name: to.StringPtr(routeName),
+		RoutePropertiesFormat: &network.RoutePropertiesFormat{
+			AddressPrefix:    to.StringPtr(kubeRoute.DestinationCIDR),
+			NextHopType:      network.RouteNextHopTypeVirtualAppliance,
+			NextHopIPAddress: to.StringPtr(targetIP),
+		},
+	}
+
+	klog.V(2).Infof("CreateRoute: creating route for clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	op, err := az.routeUpdater.addRouteOperation(routeOperationAdd, route)
+	if err != nil {
+		klog.Errorf("CreateRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
+		return err
+	}
+
+	// Wait for operation complete.
+	err = op.wait()
+	if err != nil {
+		klog.Errorf("CreateRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
+		return err
+	}
+
+	klog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	isOperationSucceeded = true
+
+	return nil
+}
+
+// DeleteRoute deletes the specified managed route
+// Route should be as returned by ListRoutes
+func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
+	mc := metrics.NewMetricContext("routes", "delete_route", az.ResourceGroup, az.SubscriptionID, "")
+	isOperationSucceeded := false
+	defer func() {
+		mc.ObserveOperationWithResult(isOperationSucceeded)
+	}()
+
+	// Returns nil for unmanaged nodes because the azure cloud provider couldn't fetch information for them.
+	nodeName := string(kubeRoute.TargetNode)
+	unmanaged, err := az.IsNodeUnmanaged(nodeName)
+	if err != nil {
+		return err
+	}
+	if unmanaged {
+		klog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
+		az.routeCIDRsLock.Lock()
+		defer az.routeCIDRsLock.Unlock()
+		delete(az.routeCIDRs, nodeName)
+		return nil
+	}
+
+	routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeName)
+	route := network.Route{
+		Name:                  to.StringPtr(routeName),
+		RoutePropertiesFormat: &network.RoutePropertiesFormat{},
+	}
+	op, err := az.routeUpdater.addRouteOperation(routeOperationDelete, route)
+	if err != nil {
+		klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
+		return err
+	}
+
+	// Wait for operation complete.
+	err = op.wait()
+	if err != nil {
+		klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
+		return err
+	}
+
+	// Remove outdated ipv4 routes as well
+	if az.ipv6DualStackEnabled {
+		routeNameWithoutIPV6Suffix := strings.Split(routeName, consts.RouteNameSeparator)[0]
+		klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeNameWithoutIPV6Suffix)
+		route := network.Route{
+			Name:                  to.StringPtr(routeNameWithoutIPV6Suffix),
+			RoutePropertiesFormat: &network.RoutePropertiesFormat{},
+		}
+		op, err := az.routeUpdater.addRouteOperation(routeOperationDelete, route)
+		if err != nil {
+			klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
+			return err
+		}
+
+		// Wait for operation complete.
+		err = op.wait()
+		if err != nil {
+			klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
+			return err
+		}
+	}
+
+	klog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	isOperationSucceeded = true
+
+	return nil
+}
+
+// This must be kept in sync with MapRouteNameToNodeName.
+// These two functions enable stashing the instance name in the route
+// and then retrieving it later when listing. This is needed because
+// Azure does not let you put tags/descriptions on the Route itself.
+func mapNodeNameToRouteName(ipv6DualStackEnabled bool, nodeName types.NodeName, cidr string) string {
+	if !ipv6DualStackEnabled {
+		return string(nodeName)
+	}
+	return fmt.Sprintf(consts.RouteNameFmt, nodeName, cidrtoRfc1035(cidr))
+}
+
+// MapRouteNameToNodeName is used with mapNodeNameToRouteName.
+// See comment on mapNodeNameToRouteName for detailed usage.
+func MapRouteNameToNodeName(ipv6DualStackEnabled bool, routeName string) types.NodeName {
+	if !ipv6DualStackEnabled {
+		return types.NodeName(routeName)
+	}
+	parts := strings.Split(routeName, consts.RouteNameSeparator)
+	nodeName := parts[0]
+	return types.NodeName(nodeName)
+
+}
+
+// given a list of ips, return the first one
+// that matches the family requested
+// error if no match, or failure to parse
+// any of the ips
+func findFirstIPByFamily(ips []string, v6 bool) (string, error) {
+	for _, ip := range ips {
+		bIPv6 := utilnet.IsIPv6String(ip)
+		if v6 == bIPv6 {
+			return ip, nil
+		}
+	}
+	return "", fmt.Errorf("no match found matching the ipfamily requested")
+}
+
+// strips the characters ':', '.' and '/' from the CIDR string
+func cidrtoRfc1035(cidr string) string {
+	cidr = strings.ReplaceAll(cidr, ":", "")
+	cidr = strings.ReplaceAll(cidr, ".", "")
+	cidr = strings.ReplaceAll(cidr, "/", "")
+	return cidr
+}
+
+// ensureRouteTableTagged ensures the route table is tagged as configured
+func (az *Cloud) ensureRouteTableTagged(rt *network.RouteTable) (map[string]*string, bool) {
+	if az.Tags == "" && (az.TagsMap == nil || len(az.TagsMap) == 0) {
+		return nil, false
+	}
+	tags := parseTags(az.Tags, az.TagsMap)
+	if rt.Tags == nil {
+		rt.Tags = make(map[string]*string)
+	}
+
+	tags, changed := az.reconcileTags(rt.Tags, tags)
+	rt.Tags = tags
+
+	return rt.Tags, changed
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
new file mode 100644
index 0000000000000000000000000000000000000000..3a912f71c07845db83292afd8550e4431f1a832e
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
@@ -0,0 +1,1306 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+	utilnet "k8s.io/utils/net"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+)
+
+var (
+	errNotInVMSet      = errors.New("vm is not in the vmset")
+	providerIDRE       = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
+	backendPoolIDRE    = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
+	nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)
+	nicIDRE            = regexp.MustCompile(`(?i)/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(.+)/ipConfigurations/(?:.*)`)
+	vmIDRE             = regexp.MustCompile(`(?i)/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/virtualMachines/(.+)`)
+	vmasIDRE           = regexp.MustCompile(`/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/availabilitySets/(.+)`)
+)
+
+// getStandardMachineID returns the full identifier of a virtual machine.
+func (az *Cloud) getStandardMachineID(subscriptionID, resourceGroup, machineName string) string {
+	return fmt.Sprintf(
+		consts.MachineIDTemplate,
+		subscriptionID,
+		strings.ToLower(resourceGroup),
+		machineName)
+}
+
+// returns the full identifier of an availabilitySet
+func (az *Cloud) getAvailabilitySetID(resourceGroup, availabilitySetName string) string {
+	return fmt.Sprintf(
+		consts.AvailabilitySetIDTemplate,
+		az.SubscriptionID,
+		resourceGroup,
+		availabilitySetName)
+}
+
+// returns the full identifier of a loadbalancer frontendipconfiguration.
+func (az *Cloud) getFrontendIPConfigID(lbName, rgName, fipConfigName string) string {
+	return fmt.Sprintf(
+		consts.FrontendIPConfigIDTemplate,
+		az.getNetworkResourceSubscriptionID(),
+		rgName,
+		lbName,
+		fipConfigName)
+}
+
+// returns the full identifier of a loadbalancer backendpool.
+func (az *Cloud) getBackendPoolID(lbName, rgName, backendPoolName string) string {
+	return fmt.Sprintf(
+		consts.BackendPoolIDTemplate,
+		az.getNetworkResourceSubscriptionID(),
+		rgName,
+		lbName,
+		backendPoolName)
+}
+
+// returns the full identifier of a loadbalancer probe.
+func (az *Cloud) getLoadBalancerProbeID(lbName, rgName, lbRuleName string) string {
+	return fmt.Sprintf(
+		consts.LoadBalancerProbeIDTemplate,
+		az.getNetworkResourceSubscriptionID(),
+		rgName,
+		lbName,
+		lbRuleName)
+}
+
+// getNetworkResourceSubscriptionID returns the subscription id which hosts network resources
+func (az *Cloud) getNetworkResourceSubscriptionID() string {
+	if az.Config.UsesNetworkResourceInDifferentTenantOrSubscription() {
+		return az.NetworkResourceSubscriptionID
+	}
+	return az.SubscriptionID
+}
+
+func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
+	vmSetName = strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix)
+	if strings.EqualFold(clusterName, vmSetName) {
+		vmSetName = az.VMSet.GetPrimaryVMSetName()
+	}
+
+	return vmSetName
+}
+
+// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress.
+// Thus Azure does not allow mixed type (public and internal) load balancers.
+// So we'd have a separate name for internal load balancer.
+// This would be the name for Azure LoadBalancer resource.
+func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
+	if az.LoadBalancerName != "" {
+		clusterName = az.LoadBalancerName
+	}
+	lbNamePrefix := vmSetName
+	// The LB name prefix is set to the name of the cluster when:
+	// 1. the LB belongs to the primary agent pool.
+	// 2. using the single SLB.
+	useSingleSLB := az.useStandardLoadBalancer() && !az.EnableMultipleStandardLoadBalancers
+	if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || useSingleSLB {
+		lbNamePrefix = clusterName
+	}
+	// 3. using multiple SLBs while the vmSet is sharing the primary SLB
+	useMultipleSLB := az.useStandardLoadBalancer() && az.EnableMultipleStandardLoadBalancers
+	if useMultipleSLB && az.getVMSetNamesSharingPrimarySLB().Has(strings.ToLower(vmSetName)) {
+		lbNamePrefix = clusterName
+	}
+	if isInternal {
+		return fmt.Sprintf("%s%s", lbNamePrefix, consts.InternalLoadBalancerNameSuffix)
+	}
+	return lbNamePrefix
+}
+
+// isControlPlaneNode returns true if the node has a control-plane role label.
+// The control-plane role is determined by looking for:
+// * a node-role.kubernetes.io/control-plane or node-role.kubernetes.io/master="" label
+func isControlPlaneNode(node *v1.Node) bool {
+	if _, ok := node.Labels[consts.ControlPlaneNodeRoleLabel]; ok {
+		return true
+	}
+	// include master role labels for k8s < 1.19
+	if _, ok := node.Labels[consts.MasterNodeRoleLabel]; ok {
+		return true
+	}
+	if val, ok := node.Labels[consts.NodeLabelRole]; ok && val == "master" {
+		return true
+	}
+	return false
+}
+
+// returns the deepest child's identifier from a full identifier string.
+func getLastSegment(ID, separator string) (string, error) {
+	parts := strings.Split(ID, separator)
+	name := parts[len(parts)-1]
+	if len(name) == 0 {
+		return "", fmt.Errorf("resource name was missing from identifier")
+	}
+
+	return name, nil
+}
+
+// returns the equivalent LoadBalancerRule, SecurityRule and LoadBalancerProbe
+// protocol types for the given Kubernetes protocol type.
+func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.TransportProtocol, *network.SecurityRuleProtocol, *network.ProbeProtocol, error) {
+	var transportProto network.TransportProtocol
+	var securityProto network.SecurityRuleProtocol
+	var probeProto network.ProbeProtocol
+
+	switch protocol {
+	case v1.ProtocolTCP:
+		transportProto = network.TransportProtocolTCP
+		securityProto = network.SecurityRuleProtocolTCP
+		probeProto = network.ProbeProtocolTCP
+		return &transportProto, &securityProto, &probeProto, nil
+	case v1.ProtocolUDP:
+		transportProto = network.TransportProtocolUDP
+		securityProto = network.SecurityRuleProtocolUDP
+		return &transportProto, &securityProto, nil, nil
+	case v1.ProtocolSCTP:
+		transportProto = network.TransportProtocolAll
+		securityProto = network.SecurityRuleProtocolAsterisk
+		return &transportProto, &securityProto, nil, nil
+	default:
+		return &transportProto, &securityProto, &probeProto, fmt.Errorf("only TCP, UDP and SCTP are supported for Azure LoadBalancers")
+	}
+
+}
+
+// This returns the full identifier of the primary NIC for the given VM.
+func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) {
+	if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
+		return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
+	}
+
+	for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
+		if to.Bool(ref.Primary) {
+			return *ref.ID, nil
+		}
+	}
+
+	return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
+}
+
+func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguration, error) {
+	if nic.IPConfigurations == nil {
+		return nil, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
+	}
+
+	if len(*nic.IPConfigurations) == 1 {
+		return &((*nic.IPConfigurations)[0]), nil
+	}
+
+	for _, ref := range *nic.IPConfigurations {
+		if *ref.Primary {
+			return &ref, nil
+		}
+	}
+
+	return nil, fmt.Errorf("failed to determine the primary ipconfig. nicname=%q", *nic.Name)
+}
+
+// returns first ip configuration on a nic by family
+func getIPConfigByIPFamily(nic network.Interface, IPv6 bool) (*network.InterfaceIPConfiguration, error) {
+	if nic.IPConfigurations == nil {
+		return nil, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
+	}
+
+	var ipVersion network.IPVersion
+	if IPv6 {
+		ipVersion = network.IPVersionIPv6
+	} else {
+		ipVersion = network.IPVersionIPv4
+	}
+	for _, ref := range *nic.IPConfigurations {
+		if ref.PrivateIPAddress != nil && ref.PrivateIPAddressVersion == ipVersion {
+			return &ref, nil
+		}
+	}
+	return nil, fmt.Errorf("failed to determine the ipconfig(IPv6=%v). nicname=%q", IPv6, to.String(nic.Name))
+}
+
+func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
+	return strings.HasSuffix(*lb.Name, consts.InternalLoadBalancerNameSuffix)
+}
+
+// getBackendPoolName the LB BackendPool name for a service.
+// to ensure backward and forward compat:
+// SingleStack -v4 (pre v1.16) => BackendPool name == clusterName
+// SingleStack -v6 => BackendPool name == <clusterName>-IPv6 (all cluster bootstrap uses this name)
+// DualStack
+//	=> IPv4 BackendPool name == clusterName
+//  => IPv6 BackendPool name == <clusterName>-IPv6
+// This means:
+// clusters moving from IPv4 to dualstack will require no changes
+// clusters moving from IPv6 to dualstack will require no changes as the IPv4 backend pool will be created with <clusterName>
+func getBackendPoolName(clusterName string, service *v1.Service) string {
+	IPv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
+	if IPv6 {
+		return fmt.Sprintf("%v-IPv6", clusterName)
+	}
+
+	return clusterName
+}
+
+func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, protocol v1.Protocol, port int32) string {
+	prefix := az.getRulePrefix(service)
+	ruleName := fmt.Sprintf("%s-%s-%d", prefix, protocol, port)
+	subnet := subnet(service)
+	if subnet == nil {
+		return ruleName
+	}
+
+	// Load balancer rule name must be less or equal to 80 characters, so excluding the hyphen two segments cannot exceed 79
+	subnetSegment := *subnet
+	if len(ruleName)+len(subnetSegment)+1 > consts.LoadBalancerRuleNameMaxLength {
+		subnetSegment = subnetSegment[:consts.LoadBalancerRuleNameMaxLength-len(ruleName)-1]
+	}
+
+	return fmt.Sprintf("%s-%s-%s-%d", prefix, subnetSegment, protocol, port)
+}
+
+func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
+	if useSharedSecurityRule(service) {
+		safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
+		return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix)
+	}
+	safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
+	rulePrefix := az.getRulePrefix(service)
+	return fmt.Sprintf("%s-%s-%d-%s", rulePrefix, port.Protocol, port.Port, safePrefix)
+}
+
+// This returns a human-readable version of the Service used to tag some resources.
+// This is only used for human-readable convenience, and not to filter.
+func getServiceName(service *v1.Service) string {
+	return fmt.Sprintf("%s/%s", service.Namespace, service.Name)
+}
+
+// This returns a prefix for loadbalancer/security rules.
+func (az *Cloud) getRulePrefix(service *v1.Service) string {
+	return az.GetLoadBalancerName(context.TODO(), "", service)
+}
+
+func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service) string {
+	return fmt.Sprintf("%s-%s", clusterName, az.GetLoadBalancerName(context.TODO(), clusterName, service))
+}
+
+func (az *Cloud) serviceOwnsRule(service *v1.Service, rule string) bool {
+	prefix := az.getRulePrefix(service)
+	return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix))
+}
+
+// There are two cases when a service owns the frontend IP config:
+// 1. The primary service, which means the frontend IP config is created after the creation of the service.
+// This means the name of the config can be tracked by the service UID.
+// 2. The secondary services must have their loadBalancer IP set if they want to share the same config as the primary
+// service. Hence, it can be tracked by the loadBalancer IP.
+func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) (bool, bool, error) {
+	var isPrimaryService bool
+	baseName := az.GetLoadBalancerName(context.TODO(), "", service)
+	if strings.HasPrefix(to.String(fip.Name), baseName) {
+		klog.V(6).Infof("serviceOwnsFrontendIP: found primary service %s of the "+
+			"frontend IP config %s", service.Name, *fip.Name)
+		isPrimaryService = true
+		return true, isPrimaryService, nil
+	}
+
+	loadBalancerIP := service.Spec.LoadBalancerIP
+	if loadBalancerIP == "" {
+		// it is a must that the secondary services set the loadBalancer IP
+		return false, isPrimaryService, nil
+	}
+
+	// for external secondary service the public IP address should be checked
+	if !requiresInternalLoadBalancer(service) {
+		pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
+		pip, err := az.findMatchedPIPByLoadBalancerIP(service, loadBalancerIP, pipResourceGroup)
+		if err != nil {
+			klog.Warningf("serviceOwnsFrontendIP: unexpected error when finding match public IP of the service %s with loadBalancerLP %s: %v", service.Name, loadBalancerIP, err)
+			return false, isPrimaryService, nil
+		}
+
+		if pip != nil &&
+			pip.ID != nil &&
+			pip.PublicIPAddressPropertiesFormat != nil &&
+			pip.IPAddress != nil &&
+			fip.FrontendIPConfigurationPropertiesFormat != nil &&
+			fip.FrontendIPConfigurationPropertiesFormat.PublicIPAddress != nil {
+			if strings.EqualFold(to.String(pip.ID), to.String(fip.PublicIPAddress.ID)) {
+				klog.V(4).Infof("serviceOwnsFrontendIP: found secondary service %s of the frontend IP config %s", service.Name, *fip.Name)
+
+				return true, isPrimaryService, nil
+			}
+			klog.V(4).Infof("serviceOwnsFrontendIP: the public IP with ID %s is being referenced by other service with public IP address %s", *pip.ID, *pip.IPAddress)
+		}
+
+		return false, isPrimaryService, nil
+	}
+
+	// for internal secondary service the private IP address on the frontend IP config should be checked
+	if fip.PrivateIPAddress == nil {
+		return false, isPrimaryService, nil
+	}
+
+	return strings.EqualFold(*fip.PrivateIPAddress, loadBalancerIP), isPrimaryService, nil
+}
+
+func (az *Cloud) getDefaultFrontendIPConfigName(service *v1.Service) string {
+	baseName := az.GetLoadBalancerName(context.TODO(), "", service)
+	subnetName := subnet(service)
+	if subnetName != nil {
+		ipcName := fmt.Sprintf("%s-%s", baseName, *subnetName)
+
+		// Azure lb front end configuration name must not exceed 80 characters
+		if len(ipcName) > consts.FrontendIPConfigNameMaxLength {
+			ipcName = ipcName[:consts.FrontendIPConfigNameMaxLength]
+		}
+		return ipcName
+	}
+	return baseName
+}
+
+// This returns the next available rule priority level for a given set of security rules.
+func getNextAvailablePriority(rules []network.SecurityRule) (int32, error) {
+	var smallest int32 = consts.LoadBalancerMinimumPriority
+	var spread int32 = 1
+
+outer:
+	for smallest < consts.LoadBalancerMaximumPriority {
+		for _, rule := range rules {
+			if *rule.Priority == smallest {
+				smallest += spread
+				continue outer
+			}
+		}
+		// no one else had it
+		return smallest, nil
+	}
+
+	return -1, fmt.Errorf("securityGroup priorities are exhausted")
+}
+
// polyTable is the CRC-32 table (Koopman polynomial) shared by MakeCRC32.
var polyTable = crc32.MakeTable(crc32.Koopman)

// MakeCRC32 returns the decimal string form of the CRC-32 (Koopman) checksum
// of str.
func MakeCRC32(str string) string {
	sum := crc32.Checksum([]byte(str), polyTable)
	return strconv.FormatUint(uint64(sum), 10)
}
+
// availabilitySet implements VMSet interface for Azure availability sets.
type availabilitySet struct {
	*Cloud

	// vmasCache is a timed cache whose single entry (keyed by consts.VMASKey)
	// holds a *sync.Map of VMAS name -> *availabilitySetEntry; it is populated
	// by newVMASCache and read by getAvailabilitySetByNodeName.
	vmasCache *azcache.TimedCache
}

// availabilitySetEntry pairs an availability set with the resource group it
// was listed from; it is the value type stored in vmasCache.
type availabilitySetEntry struct {
	vmas          *compute.AvailabilitySet
	resourceGroup string
}
+
+func (as *availabilitySet) newVMASCache() (*azcache.TimedCache, error) {
+	getter := func(key string) (interface{}, error) {
+		localCache := &sync.Map{}
+
+		allResourceGroups, err := as.GetResourceGroups()
+		if err != nil {
+			return nil, err
+		}
+
+		for _, resourceGroup := range allResourceGroups.List() {
+			allAvailabilitySets, rerr := as.AvailabilitySetsClient.List(context.Background(), resourceGroup)
+			if rerr != nil {
+				klog.Errorf("AvailabilitySetsClient.List failed: %v", rerr)
+				return nil, rerr.Error()
+			}
+
+			for i := range allAvailabilitySets {
+				vmas := allAvailabilitySets[i]
+				if strings.EqualFold(to.String(vmas.Name), "") {
+					klog.Warning("failed to get the name of the VMAS")
+					continue
+				}
+				localCache.Store(to.String(vmas.Name), &availabilitySetEntry{
+					vmas:          &vmas,
+					resourceGroup: resourceGroup,
+				})
+			}
+		}
+
+		return localCache, nil
+	}
+
+	if as.Config.AvailabilitySetsCacheTTLInSeconds == 0 {
+		as.Config.AvailabilitySetsCacheTTLInSeconds = consts.VMASCacheTTLDefaultInSeconds
+	}
+
+	return azcache.NewTimedcache(time.Duration(as.Config.AvailabilitySetsCacheTTLInSeconds)*time.Second, getter)
+}
+
// newAvailabilitySet creates a new availabilitySet backed by the given Cloud.
// (The previous doc comment referred to "newStandardSet", which does not match
// the function name.) It fails if the VMAS cache cannot be initialized.
func newAvailabilitySet(az *Cloud) (VMSet, error) {
	as := &availabilitySet{
		Cloud: az,
	}

	var err error
	as.vmasCache, err = as.newVMASCache()
	if err != nil {
		return nil, err
	}

	return as, nil
}
+
+// GetInstanceIDByNodeName gets the cloud provider ID by node name.
+// It must return ("", cloudprovider.InstanceNotFound) if the instance does
+// not exist or is no longer running.
+func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) {
+	var machine compute.VirtualMachine
+	var err error
+
+	machine, err = as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeUnsafe)
+	if errors.Is(err, cloudprovider.InstanceNotFound) {
+		klog.Warningf("Unable to find node %s: %v", name, cloudprovider.InstanceNotFound)
+		return "", cloudprovider.InstanceNotFound
+	}
+	if err != nil {
+		if as.CloudProviderBackoff {
+			klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
+			machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name), azcache.CacheReadTypeUnsafe)
+			if err != nil {
+				klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
+				return "", err
+			}
+		} else {
+			return "", err
+		}
+	}
+
+	resourceID := *machine.ID
+	convertedResourceID, err := convertResourceGroupNameToLower(resourceID)
+	if err != nil {
+		klog.Errorf("convertResourceGroupNameToLower failed with error: %v", err)
+		return "", err
+	}
+	return convertedResourceID, nil
+}
+
+// GetPowerStatusByNodeName returns the power state of the specified node.
+func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
+	vm, err := as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeDefault)
+	if err != nil {
+		return powerState, err
+	}
+
+	if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
+		statuses := *vm.InstanceView.Statuses
+		for _, status := range statuses {
+			state := to.String(status.Code)
+			if strings.HasPrefix(state, vmPowerStatePrefix) {
+				return strings.TrimPrefix(state, vmPowerStatePrefix), nil
+			}
+		}
+	}
+
+	// vm.InstanceView or vm.InstanceView.Statuses are nil when the VM is under deleting.
+	klog.V(3).Infof("InstanceView for node %q is nil, assuming it's stopped", name)
+	return vmPowerStateStopped, nil
+}
+
+// GetProvisioningStateByNodeName returns the provisioningState for the specified node.
+func (as *availabilitySet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) {
+	vm, err := as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeDefault)
+	if err != nil {
+		return provisioningState, err
+	}
+
+	if vm.VirtualMachineProperties == nil || vm.VirtualMachineProperties.ProvisioningState == nil {
+		return provisioningState, nil
+	}
+
+	return to.String(vm.VirtualMachineProperties.ProvisioningState), nil
+}
+
+// GetNodeNameByProviderID gets the node name by provider ID.
+func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
+	// NodeName is part of providerID for standard instances.
+	matches := providerIDRE.FindStringSubmatch(providerID)
+	if len(matches) != 2 {
+		return "", errors.New("error splitting providerID")
+	}
+
+	return types.NodeName(matches[1]), nil
+}
+
+// GetInstanceTypeByNodeName gets the instance type by node name.
+func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
+	machine, err := as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
+		return "", err
+	}
+
+	if machine.HardwareProfile == nil {
+		return "", fmt.Errorf("HardwareProfile of node(%s) is nil", name)
+	}
+	return string(machine.HardwareProfile.VMSize), nil
+}
+
+// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
+// with availability zone, then it returns fault domain.
+// for details, refer to https://kubernetes-sigs.github.io/cloud-provider-azure/topics/availability-zones/#node-labels
+func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
+	vm, err := as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		return cloudprovider.Zone{}, err
+	}
+
+	var failureDomain string
+	if vm.Zones != nil && len(*vm.Zones) > 0 {
+		// Get availability zone for the node.
+		zones := *vm.Zones
+		zoneID, err := strconv.Atoi(zones[0])
+		if err != nil {
+			return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %w", zones, err)
+		}
+
+		failureDomain = as.makeZone(to.String(vm.Location), zoneID)
+	} else {
+		// Availability zone is not used for the node, falling back to fault domain.
+		failureDomain = strconv.Itoa(int(to.Int32(vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain)))
+	}
+
+	zone := cloudprovider.Zone{
+		FailureDomain: strings.ToLower(failureDomain),
+		Region:        strings.ToLower(to.String(vm.Location)),
+	}
+	return zone, nil
+}
+
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
func (as *availabilitySet) GetPrimaryVMSetName() string {
	// For the availability-set implementation this is always the configured
	// primary availability set name.
	return as.Config.PrimaryAvailabilitySetName
}
+
+// GetIPByNodeName gets machine private IP and public IP by node name.
+func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) {
+	nic, err := as.GetPrimaryInterface(name)
+	if err != nil {
+		return "", "", err
+	}
+
+	ipConfig, err := getPrimaryIPConfig(nic)
+	if err != nil {
+		klog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
+		return "", "", err
+	}
+
+	privateIP := *ipConfig.PrivateIPAddress
+	publicIP := ""
+	if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
+		pipID := *ipConfig.PublicIPAddress.ID
+		pipName, err := getLastSegment(pipID, "/")
+		if err != nil {
+			return "", "", fmt.Errorf("failed to publicIP name for node %q with pipID %q", name, pipID)
+		}
+		pip, existsPip, err := as.getPublicIPAddress(as.ResourceGroup, pipName)
+		if err != nil {
+			return "", "", err
+		}
+		if existsPip {
+			publicIP = *pip.IPAddress
+		}
+	}
+
+	return privateIP, publicIP, nil
+}
+
+// returns a list of private ips assigned to node
+// TODO (khenidak): This should read all nics, not just the primary
+// allowing users to split ipv4/v6 on multiple nics
+func (as *availabilitySet) GetPrivateIPsByNodeName(name string) ([]string, error) {
+	ips := make([]string, 0)
+	nic, err := as.GetPrimaryInterface(name)
+	if err != nil {
+		return ips, err
+	}
+
+	if nic.IPConfigurations == nil {
+		return ips, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
+	}
+
+	for _, ipConfig := range *(nic.IPConfigurations) {
+		if ipConfig.PrivateIPAddress != nil {
+			ips = append(ips, *(ipConfig.PrivateIPAddress))
+		}
+	}
+
+	return ips, nil
+}
+
+// getAgentPoolAvailabilitySets lists the virtual machines for the resource group and then builds
+// a list of availability sets that match the nodes available to k8s.
+func (as *availabilitySet) getAgentPoolAvailabilitySets(vms []compute.VirtualMachine, nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
+	vmNameToAvailabilitySetID := make(map[string]string, len(vms))
+	for vmx := range vms {
+		vm := vms[vmx]
+		if vm.AvailabilitySet != nil {
+			vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
+		}
+	}
+	agentPoolAvailabilitySets = &[]string{}
+	for nx := range nodes {
+		nodeName := (*nodes[nx]).Name
+		if isControlPlaneNode(nodes[nx]) {
+			continue
+		}
+		asID, ok := vmNameToAvailabilitySetID[nodeName]
+		if !ok {
+			klog.Warningf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
+			continue
+		}
+		asName, err := getLastSegment(asID, "/")
+		if err != nil {
+			klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
+			return nil, err
+		}
+		// AvailabilitySet ID is currently upper cased in a non-deterministic way
+		// We want to keep it lower case, before the ID get fixed
+		asName = strings.ToLower(asName)
+
+		*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
+	}
+
+	return agentPoolAvailabilitySets, nil
+}
+
+// GetVMSetNames selects all possible availability sets or scale sets
+// (depending vmType configured) for service load balancer, if the service has
+// no loadbalancer mode annotation returns the primary VMSet. If service annotation
+// for loadbalancer exists then returns the eligible VMSet. The mode selection
+// annotation would be ignored when using one SLB per cluster.
+func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
+	hasMode, isAuto, serviceAvailabilitySetName := as.getServiceLoadBalancerMode(service)
+	useSingleSLB := as.useStandardLoadBalancer() && !as.EnableMultipleStandardLoadBalancers
+	if !hasMode || useSingleSLB {
+		// no mode specified in service annotation or use single SLB mode
+		// default to PrimaryAvailabilitySetName
+		availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
+		return availabilitySetNames, nil
+	}
+
+	vms, err := as.ListVirtualMachines(as.ResourceGroup)
+	if err != nil {
+		klog.Errorf("as.getNodeAvailabilitySet - ListVirtualMachines failed, err=%v", err)
+		return nil, err
+	}
+	availabilitySetNames, err = as.getAgentPoolAvailabilitySets(vms, nodes)
+	if err != nil {
+		klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabilitySets failed err=(%v)", err)
+		return nil, err
+	}
+	if len(*availabilitySetNames) == 0 {
+		klog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
+		return nil, fmt.Errorf("no availability sets found for nodes, node count(%d)", len(nodes))
+	}
+	if !isAuto {
+		found := false
+		for asx := range *availabilitySetNames {
+			if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetName) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			klog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetName)
+			return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetName)
+		}
+		return &[]string{serviceAvailabilitySetName}, nil
+	}
+
+	return availabilitySetNames, nil
+}
+
+func (as *availabilitySet) GetNodeVMSetName(node *v1.Node) (string, error) {
+	var hostName string
+	for _, nodeAddress := range node.Status.Addresses {
+		if strings.EqualFold(string(nodeAddress.Type), string(v1.NodeHostName)) {
+			hostName = nodeAddress.Address
+		}
+	}
+	if hostName == "" {
+		if name, ok := node.Labels[consts.NodeLabelHostName]; ok {
+			hostName = name
+		}
+	}
+	if hostName == "" {
+		klog.Warningf("as.GetNodeVMSetName: cannot get host name from node %s", node.Name)
+		return "", nil
+	}
+
+	vms, err := as.ListVirtualMachines(as.ResourceGroup)
+	if err != nil {
+		klog.Errorf("as.GetNodeVMSetName - ListVirtualMachines failed, err=%v", err)
+		return "", err
+	}
+
+	var asName string
+	for _, vm := range vms {
+		if strings.EqualFold(to.String(vm.Name), hostName) {
+			if vm.AvailabilitySet != nil && to.String(vm.AvailabilitySet.ID) != "" {
+				klog.V(4).Infof("as.GetNodeVMSetName: found vm %s", hostName)
+
+				asName, err = getLastSegment(to.String(vm.AvailabilitySet.ID), "/")
+				if err != nil {
+					klog.Errorf("as.GetNodeVMSetName: failed to get last segment of ID %s: %s", to.String(vm.AvailabilitySet.ID), err)
+					return "", err
+				}
+			}
+
+			break
+		}
+	}
+
+	klog.V(4).Infof("as.GetNodeVMSetName: found availability set name %s from node name %s", asName, node.Name)
+	return asName, nil
+}
+
// GetPrimaryInterface gets machine primary network interface by node name.
// It delegates to getPrimaryInterfaceWithVMSet with an empty vmSetName, so no
// availability-set membership check is performed and the returned VMAS ID is
// discarded.
func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
	nic, _, err := as.getPrimaryInterfaceWithVMSet(nodeName, "")
	return nic, err
}
+
+// extractResourceGroupByNicID extracts the resource group name by nicID.
+func extractResourceGroupByNicID(nicID string) (string, error) {
+	matches := nicResourceGroupRE.FindStringSubmatch(nicID)
+	if len(matches) != 2 {
+		return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
+	}
+
+	return matches[1], nil
+}
+
// getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
// It returns (nic, availabilitySetID, error); availabilitySetID is empty for
// standalone VMs. When vmSetName is non-empty and the LB configuration
// requires it (see needCheck below), errNotInVMSet is returned for VMs whose
// availability set does not match.
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, string, error) {
	var machine compute.VirtualMachine

	machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName), azcache.CacheReadTypeDefault)
	if err != nil {
		klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
		return network.Interface{}, "", err
	}

	// Resolve the NIC name and resource group from the VM's primary NIC ID.
	primaryNicID, err := getPrimaryInterfaceID(machine)
	if err != nil {
		return network.Interface{}, "", err
	}
	nicName, err := getLastSegment(primaryNicID, "/")
	if err != nil {
		return network.Interface{}, "", err
	}
	nodeResourceGroup, err := as.GetNodeResourceGroup(nodeName)
	if err != nil {
		return network.Interface{}, "", err
	}

	// Check availability set name. Note that vmSetName is empty string when getting
	// the Node's IP address. While vmSetName is not empty, it should be checked with
	// Node's real availability set name:
	// - For basic SKU load balancer, errNotInVMSet should be returned if the node's
	//   availability set is mismatched with vmSetName.
	// - For single standard SKU load balancer, backend could belong to multiple VMAS, so we
	//   don't check vmSet for it.
	// - For multiple standard SKU load balancers, the behavior is similar to the basic LB.
	needCheck := false
	if !as.useStandardLoadBalancer() {
		// need to check the vmSet name when using the basic LB
		needCheck = true
	} else if as.EnableMultipleStandardLoadBalancers {
		// need to check the vmSet name when using multiple standard LBs
		needCheck = true
	}
	if vmSetName != "" && needCheck {
		expectedAvailabilitySetID := as.getAvailabilitySetID(nodeResourceGroup, vmSetName)
		if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetID) {
			klog.V(3).Infof(
				"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
			return network.Interface{}, "", errNotInVMSet
		}
	}

	// The NIC may live in a different resource group than the VM; derive it
	// from the NIC ID rather than assuming the node's resource group.
	nicResourceGroup, err := extractResourceGroupByNicID(primaryNicID)
	if err != nil {
		return network.Interface{}, "", err
	}

	ctx, cancel := getContextWithCancel()
	defer cancel()
	nic, rerr := as.InterfacesClient.Get(ctx, nicResourceGroup, nicName, "")
	if rerr != nil {
		return network.Interface{}, "", rerr.Error()
	}

	var availabilitySetID string
	if machine.VirtualMachineProperties != nil && machine.AvailabilitySet != nil {
		availabilitySetID = to.String(machine.AvailabilitySet.ID)
	}
	return nic, availabilitySetID, nil
}
+
// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
// The five return values mirror the VMSet interface; this availability-set
// implementation always returns empty strings and a nil scale-set VM. Nodes
// outside the requested vmSet and NICs in Failed state are skipped without
// error.
func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
	vmName := mapNodeNameToVMName(nodeName)
	serviceName := getServiceName(service)
	nic, _, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
	if err != nil {
		if errors.Is(err, errNotInVMSet) {
			// Not a failure: the node simply belongs to another vmSet.
			klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
			return "", "", "", nil, nil
		}

		klog.Errorf("error: az.EnsureHostInPool(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
		return "", "", "", nil, err
	}

	// A NIC in Failed provisioning state cannot be updated; skip it.
	if nic.ProvisioningState == consts.NicFailedState {
		klog.Warningf("EnsureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
		return "", "", "", nil, nil
	}

	// Pick the IP configuration to join the pool: the primary configuration
	// for plain IPv4 clusters, or the one matching the service's IP family
	// when dual-stack is enabled or the service is IPv6.
	var primaryIPConfig *network.InterfaceIPConfiguration
	ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
	if !as.Cloud.ipv6DualStackEnabled && !ipv6 {
		primaryIPConfig, err = getPrimaryIPConfig(nic)
		if err != nil {
			return "", "", "", nil, err
		}
	} else {
		primaryIPConfig, err = getIPConfigByIPFamily(nic, ipv6)
		if err != nil {
			return "", "", "", nil, err
		}
	}

	// Check whether the IP configuration already references the backend pool.
	foundPool := false
	newBackendPools := []network.BackendAddressPool{}
	if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
		newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
	}
	for _, existingPool := range newBackendPools {
		if strings.EqualFold(backendPoolID, *existingPool.ID) {
			foundPool = true
			break
		}
	}
	if !foundPool {
		if as.useStandardLoadBalancer() && len(newBackendPools) > 0 {
			// Although standard load balancer supports backends from multiple availability
			// sets, the same network interface couldn't be added to more than one load balancer of
			// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
			// about this.
			newBackendPoolsIDs := make([]string, 0, len(newBackendPools))
			for _, pool := range newBackendPools {
				if pool.ID != nil {
					newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID)
				}
			}
			isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs)
			if err != nil {
				return "", "", "", nil, err
			}
			if !isSameLB {
				klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName)
				return "", "", "", nil, nil
			}
		}

		// Attach the backend pool and push the updated NIC to Azure.
		newBackendPools = append(newBackendPools,
			network.BackendAddressPool{
				ID: to.StringPtr(backendPoolID),
			})

		primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools

		nicName := *nic.Name
		klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
		err := as.CreateOrUpdateInterface(service, nic)
		if err != nil {
			return "", "", "", nil, err
		}
	}
	return "", "", "", nil, nil
}
+
+// EnsureHostsInPool ensures the given Node's primary IP configurations are
+// participating in the specified LoadBalancer Backend Pool.
+func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
+	mc := metrics.NewMetricContext("services", "vmas_ensure_hosts_in_pool", as.ResourceGroup, as.SubscriptionID, service.Name)
+	isOperationSucceeded := false
+	defer func() {
+		mc.ObserveOperationWithResult(isOperationSucceeded)
+	}()
+
+	hostUpdates := make([]func() error, 0, len(nodes))
+	for _, node := range nodes {
+		localNodeName := node.Name
+		if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
+			klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
+			continue
+		}
+
+		shouldExcludeLoadBalancer, err := as.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+			return err
+		}
+		if shouldExcludeLoadBalancer {
+			klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
+			continue
+		}
+
+		f := func() error {
+			_, _, _, _, err := as.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName)
+			if err != nil {
+				return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %w", getServiceName(service), backendPoolID, err)
+			}
+			return nil
+		}
+		hostUpdates = append(hostUpdates, f)
+	}
+
+	errs := utilerrors.AggregateGoroutines(hostUpdates...)
+	if errs != nil {
+		return utilerrors.Flatten(errs)
+	}
+
+	isOperationSucceeded = true
+	return nil
+}
+
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
// It walks the matching backend pool's IP configurations, maps each back to a
// node/NIC, and removes the pool reference from the primary IP configuration
// of NICs belonging to vmSetName, pushing the NIC updates concurrently.
// NOTE(review): the deleteFromVMSet parameter is not referenced in this
// implementation — presumably only meaningful for other VMSet variants; confirm.
func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
	// Returns nil if backend address pools already deleted.
	if backendAddressPools == nil {
		return nil
	}

	mc := metrics.NewMetricContext("services", "vmas_ensure_backend_pool_deleted", as.ResourceGroup, as.SubscriptionID, service.Name)
	isOperationSucceeded := false
	defer func() {
		mc.ObserveOperationWithResult(isOperationSucceeded)
	}()

	// Collect the IP configuration IDs currently attached to the target pool.
	ipConfigurationIDs := []string{}
	for _, backendPool := range *backendAddressPools {
		if strings.EqualFold(to.String(backendPool.ID), backendPoolID) &&
			backendPool.BackendAddressPoolPropertiesFormat != nil &&
			backendPool.BackendIPConfigurations != nil {
			for _, ipConf := range *backendPool.BackendIPConfigurations {
				if ipConf.ID == nil {
					continue
				}

				ipConfigurationIDs = append(ipConfigurationIDs, *ipConf.ID)
			}
		}
	}
	nicUpdaters := make([]func() error, 0)
	allErrs := make([]error, 0)
	for i := range ipConfigurationIDs {
		ipConfigurationID := ipConfigurationIDs[i]
		// Resolve the owning node; a vanished instance is tolerated (the entry
		// is simply skipped), other errors are collected and reported at the end.
		nodeName, _, err := as.GetNodeNameByIPConfigurationID(ipConfigurationID)
		if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) {
			klog.Errorf("Failed to GetNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err)
			allErrs = append(allErrs, err)
			continue
		}
		if nodeName == "" {
			continue
		}

		vmName := mapNodeNameToVMName(types.NodeName(nodeName))
		nic, vmasID, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
		if err != nil {
			if errors.Is(err, errNotInVMSet) {
				klog.V(3).Infof("EnsureBackendPoolDeleted skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
				return nil
			}

			klog.Errorf("error: az.EnsureBackendPoolDeleted(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
			return err
		}
		vmasName, err := getAvailabilitySetNameByID(vmasID)
		if err != nil {
			return fmt.Errorf("EnsureBackendPoolDeleted: failed to parse the VMAS ID %s: %w", vmasID, err)
		}
		// Only remove nodes belonging to specified vmSet to basic LB backends.
		if !strings.EqualFold(vmasName, vmSetName) {
			klog.V(2).Infof("EnsureBackendPoolDeleted: skipping the node %s belonging to another vm set %s", nodeName, vmasName)
			continue
		}

		// A NIC in Failed provisioning state cannot be updated.
		if nic.ProvisioningState == consts.NicFailedState {
			klog.Warningf("EnsureBackendPoolDeleted skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
			return nil
		}

		if nic.InterfacePropertiesFormat != nil && nic.InterfacePropertiesFormat.IPConfigurations != nil {
			// Strip the backend pool reference from the primary IP configuration.
			newIPConfigs := *nic.IPConfigurations
			for j, ipConf := range newIPConfigs {
				if !to.Bool(ipConf.Primary) {
					continue
				}
				// found primary ip configuration
				if ipConf.LoadBalancerBackendAddressPools != nil {
					newLBAddressPools := *ipConf.LoadBalancerBackendAddressPools
					for k := len(newLBAddressPools) - 1; k >= 0; k-- {
						pool := newLBAddressPools[k]
						if strings.EqualFold(to.String(pool.ID), backendPoolID) {
							newLBAddressPools = append(newLBAddressPools[:k], newLBAddressPools[k+1:]...)
							break
						}
					}
					newIPConfigs[j].LoadBalancerBackendAddressPools = &newLBAddressPools
				}
			}
			nic.IPConfigurations = &newIPConfigs
			// nic is declared inside the loop, so each closure captures its own copy.
			nicUpdaters = append(nicUpdaters, func() error {
				ctx, cancel := getContextWithCancel()
				defer cancel()
				klog.V(2).Infof("EnsureBackendPoolDeleted begins to CreateOrUpdate for NIC(%s, %s) with backendPoolID %s", as.resourceGroup, to.String(nic.Name), backendPoolID)
				rerr := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, to.String(nic.Name), nic)
				if rerr != nil {
					klog.Errorf("EnsureBackendPoolDeleted CreateOrUpdate for NIC(%s, %s) failed with error %v", as.resourceGroup, to.String(nic.Name), rerr.Error())
					return rerr.Error()
				}
				return nil
			})
		}
	}
	// Run all NIC updates concurrently; update failures take precedence over
	// the lookup errors collected above.
	errs := utilerrors.AggregateGoroutines(nicUpdaters...)
	if errs != nil {
		return utilerrors.Flatten(errs)
	}
	// Fail if there are other errors.
	if len(allErrs) > 0 {
		return utilerrors.Flatten(utilerrors.NewAggregate(allErrs))
	}

	isOperationSucceeded = true
	return nil
}
+
+func getAvailabilitySetNameByID(asID string) (string, error) {
+	// for standalone VM
+	if asID == "" {
+		return "", nil
+	}
+
+	matches := vmasIDRE.FindStringSubmatch(asID)
+	if len(matches) != 2 {
+		return "", fmt.Errorf("getAvailabilitySetNameByID: failed to parse the VMAS ID %s", asID)
+	}
+	vmasName := matches[1]
+	return vmasName, nil
+}
+
+// get a storage account by UUID
+func generateStorageAccountName(accountNamePrefix string) string {
+	uniqueID := strings.Replace(string(uuid.NewUUID()), "-", "", -1)
+	accountName := strings.ToLower(accountNamePrefix + uniqueID)
+	if len(accountName) > consts.StorageAccountNameMaxLength {
+		return accountName[:consts.StorageAccountNameMaxLength-1]
+	}
+	return accountName
+}
+
+// GetNodeNameByIPConfigurationID gets the node name and the availabilitySet name by IP configuration ID.
+// Resolution chain: ipConfigurationID -> NIC -> attached VM -> VM's availability set.
+// Returns ("", "", nil) when the NIC has no attached VM; the availability-set
+// name may be empty for a standalone VM. The returned AS name is lowercased.
+func (as *availabilitySet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
+	// nicIDRE captures the NIC's resource group and name from the IP config ID.
+	matches := nicIDRE.FindStringSubmatch(ipConfigurationID)
+	if len(matches) != 3 {
+		klog.V(4).Infof("Can not extract VM name from ipConfigurationID (%s)", ipConfigurationID)
+		return "", "", fmt.Errorf("invalid ip config ID %s", ipConfigurationID)
+	}
+
+	nicResourceGroup, nicName := matches[1], matches[2]
+	if nicResourceGroup == "" || nicName == "" {
+		return "", "", fmt.Errorf("invalid ip config ID %s", ipConfigurationID)
+	}
+	nic, rerr := as.InterfacesClient.Get(context.Background(), nicResourceGroup, nicName, "")
+	if rerr != nil {
+		return "", "", fmt.Errorf("GetNodeNameByIPConfigurationID(%s): failed to get interface of name %s: %w", ipConfigurationID, nicName, rerr.Error())
+	}
+	// A NIC not attached to any VM leaves vmID empty; treated as "no node".
+	vmID := ""
+	if nic.InterfacePropertiesFormat != nil && nic.VirtualMachine != nil {
+		vmID = to.String(nic.VirtualMachine.ID)
+	}
+	if vmID == "" {
+		klog.V(2).Infof("GetNodeNameByIPConfigurationID(%s): empty vmID", ipConfigurationID)
+		return "", "", nil
+	}
+
+	// vmIDRE captures exactly one group: the VM name.
+	matches = vmIDRE.FindStringSubmatch(vmID)
+	if len(matches) != 2 {
+		return "", "", fmt.Errorf("invalid virtual machine ID %s", vmID)
+	}
+	vmName := matches[1]
+
+	// Read the VM (via the VM cache) to discover its availability set.
+	vm, err := as.getVirtualMachine(types.NodeName(vmName), azcache.CacheReadTypeDefault)
+	if err != nil {
+		klog.Errorf("Unable to get the virtual machine by node name %s: %v", vmName, err)
+		return "", "", err
+	}
+	asID := ""
+	if vm.VirtualMachineProperties != nil && vm.AvailabilitySet != nil {
+		asID = to.String(vm.AvailabilitySet.ID)
+	}
+	// Standalone VM: node name found, but no availability set.
+	if asID == "" {
+		return vmName, "", nil
+	}
+
+	asName, err := getAvailabilitySetNameByID(asID)
+	if err != nil {
+		return "", "", fmt.Errorf("cannot get the availability set name by the availability set ID %s: %v", asID, err)
+	}
+	return vmName, strings.ToLower(asName), nil
+}
+
+// getAvailabilitySetByNodeName scans the cached VMAS list and returns the
+// availability set that contains a VM whose name matches nodeName
+// (case-insensitive). Returns cloudprovider.InstanceNotFound when no VMAS
+// references the node. crt controls cache read semantics (default vs forced refresh).
+func (as *availabilitySet) getAvailabilitySetByNodeName(nodeName string, crt azcache.AzureCacheReadType) (*compute.AvailabilitySet, error) {
+	cached, err := as.vmasCache.Get(consts.VMASKey, crt)
+	if err != nil {
+		return nil, err
+	}
+	vmasList := cached.(*sync.Map)
+
+	if vmasList == nil {
+		klog.Warning("Couldn't get all vmas from cache")
+		return nil, nil
+	}
+
+	var result *compute.AvailabilitySet
+	// Range stops early (returns false) on the first match or on a parse error;
+	// err from the enclosing scope carries any parse failure out of the closure.
+	vmasList.Range(func(_, value interface{}) bool {
+		vmasEntry := value.(*availabilitySetEntry)
+		vmas := vmasEntry.vmas
+		if vmas != nil && vmas.AvailabilitySetProperties != nil && vmas.VirtualMachines != nil {
+			for _, vmIDRef := range *vmas.VirtualMachines {
+				if vmIDRef.ID != nil {
+					matches := vmIDRE.FindStringSubmatch(to.String(vmIDRef.ID))
+					if len(matches) != 2 {
+						err = fmt.Errorf("invalid vm ID %s", to.String(vmIDRef.ID))
+						return false
+					}
+
+					vmName := matches[1]
+					if strings.EqualFold(vmName, nodeName) {
+						result = vmas
+						return false
+					}
+				}
+			}
+		}
+
+		return true
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if result == nil {
+		klog.Warningf("Unable to find node %s: %v", nodeName, cloudprovider.InstanceNotFound)
+		return nil, cloudprovider.InstanceNotFound
+	}
+
+	return result, nil
+}
+
+// GetNodeCIDRMasksByProviderID returns the node's IPv4 and IPv6 CIDR subnet
+// mask sizes, read from the VMAS tags consts.VMSetCIDRIPV4TagKey and
+// consts.VMSetCIDRIPV6TagKey. Falls back to the default mask sizes when the
+// node's VMAS cannot be found; a malformed tag value is logged and leaves the
+// corresponding mask at 0.
+func (as *availabilitySet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, error) {
+	nodeName, err := as.GetNodeNameByProviderID(providerID)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	vmas, err := as.getAvailabilitySetByNodeName(string(nodeName), azcache.CacheReadTypeDefault)
+	if err != nil {
+		// Unknown VMAS is not fatal: fall back to cluster defaults.
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			return consts.DefaultNodeMaskCIDRIPv4, consts.DefaultNodeMaskCIDRIPv6, nil
+		}
+		return 0, 0, err
+	}
+
+	var ipv4Mask, ipv6Mask int
+	if v4, ok := vmas.Tags[consts.VMSetCIDRIPV4TagKey]; ok && v4 != nil {
+		ipv4Mask, err = strconv.Atoi(to.String(v4))
+		if err != nil {
+			klog.Errorf("GetNodeCIDRMasksByProviderID: error when paring the value of the ipv4 mask size %s: %v", to.String(v4), err)
+		}
+	}
+	if v6, ok := vmas.Tags[consts.VMSetCIDRIPV6TagKey]; ok && v6 != nil {
+		ipv6Mask, err = strconv.Atoi(to.String(v6))
+		if err != nil {
+			klog.Errorf("GetNodeCIDRMasksByProviderID: error when paring the value of the ipv6 mask size%s: %v", to.String(v6), err)
+		}
+	}
+
+	return ipv4Mask, ipv6Mask, nil
+}
+
+// EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMAS.
+// Intentionally a no-op for availability sets: NIC-level backend-pool removal
+// is handled elsewhere (see EnsureBackendPoolDeleted); this method exists to
+// satisfy the VMSet interface shared with VMSS.
+func (as *availabilitySet) EnsureBackendPoolDeletedFromVMSets(vmasNamesMap map[string]bool, backendPoolID string) error {
+	return nil
+}
+
+// GetAgentPoolVMSetNames returns all VMAS names according to the nodes.
+// It lists every VM in the resource group and delegates the node-to-VMAS
+// mapping to getAgentPoolAvailabilitySets.
+func (as *availabilitySet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
+	vms, err := as.ListVirtualMachines(as.ResourceGroup)
+	if err != nil {
+		klog.Errorf("as.getNodeAvailabilitySet - ListVirtualMachines failed, err=%v", err)
+		return nil, err
+	}
+
+	return as.getAgentPoolAvailabilitySets(vms, nodes)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storage.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d00b965978da5fc9f059b705d0a58ad5301424d
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storage.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+// CreateFileShare creates a file share, using a matching storage account type, account kind, etc.
+// storage account will be created if specified account is not found.
+// Returns the resolved account name and its access key. HTTPS-only traffic is
+// forced on except for NFS shares, whose protocol does not support it.
+func (az *Cloud) CreateFileShare(ctx context.Context, accountOptions *AccountOptions, shareOptions *fileclient.ShareOptions) (string, string, error) {
+	if accountOptions == nil {
+		return "", "", fmt.Errorf("account options is nil")
+	}
+	if shareOptions == nil {
+		return "", "", fmt.Errorf("share options is nil")
+	}
+	// Default to the cloud's configured resource group.
+	if accountOptions.ResourceGroup == "" {
+		accountOptions.ResourceGroup = az.resourceGroup
+	}
+
+	accountOptions.EnableHTTPSTrafficOnly = true
+	if shareOptions.Protocol == storage.EnabledProtocolsNFS {
+		// NFS traffic is not HTTPS; forcing HTTPS-only would break the share.
+		accountOptions.EnableHTTPSTrafficOnly = false
+	}
+
+	// EnsureStorageAccount may pick an existing matching account or create a
+	// new one named with the file-share prefix; accountName is the final name.
+	accountName, accountKey, err := az.EnsureStorageAccount(ctx, accountOptions, consts.FileShareAccountNamePrefix)
+	if err != nil {
+		return "", "", fmt.Errorf("could not get storage key for storage account %s: %w", accountOptions.Name, err)
+	}
+
+	if err := az.createFileShare(accountOptions.ResourceGroup, accountName, shareOptions); err != nil {
+		return "", "", fmt.Errorf("failed to create share %s in account %s: %w", shareOptions.Name, accountName, err)
+	}
+	// NOTE(review): this log prints accountOptions.Name, which may be empty
+	// when the account was auto-generated; accountName is the actual account.
+	klog.V(4).Infof("created share %s in account %s", shareOptions.Name, accountOptions.Name)
+	return accountName, accountKey, nil
+}
+
+// DeleteFileShare deletes a file share using storage account name and key.
+// Thin wrapper over the unexported deleteFileShare that adds success logging.
+func (az *Cloud) DeleteFileShare(resourceGroup, accountName, shareName string) error {
+	if err := az.deleteFileShare(resourceGroup, accountName, shareName); err != nil {
+		return err
+	}
+	klog.V(4).Infof("share %s deleted", shareName)
+	return nil
+}
+
+// ResizeFileShare resizes a file share to sizeGiB; delegates to the
+// unexported resizeFileShare.
+func (az *Cloud) ResizeFileShare(resourceGroup, accountName, name string, sizeGiB int) error {
+	return az.resizeFileShare(resourceGroup, accountName, name, sizeGiB)
+}
+
+// GetFileShare gets a file share by name; delegates to the unexported
+// getFileShare.
+func (az *Cloud) GetFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error) {
+	return az.getFileShare(resourceGroupName, accountName, name)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
new file mode 100644
index 0000000000000000000000000000000000000000..480e89e6d3695f94ba15f7ea9efefcedf3286bc0
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
@@ -0,0 +1,504 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
+	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// SkipMatchingTag skip account matching tag
+const SkipMatchingTag = "skip-matching"
+// LocationGlobal is the Azure location used for global resources such as private DNS zones.
+const LocationGlobal = "global"
+// GroupIDFile is the private-link group ID for the Azure Files sub-resource.
+const GroupIDFile = "file"
+// PrivateDNSZoneName is the private DNS zone used for Azure Files private endpoints.
+const PrivateDNSZoneName = "privatelink.file.core.windows.net"
+
+// AccountOptions contains the fields which are used to create storage account.
+type AccountOptions struct {
+	// Name may be empty, in which case a matching account is searched for or a
+	// new name is generated. Type/Kind/Location filter or parameterize creation.
+	Name, Type, Kind, ResourceGroup, Location string
+	EnableHTTPSTrafficOnly                    bool
+	// indicate whether create new account when Name is empty or when account does not exists
+	CreateAccount                           bool
+	EnableLargeFileShare                    bool
+	CreatePrivateEndpoint                   bool
+	DisableFileServiceDeleteRetentionPolicy bool
+	// Pointer fields are tri-state: nil means "leave the service default".
+	IsHnsEnabled                            *bool
+	EnableNfsV3                             *bool
+	AllowBlobPublicAccess                   *bool
+	Tags                                    map[string]string
+	// VirtualNetworkResourceIDs lists subnet IDs allowed through the account's
+	// network rule set.
+	VirtualNetworkResourceIDs               []string
+}
+
+// accountWithLocation is a minimal projection of a storage account used when
+// listing candidate accounts.
+type accountWithLocation struct {
+	Name, StorageType, Location string
+}
+
+// getStorageAccounts get matching storage accounts.
+// Lists all accounts in the option's resource group and keeps only those that
+// satisfy every predicate (type, kind, location, vnet rules, large-file-share,
+// skip tag, HNS, NFSv3, private-endpoint expectation).
+func (az *Cloud) getStorageAccounts(ctx context.Context, accountOptions *AccountOptions) ([]accountWithLocation, error) {
+	if az.StorageAccountClient == nil {
+		return nil, fmt.Errorf("StorageAccountClient is nil")
+	}
+	result, rerr := az.StorageAccountClient.ListByResourceGroup(ctx, accountOptions.ResourceGroup)
+	if rerr != nil {
+		return nil, rerr.Error()
+	}
+
+	accounts := []accountWithLocation{}
+	for _, acct := range result {
+		// Accounts missing name/location/sku are skipped outright.
+		if acct.Name != nil && acct.Location != nil && acct.Sku != nil {
+			if !(isStorageTypeEqual(acct, accountOptions) &&
+				isAccountKindEqual(acct, accountOptions) &&
+				isLocationEqual(acct, accountOptions) &&
+				AreVNetRulesEqual(acct, accountOptions) &&
+				isLargeFileSharesPropertyEqual(acct, accountOptions) &&
+				isTaggedWithSkip(acct) &&
+				isHnsPropertyEqual(acct, accountOptions) &&
+				isEnableNfsV3PropertyEqual(acct, accountOptions) &&
+				isPrivateEndpointAsExpected(acct, accountOptions)) {
+				continue
+			}
+			accounts = append(accounts, accountWithLocation{Name: *acct.Name, StorageType: string((*acct.Sku).Name), Location: *acct.Location})
+		}
+	}
+	return accounts, nil
+}
+
+// GetStorageAccesskey gets the storage account access key.
+// Returns the first non-empty key from ListKeys. If a key value contains
+// spaces, only the portion after the last space is returned — presumably to
+// strip a label prefix from some key formats (TODO confirm upstream intent).
+func (az *Cloud) GetStorageAccesskey(ctx context.Context, account, resourceGroup string) (string, error) {
+	if az.StorageAccountClient == nil {
+		return "", fmt.Errorf("StorageAccountClient is nil")
+	}
+
+	result, rerr := az.StorageAccountClient.ListKeys(ctx, resourceGroup, account)
+	if rerr != nil {
+		return "", rerr.Error()
+	}
+	if result.Keys == nil {
+		return "", fmt.Errorf("empty keys")
+	}
+
+	for _, k := range *result.Keys {
+		if k.Value != nil && *k.Value != "" {
+			v := *k.Value
+			if ind := strings.LastIndex(v, " "); ind >= 0 {
+				v = v[(ind + 1):]
+			}
+			return v, nil
+		}
+	}
+	return "", fmt.Errorf("no valid keys")
+}
+
+// EnsureStorageAccount search storage account, create one storage account(with genAccountNamePrefix) if not found, return accountName, accountKey.
+//
+// Resolution logic:
+//   - Name empty + CreateAccount false: reuse the first matching account, else
+//     generate a fresh name and create it.
+//   - Name empty + CreateAccount true: always generate a fresh name and create.
+//   - Name given: create only if the account's keys cannot be fetched (i.e. it
+//     does not exist) and CreateAccount is true.
+//
+// When CreatePrivateEndpoint is set, the private DNS zone, private endpoint,
+// vnet link and DNS zone group are provisioned as well.
+func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *AccountOptions, genAccountNamePrefix string) (string, string, error) {
+	if accountOptions == nil {
+		return "", "", fmt.Errorf("account options is nil")
+	}
+
+	accountName := accountOptions.Name
+	accountType := accountOptions.Type
+	accountKind := accountOptions.Kind
+	resourceGroup := accountOptions.ResourceGroup
+	location := accountOptions.Location
+	enableHTTPSTrafficOnly := accountOptions.EnableHTTPSTrafficOnly
+
+	// Phase 1: decide whether a new account must be created, and under what name.
+	var createNewAccount bool
+	if len(accountName) == 0 {
+		createNewAccount = true
+		if !accountOptions.CreateAccount {
+			// find a storage account that matches accountType
+			accounts, err := az.getStorageAccounts(ctx, accountOptions)
+			if err != nil {
+				return "", "", fmt.Errorf("could not list storage accounts for account type %s: %w", accountType, err)
+			}
+
+			if len(accounts) > 0 {
+				accountName = accounts[0].Name
+				createNewAccount = false
+				klog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
+			}
+		}
+
+		if len(accountName) == 0 {
+			accountName = generateStorageAccountName(genAccountNamePrefix)
+		}
+	} else {
+		createNewAccount = false
+		if accountOptions.CreateAccount {
+			// check whether account exists
+			if _, err := az.GetStorageAccesskey(ctx, accountName, resourceGroup); err != nil {
+				klog.V(2).Infof("get storage key for storage account %s returned with %v", accountName, err)
+				createNewAccount = true
+			}
+		}
+	}
+
+	// The vnet may live in a dedicated resource group distinct from az.ResourceGroup.
+	vnetResourceGroup := az.ResourceGroup
+	if len(az.VnetResourceGroup) > 0 {
+		vnetResourceGroup = az.VnetResourceGroup
+	}
+
+	if accountOptions.CreatePrivateEndpoint {
+		// Create DNS zone first, this could make sure driver has write permission on vnetResourceGroup
+		if err := az.createPrivateDNSZone(ctx, vnetResourceGroup); err != nil {
+			return "", "", fmt.Errorf("Failed to create private DNS zone(%s) in resourceGroup(%s), error: %v", PrivateDNSZoneName, vnetResourceGroup, err)
+		}
+	}
+
+	// Phase 2: create the account (plus optional private-endpoint plumbing).
+	if createNewAccount {
+		// set network rules for storage account
+		var networkRuleSet *storage.NetworkRuleSet
+		virtualNetworkRules := []storage.VirtualNetworkRule{}
+		for i, subnetID := range accountOptions.VirtualNetworkResourceIDs {
+			vnetRule := storage.VirtualNetworkRule{
+				// Index into the slice (not &subnetID) so the pointer stays
+				// valid beyond the loop iteration.
+				VirtualNetworkResourceID: &accountOptions.VirtualNetworkResourceIDs[i],
+				Action:                   storage.ActionAllow,
+			}
+			virtualNetworkRules = append(virtualNetworkRules, vnetRule)
+			klog.V(4).Infof("subnetID(%s) has been set", subnetID)
+		}
+		if len(virtualNetworkRules) > 0 {
+			networkRuleSet = &storage.NetworkRuleSet{
+				VirtualNetworkRules: &virtualNetworkRules,
+				DefaultAction:       storage.DefaultActionDeny,
+			}
+		}
+
+		// Private endpoint access overrides any vnet rules: deny everything by
+		// default and rely on the endpoint for connectivity.
+		if accountOptions.CreatePrivateEndpoint {
+			networkRuleSet = &storage.NetworkRuleSet{
+				DefaultAction: storage.DefaultActionDeny,
+			}
+		}
+
+		if location == "" {
+			location = az.Location
+		}
+		if accountType == "" {
+			accountType = consts.DefaultStorageAccountType
+		}
+
+		// use StorageV2 by default per https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
+		kind := consts.DefaultStorageAccountKind
+		if accountKind != "" {
+			kind = storage.Kind(accountKind)
+		}
+		if len(accountOptions.Tags) == 0 {
+			accountOptions.Tags = make(map[string]string)
+		}
+		accountOptions.Tags[consts.CreatedByTag] = "azure"
+		tags := convertMapToMapPointer(accountOptions.Tags)
+
+		klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s, tags: %+v",
+			accountName, resourceGroup, location, accountType, kind, accountOptions.Tags)
+
+		cp := storage.AccountCreateParameters{
+			Sku:  &storage.Sku{Name: storage.SkuName(accountType)},
+			Kind: kind,
+			AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{
+				EnableHTTPSTrafficOnly: &enableHTTPSTrafficOnly,
+				NetworkRuleSet:         networkRuleSet,
+				IsHnsEnabled:           accountOptions.IsHnsEnabled,
+				EnableNfsV3:            accountOptions.EnableNfsV3,
+				MinimumTLSVersion:      storage.MinimumTLSVersionTLS12,
+			},
+			Tags:     tags,
+			Location: &location}
+
+		if accountOptions.EnableLargeFileShare {
+			klog.V(2).Infof("Enabling LargeFileShare for storage account(%s)", accountName)
+			cp.AccountPropertiesCreateParameters.LargeFileSharesState = storage.LargeFileSharesStateEnabled
+		}
+		if accountOptions.AllowBlobPublicAccess != nil {
+			klog.V(2).Infof("set AllowBlobPublicAccess(%v) for storage account(%s)", *accountOptions.AllowBlobPublicAccess, accountName)
+			cp.AccountPropertiesCreateParameters.AllowBlobPublicAccess = accountOptions.AllowBlobPublicAccess
+		}
+		if az.StorageAccountClient == nil {
+			return "", "", fmt.Errorf("StorageAccountClient is nil")
+		}
+
+		if rerr := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp); rerr != nil {
+			return "", "", fmt.Errorf("failed to create storage account %s, error: %v", accountName, rerr)
+		}
+
+		// Optionally disable the file-service soft-delete retention policy,
+		// which cannot be set at account-creation time.
+		if accountOptions.DisableFileServiceDeleteRetentionPolicy {
+			klog.V(2).Infof("disable DisableFileServiceDeleteRetentionPolicy on account(%s), resource group(%s)", accountName, resourceGroup)
+			prop, err := az.FileClient.GetServiceProperties(resourceGroup, accountName)
+			if err != nil {
+				return "", "", err
+			}
+			if prop.FileServicePropertiesProperties == nil {
+				return "", "", fmt.Errorf("FileServicePropertiesProperties of account(%s), resource group(%s) is nil", accountName, resourceGroup)
+			}
+			prop.FileServicePropertiesProperties.ShareDeleteRetentionPolicy = &storage.DeleteRetentionPolicy{Enabled: to.BoolPtr(false)}
+			if _, err := az.FileClient.SetServiceProperties(resourceGroup, accountName, prop); err != nil {
+				return "", "", err
+			}
+		}
+
+		if accountOptions.CreatePrivateEndpoint {
+			// Get properties of the storageAccount
+			storageAccount, err := az.StorageAccountClient.GetProperties(ctx, resourceGroup, accountName)
+			if err != nil {
+				return "", "", fmt.Errorf("Failed to get the properties of storage account(%s), resourceGroup(%s), error: %v", accountName, resourceGroup, err)
+			}
+
+			// Create private endpoint
+			privateEndpointName := accountName + "-pvtendpoint"
+			if err := az.createPrivateEndpoint(ctx, accountName, storageAccount.ID, privateEndpointName, vnetResourceGroup); err != nil {
+				return "", "", fmt.Errorf("Failed to create private endpoint for storage account(%s), resourceGroup(%s), error: %v", accountName, vnetResourceGroup, err)
+			}
+
+			// Create virtual link to the zone private DNS zone
+			vNetLinkName := accountName + "-vnetlink"
+			if err := az.createVNetLink(ctx, vNetLinkName, vnetResourceGroup); err != nil {
+				return "", "", fmt.Errorf("Failed to create virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s), error: %v", az.VnetName, PrivateDNSZoneName, vnetResourceGroup, err)
+			}
+
+			// Create dns zone group
+			dnsZoneGroupName := accountName + "-dnszonegroup"
+			if err := az.createPrivateDNSZoneGroup(ctx, dnsZoneGroupName, privateEndpointName, vnetResourceGroup); err != nil {
+				return "", "", fmt.Errorf("Failed to create private DNS zone group - privateEndpoint(%s), vNetName(%s), resourceGroup(%s), error: %v", privateEndpointName, az.VnetName, vnetResourceGroup, err)
+			}
+		}
+	}
+
+	// find the access key with this account
+	accountKey, err := az.GetStorageAccesskey(ctx, accountName, resourceGroup)
+	if err != nil {
+		return "", "", fmt.Errorf("could not get storage key for storage account %s: %w", accountName, err)
+	}
+
+	return accountName, accountKey, nil
+}
+
+// createPrivateEndpoint creates a private endpoint for the given storage
+// account in the cloud's configured vnet/subnet. It first disables the
+// subnet's private-endpoint network policies (an Azure prerequisite), then
+// creates the endpoint with a single private-link service connection scoped to
+// the "file" group ID.
+func (az *Cloud) createPrivateEndpoint(ctx context.Context, accountName string, accountID *string, privateEndpointName, vnetResourceGroup string) error {
+	klog.V(2).Infof("Creating private endpoint(%s) for account (%s)", privateEndpointName, accountName)
+
+	subnet, _, err := az.getSubnet(az.VnetName, az.SubnetName)
+	if err != nil {
+		return err
+	}
+	// Disable the private endpoint network policies before creating private endpoint
+	subnet.SubnetPropertiesFormat.PrivateEndpointNetworkPolicies = network.VirtualNetworkPrivateEndpointNetworkPoliciesDisabled
+	if rerr := az.SubnetsClient.CreateOrUpdate(ctx, vnetResourceGroup, az.VnetName, az.SubnetName, subnet); rerr != nil {
+		return rerr.Error()
+	}
+
+	//Create private endpoint
+	privateLinkServiceConnectionName := accountName + "-pvtsvcconn"
+	privateLinkServiceConnection := network.PrivateLinkServiceConnection{
+		Name: &privateLinkServiceConnectionName,
+		PrivateLinkServiceConnectionProperties: &network.PrivateLinkServiceConnectionProperties{
+			GroupIds:             &[]string{GroupIDFile},
+			PrivateLinkServiceID: accountID,
+		},
+	}
+	privateLinkServiceConnections := []network.PrivateLinkServiceConnection{privateLinkServiceConnection}
+	privateEndpoint := network.PrivateEndpoint{
+		Location:                  &az.Location,
+		PrivateEndpointProperties: &network.PrivateEndpointProperties{Subnet: &subnet, PrivateLinkServiceConnections: &privateLinkServiceConnections},
+	}
+	// Final `true` presumably means wait-for-completion — confirm against the
+	// privateendpointclient interface.
+	return az.privateendpointclient.CreateOrUpdate(ctx, vnetResourceGroup, privateEndpointName, privateEndpoint, true)
+}
+
+// createPrivateDNSZone creates the Azure Files private DNS zone
+// (PrivateDNSZoneName) in vnetResourceGroup. An "exists already" error is
+// treated as success, making the call idempotent. Private DNS zones are always
+// created in the "global" location.
+func (az *Cloud) createPrivateDNSZone(ctx context.Context, vnetResourceGroup string) error {
+	klog.V(2).Infof("Creating private dns zone(%s) in resourceGroup (%s)", PrivateDNSZoneName, vnetResourceGroup)
+	location := LocationGlobal
+	privateDNSZone := privatedns.PrivateZone{Location: &location}
+	if err := az.privatednsclient.CreateOrUpdate(ctx, vnetResourceGroup, PrivateDNSZoneName, privateDNSZone, true); err != nil {
+		// NOTE(review): string-matching the error message is brittle but is how
+		// upstream detects the already-exists case.
+		if strings.Contains(err.Error(), "exists already") {
+			klog.V(2).Infof("private dns zone(%s) in resourceGroup (%s) already exists", PrivateDNSZoneName, vnetResourceGroup)
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// createVNetLink links the cloud's vnet to the private DNS zone so that
+// records in the zone resolve from within the vnet. Registration is enabled,
+// and the link (like the zone) lives in the "global" location.
+func (az *Cloud) createVNetLink(ctx context.Context, vNetLinkName, vnetResourceGroup string) error {
+	klog.V(2).Infof("Creating virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s)", vNetLinkName, PrivateDNSZoneName, vnetResourceGroup)
+	location := LocationGlobal
+	// Build the vnet's full ARM resource ID from subscription/RG/vnet name.
+	vnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s", az.SubscriptionID, vnetResourceGroup, az.VnetName)
+	parameters := privatedns.VirtualNetworkLink{
+		Location: &location,
+		VirtualNetworkLinkProperties: &privatedns.VirtualNetworkLinkProperties{
+			VirtualNetwork:      &privatedns.SubResource{ID: &vnetID},
+			RegistrationEnabled: to.BoolPtr(true)},
+	}
+	return az.virtualNetworkLinksClient.CreateOrUpdate(ctx, vnetResourceGroup, PrivateDNSZoneName, vNetLinkName, parameters, false)
+}
+
+// createPrivateDNSZoneGroup attaches a DNS zone group to the given private
+// endpoint, so Azure auto-manages the endpoint's A records inside the private
+// DNS zone.
+func (az *Cloud) createPrivateDNSZoneGroup(ctx context.Context, dnsZoneGroupName, privateEndpointName, vnetResourceGroup string) error {
+	klog.V(2).Infof("Creating private DNS zone group(%s) with privateEndpoint(%s), vNetName(%s), resourceGroup(%s)", dnsZoneGroupName, privateEndpointName, az.VnetName, vnetResourceGroup)
+	// Full ARM resource ID of the private DNS zone created earlier.
+	privateDNSZoneID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/privateDnsZones/%s", az.SubscriptionID, vnetResourceGroup, PrivateDNSZoneName)
+	dnsZoneName := PrivateDNSZoneName
+	privateDNSZoneConfig := network.PrivateDNSZoneConfig{
+		Name: &dnsZoneName,
+		PrivateDNSZonePropertiesFormat: &network.PrivateDNSZonePropertiesFormat{
+			PrivateDNSZoneID: &privateDNSZoneID},
+	}
+	privateDNSZoneConfigs := []network.PrivateDNSZoneConfig{privateDNSZoneConfig}
+	privateDNSZoneGroup := network.PrivateDNSZoneGroup{
+		PrivateDNSZoneGroupPropertiesFormat: &network.PrivateDNSZoneGroupPropertiesFormat{
+			PrivateDNSZoneConfigs: &privateDNSZoneConfigs,
+		},
+	}
+	return az.privatednszonegroupclient.CreateOrUpdate(ctx, vnetResourceGroup, privateEndpointName, dnsZoneGroupName, privateDNSZoneGroup, false)
+}
+
+// AddStorageAccountTags add tags to storage account.
+// Reads the account's current tags, merges the given map over them (new values
+// win on key collision), and writes the result back in a single Update call.
+func (az *Cloud) AddStorageAccountTags(resourceGroup, account string, tags map[string]*string) *retry.Error {
+	if az.StorageAccountClient == nil {
+		return retry.NewError(false, fmt.Errorf("StorageAccountClient is nil"))
+	}
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+	result, rerr := az.StorageAccountClient.GetProperties(ctx, resourceGroup, account)
+	if rerr != nil {
+		return rerr
+	}
+
+	newTags := result.Tags
+	if newTags == nil {
+		newTags = make(map[string]*string)
+	}
+
+	// merge two tag map
+	for k, v := range tags {
+		newTags[k] = v
+	}
+
+	updateParams := storage.AccountUpdateParameters{Tags: newTags}
+	return az.StorageAccountClient.Update(ctx, resourceGroup, account, updateParams)
+}
+
+// RemoveStorageAccountTag remove tag from storage account.
+// The account is only updated when the key was actually present, avoiding a
+// needless write when there is nothing to remove.
+func (az *Cloud) RemoveStorageAccountTag(resourceGroup, account, key string) *retry.Error {
+	if az.StorageAccountClient == nil {
+		return retry.NewError(false, fmt.Errorf("StorageAccountClient is nil"))
+	}
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+	result, rerr := az.StorageAccountClient.GetProperties(ctx, resourceGroup, account)
+	if rerr != nil {
+		return rerr
+	}
+
+	if len(result.Tags) == 0 {
+		return nil
+	}
+
+	// Compare lengths before/after delete to detect whether the key existed.
+	originalLen := len(result.Tags)
+	delete(result.Tags, key)
+	if originalLen != len(result.Tags) {
+		updateParams := storage.AccountUpdateParameters{Tags: result.Tags}
+		return az.StorageAccountClient.Update(ctx, resourceGroup, account, updateParams)
+	}
+	return nil
+}
+
+// isStorageTypeEqual reports whether the account's SKU name matches the
+// requested Type (case-insensitive). An empty requested Type matches anything.
+func isStorageTypeEqual(account storage.Account, accountOptions *AccountOptions) bool {
+	if accountOptions.Type != "" && !strings.EqualFold(accountOptions.Type, string((*account.Sku).Name)) {
+		return false
+	}
+	return true
+}
+
+// isAccountKindEqual reports whether the account's Kind matches the requested
+// Kind (case-insensitive). An empty requested Kind matches anything.
+func isAccountKindEqual(account storage.Account, accountOptions *AccountOptions) bool {
+	if accountOptions.Kind != "" && !strings.EqualFold(accountOptions.Kind, string(account.Kind)) {
+		return false
+	}
+	return true
+}
+
+// isLocationEqual reports whether the account's location matches the requested
+// Location (case-insensitive). An empty requested Location matches anything.
+func isLocationEqual(account storage.Account, accountOptions *AccountOptions) bool {
+	if accountOptions.Location != "" && !strings.EqualFold(accountOptions.Location, *account.Location) {
+		return false
+	}
+	return true
+}
+
+// AreVNetRulesEqual reports whether the account's network rule set covers the
+// requested VirtualNetworkResourceIDs with Allow rules. No requested IDs means
+// a trivial match.
+//
+// NOTE(review): `found` is not reset per subnetID, so with multiple requested
+// IDs this effectively checks that AT LEAST ONE matches, not all of them —
+// confirm whether that is the intended semantics upstream.
+func AreVNetRulesEqual(account storage.Account, accountOptions *AccountOptions) bool {
+	if len(accountOptions.VirtualNetworkResourceIDs) > 0 {
+		if account.AccountProperties == nil || account.AccountProperties.NetworkRuleSet == nil ||
+			account.AccountProperties.NetworkRuleSet.VirtualNetworkRules == nil {
+			return false
+		}
+
+		found := false
+		for _, subnetID := range accountOptions.VirtualNetworkResourceIDs {
+			for _, rule := range *account.AccountProperties.NetworkRuleSet.VirtualNetworkRules {
+				if strings.EqualFold(to.String(rule.VirtualNetworkResourceID), subnetID) && rule.Action == storage.ActionAllow {
+					found = true
+					break
+				}
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+	return true
+}
+
+// isLargeFileSharesPropertyEqual reports whether the account satisfies a
+// requested EnableLargeFileShare: Premium-tier accounts pass implicitly, other
+// tiers must have LargeFileSharesState enabled.
+// NOTE(review): the tier is compared against compute.PremiumLRS (a compute
+// storage-account-type constant) converted to storage.SkuTier — this relies on
+// the string values coinciding; confirm against the SDK constants.
+func isLargeFileSharesPropertyEqual(account storage.Account, accountOptions *AccountOptions) bool {
+	if account.Sku.Tier != storage.SkuTier(compute.PremiumLRS) && accountOptions.EnableLargeFileShare && (len(account.LargeFileSharesState) == 0 || account.LargeFileSharesState == storage.LargeFileSharesStateDisabled) {
+		return false
+	}
+	return true
+}
+
+// isTaggedWithSkip returns true when the account is NOT tagged with
+// SkipMatchingTag (i.e. it is eligible for matching); accounts carrying the
+// tag are excluded from reuse. Note the inverted sense relative to the name.
+func isTaggedWithSkip(account storage.Account) bool {
+	if account.Tags != nil {
+		// skip account with SkipMatchingTag tag
+		if _, ok := account.Tags[SkipMatchingTag]; ok {
+			klog.V(2).Infof("found %s tag for account %s, skip matching", SkipMatchingTag, *account.Name)
+			return false
+		}
+	}
+	return true
+}
+
+// isHnsPropertyEqual compares the hierarchical-namespace setting; nil pointers
+// are treated as false via to.Bool.
+func isHnsPropertyEqual(account storage.Account, accountOptions *AccountOptions) bool {
+	return to.Bool(account.IsHnsEnabled) == to.Bool(accountOptions.IsHnsEnabled)
+}
+
+// isEnableNfsV3PropertyEqual compares the NFSv3 setting; nil pointers are
+// treated as false via to.Bool.
+func isEnableNfsV3PropertyEqual(account storage.Account, accountOptions *AccountOptions) bool {
+	return to.Bool(account.EnableNfsV3) == to.Bool(accountOptions.EnableNfsV3)
+}
+
+// isPrivateEndpointAsExpected reports whether the account's private-endpoint
+// state matches the request: a private endpoint must exist iff
+// CreatePrivateEndpoint is set.
+func isPrivateEndpointAsExpected(account storage.Account, accountOptions *AccountOptions) bool {
+	if accountOptions.CreatePrivateEndpoint && account.PrivateEndpointConnections != nil && len(*account.PrivateEndpointConnections) > 0 {
+		return true
+	}
+	if !accountOptions.CreatePrivateEndpoint && (account.PrivateEndpointConnections == nil || len(*account.PrivateEndpointConnections) == 0) {
+		return true
+	}
+	return false
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..1bde0b5612ef466fb25b4779b012a9eb0e577a36
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog/v2"
+	utilnet "k8s.io/utils/net"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+// strToExtendedLocationType maps lower-cased extended-location type names to
+// their network SDK enum values; used by getExtendedLocationTypeFromString.
+var strToExtendedLocationType = map[string]network.ExtendedLocationTypes{
+	"edgezone": network.ExtendedLocationTypesEdgeZone,
+}
+
+// lockMap provides one mutex per string entry so callers can serialize work
+// on individual keys. The embedded Mutex guards mutexMap itself.
+type lockMap struct {
+	sync.Mutex
+	mutexMap map[string]*sync.Mutex
+}
+
+// newLockMap returns an empty lock map ready for use.
+func newLockMap() *lockMap {
+	return &lockMap{
+		mutexMap: make(map[string]*sync.Mutex),
+	}
+}
+
+// LockEntry acquires the lock associated with the specific entry, creating
+// the per-entry mutex on first use. The map-level lock is released before
+// blocking on the entry lock so other entries remain usable.
+func (lm *lockMap) LockEntry(entry string) {
+	lm.Lock()
+	// check if entry does not exists, then add entry
+	if _, exists := lm.mutexMap[entry]; !exists {
+		lm.addEntry(entry)
+	}
+
+	lm.Unlock()
+	lm.lockEntry(entry)
+}
+
+// UnlockEntry releases the lock associated with the specific entry; it is a
+// silent no-op when the entry was never locked.
+func (lm *lockMap) UnlockEntry(entry string) {
+	lm.Lock()
+	defer lm.Unlock()
+
+	if _, exists := lm.mutexMap[entry]; !exists {
+		return
+	}
+	lm.unlockEntry(entry)
+}
+
+// addEntry creates the per-entry mutex; the caller must hold lm.Lock.
+func (lm *lockMap) addEntry(entry string) {
+	lm.mutexMap[entry] = &sync.Mutex{}
+}
+
+// lockEntry blocks until the entry's mutex is acquired; the entry must exist.
+func (lm *lockMap) lockEntry(entry string) {
+	lm.mutexMap[entry].Lock()
+}
+
+// unlockEntry releases the entry's mutex; the entry must exist.
+func (lm *lockMap) unlockEntry(entry string) {
+	lm.mutexMap[entry].Unlock()
+}
+
+// getContextWithCancel returns a cancellable background context.
+func getContextWithCancel() (context.Context, context.CancelFunc) {
+	return context.WithCancel(context.Background())
+}
+
+// convertMapToMapPointer copies origin into a map whose values are pointers
+// to per-entry copies (safe to retain beyond the loop iteration).
+func convertMapToMapPointer(origin map[string]string) map[string]*string {
+	newly := make(map[string]*string)
+	for k, v := range origin {
+		value := v
+		newly[k] = &value
+	}
+	return newly
+}
+
+// parseTags merges tags parsed from the delimiter-separated string `tags`
+// with the entries of tagsMap into a map of string pointers. Keys and values
+// are trimmed; malformed pairs and empty keys are skipped with a warning.
+// On a case-insensitive key clash, the tagsMap entry replaces the one parsed
+// from the string.
+func parseTags(tags string, tagsMap map[string]string) map[string]*string {
+	formatted := make(map[string]*string)
+
+	if tags != "" {
+		kvs := strings.Split(tags, consts.TagsDelimiter)
+		for _, kv := range kvs {
+			res := strings.Split(kv, consts.TagKeyValueDelimiter)
+			if len(res) != 2 {
+				klog.Warningf("parseTags: error when parsing key-value pair %s, would ignore this one", kv)
+				continue
+			}
+			k, v := strings.TrimSpace(res[0]), strings.TrimSpace(res[1])
+			if k == "" {
+				klog.Warning("parseTags: empty key, ignoring this key-value pair")
+				continue
+			}
+			formatted[k] = to.StringPtr(v)
+		}
+	}
+
+	if len(tagsMap) > 0 {
+		for key, value := range tagsMap {
+			key, value := strings.TrimSpace(key), strings.TrimSpace(value)
+			if key == "" {
+				klog.Warningf("parseTags: empty key, ignoring this key-value pair")
+				continue
+			}
+
+			// tagsMap wins on a case-insensitive clash: drop the older spelling.
+			if found, k := findKeyInMapCaseInsensitive(formatted, key); found && k != key {
+				klog.V(4).Infof("parseTags: found identical keys: %s from tags and %s from tagsMap (case-insensitive), %s will replace %s", k, key, key, k)
+				delete(formatted, k)
+			}
+			formatted[key] = to.StringPtr(value)
+		}
+	}
+
+	return formatted
+}
+
+// findKeyInMapCaseInsensitive reports whether key exists in targetMap under a
+// case-insensitive comparison, returning the stored key's exact spelling.
+func findKeyInMapCaseInsensitive(targetMap map[string]*string, key string) (bool, string) {
+	for k := range targetMap {
+		if strings.EqualFold(k, key) {
+			return true, k
+		}
+	}
+
+	return false, ""
+}
+
+// reconcileTags merges newTags into currentTagsOnResource (in place) and
+// reports whether anything changed. Keys match case-insensitively. When
+// az.SystemTags is set, tags present on the resource but absent from both
+// newTags and the system-tag list are deleted.
+func (az *Cloud) reconcileTags(currentTagsOnResource, newTags map[string]*string) (reconciledTags map[string]*string, changed bool) {
+	var systemTags []string
+	systemTagsMap := make(map[string]*string)
+
+	if az.SystemTags != "" {
+		systemTags = strings.Split(az.SystemTags, consts.TagsDelimiter)
+		for i := 0; i < len(systemTags); i++ {
+			systemTags[i] = strings.TrimSpace(systemTags[i])
+		}
+
+		for _, systemTag := range systemTags {
+			systemTagsMap[systemTag] = to.StringPtr("")
+		}
+	}
+
+	// if the systemTags is not set, just add/update new currentTagsOnResource and not delete old currentTagsOnResource
+	for k, v := range newTags {
+		found, key := findKeyInMapCaseInsensitive(currentTagsOnResource, k)
+
+		if !found {
+			currentTagsOnResource[k] = v
+			changed = true
+		} else if !strings.EqualFold(to.String(v), to.String(currentTagsOnResource[key])) {
+			currentTagsOnResource[key] = v
+			changed = true
+		}
+	}
+
+	// if the systemTags is set, delete the old currentTagsOnResource
+	if len(systemTagsMap) > 0 {
+		for k := range currentTagsOnResource {
+			if _, ok := newTags[k]; !ok {
+				if found, _ := findKeyInMapCaseInsensitive(systemTagsMap, k); !found {
+					delete(currentTagsOnResource, k)
+					changed = true
+				}
+			}
+		}
+	}
+
+	return currentTagsOnResource, changed
+}
+
+// getVMSetNamesSharingPrimarySLB returns the set of VM set names configured
+// (via NodePoolsWithoutDedicatedSLB) to share the primary standard load
+// balancer; names are trimmed and lower-cased.
+func (az *Cloud) getVMSetNamesSharingPrimarySLB() sets.String {
+	vmSetNames := make([]string, 0)
+	if az.NodePoolsWithoutDedicatedSLB != "" {
+		vmSetNames = strings.Split(az.Config.NodePoolsWithoutDedicatedSLB, consts.VMSetNamesSharingPrimarySLBDelimiter)
+		for i := 0; i < len(vmSetNames); i++ {
+			vmSetNames[i] = strings.ToLower(strings.TrimSpace(vmSetNames[i]))
+		}
+	}
+
+	return sets.NewString(vmSetNames...)
+}
+
+// getExtendedLocationTypeFromString maps a case-insensitive extended-location
+// type name to the SDK enum, defaulting to EdgeZone for unknown values.
+func getExtendedLocationTypeFromString(extendedLocationType string) network.ExtendedLocationTypes {
+	extendedLocationType = strings.ToLower(extendedLocationType)
+	if val, ok := strToExtendedLocationType[extendedLocationType]; ok {
+		return val
+	}
+	return network.ExtendedLocationTypesEdgeZone
+}
+
+// getServiceAdditionalPublicIPs parses the additional-public-IPs service
+// annotation (comma-separated) into a slice of validated IP strings. A nil
+// service or missing annotation yields an empty result; an unparsable IP is
+// an error.
+func getServiceAdditionalPublicIPs(service *v1.Service) ([]string, error) {
+	if service == nil {
+		return nil, nil
+	}
+
+	result := []string{}
+	if val, ok := service.Annotations[consts.ServiceAnnotationAdditionalPublicIPs]; ok {
+		pips := strings.Split(strings.TrimSpace(val), ",")
+		for _, pip := range pips {
+			ip := strings.TrimSpace(pip)
+			if ip == "" {
+				continue // skip empty string
+			}
+
+			if net.ParseIP(ip) == nil {
+				return nil, fmt.Errorf("%s is not a valid IP address", ip)
+			}
+
+			result = append(result, ip)
+		}
+	}
+
+	return result, nil
+}
+
+// getNodePrivateIPAddress returns the node's InternalIP whose IP family
+// (IPv4/IPv6) matches the service's ClusterIP family, or "" when none found.
+func getNodePrivateIPAddress(service *v1.Service, node *v1.Node) string {
+	isIPV6SVC := utilnet.IsIPv6String(service.Spec.ClusterIP)
+	for _, nodeAddress := range node.Status.Addresses {
+		if strings.EqualFold(string(nodeAddress.Type), string(v1.NodeInternalIP)) &&
+			utilnet.IsIPv6String(nodeAddress.Address) == isIPV6SVC {
+			klog.V(4).Infof("getNodePrivateIPAddress: node %s, ip %s", node.Name, nodeAddress.Address)
+			return nodeAddress.Address
+		}
+	}
+
+	klog.Warningf("getNodePrivateIPAddress: empty ip found for node %s", node.Name)
+	return ""
+}
+
+// getNodePrivateIPAddresses returns all InternalIP addresses of the node
+// (both IPv4 and IPv6), in the order they appear in node status.
+func getNodePrivateIPAddresses(node *v1.Node) []string {
+	addresses := make([]string, 0)
+	for _, nodeAddress := range node.Status.Addresses {
+		if strings.EqualFold(string(nodeAddress.Type), string(v1.NodeInternalIP)) {
+			addresses = append(addresses, nodeAddress.Address)
+		}
+	}
+
+	return addresses
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
new file mode 100644
index 0000000000000000000000000000000000000000..cea1dc5f8a01b0d473f41182abb2fafffef6edf0
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/azure"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	cloudprovider "k8s.io/cloud-provider"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+)
+
+//go:generate sh -c "mockgen -destination=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go -source=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go -package=provider VMSet"
+
+// VMSet defines functions all vmsets (including scale set and availability
+// set) should be implemented.
+// Don't forget to run the following command to generate the mock client:
+// mockgen -destination=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go -source=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go -package=provider VMSet
+type VMSet interface {
+	// GetInstanceIDByNodeName gets the cloud provider ID by node name.
+	// It must return ("", cloudprovider.InstanceNotFound) if the instance does
+	// not exist or is no longer running.
+	GetInstanceIDByNodeName(name string) (string, error)
+	// GetInstanceTypeByNodeName gets the instance type by node name.
+	GetInstanceTypeByNodeName(name string) (string, error)
+	// GetIPByNodeName gets machine private IP and public IP by node name.
+	GetIPByNodeName(name string) (string, string, error)
+	// GetPrimaryInterface gets machine primary network interface by node name.
+	GetPrimaryInterface(nodeName string) (network.Interface, error)
+	// GetNodeNameByProviderID gets the node name by provider ID.
+	GetNodeNameByProviderID(providerID string) (types.NodeName, error)
+
+	// GetZoneByNodeName gets cloudprovider.Zone by node name.
+	GetZoneByNodeName(name string) (cloudprovider.Zone, error)
+
+	// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
+	// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
+	GetPrimaryVMSetName() string
+	// GetVMSetNames selects all possible availability sets or scale sets
+	// (depending vmType configured) for service load balancer, if the service has
+	// no loadbalancer mode annotation returns the primary VMSet. If service annotation
+	// for loadbalancer exists then return the eligible VMSet.
+	GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
+	// GetNodeVMSetName returns the availability set or vmss name by the node name.
+	// It will return empty string when using standalone vms.
+	GetNodeVMSetName(node *v1.Node) (string, error)
+	// EnsureHostsInPool ensures the given Node's primary IP configurations are
+	// participating in the specified LoadBalancer Backend Pool.
+	EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string) error
+	// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
+	// participating in the specified LoadBalancer Backend Pool.
+	EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string) (string, string, string, *compute.VirtualMachineScaleSetVM, error)
+	// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
+	EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error
+	// EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS/VMAS.
+	EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolID string) error
+
+	// AttachDisk attaches a disk to vm
+	AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error)
+	// DetachDisk detaches a disk from vm
+	DetachDisk(nodeName types.NodeName, diskMap map[string]string) error
+	// WaitForUpdateResult waits for the response of the update request
+	WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error
+
+	// GetDataDisks gets a list of data disks attached to the node.
+	GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error)
+
+	// UpdateVM updates a vm
+	UpdateVM(nodeName types.NodeName) error
+
+	// GetPowerStatusByNodeName returns the powerState for the specified node.
+	GetPowerStatusByNodeName(name string) (string, error)
+
+	// GetProvisioningStateByNodeName returns the provisioningState for the specified node.
+	GetProvisioningStateByNodeName(name string) (string, error)
+
+	// GetPrivateIPsByNodeName returns a slice of all private ips assigned to node (ipv6 and ipv4)
+	GetPrivateIPsByNodeName(name string) ([]string, error)
+
+	// GetNodeNameByIPConfigurationID gets the nodeName and vmSetName by IP configuration ID.
+	GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error)
+
+	// GetNodeCIDRMasksByProviderID returns the node CIDR subnet mask by provider ID.
+	GetNodeCIDRMasksByProviderID(providerID string) (int, int, error)
+
+	// GetAgentPoolVMSetNames returns all vmSet names according to the nodes
+	GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error)
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a14b870ca5ad52448eff5634a265afb192dac05
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go
@@ -0,0 +1,1794 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+	utilnet "k8s.io/utils/net"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
+)
+
+var (
+	// ErrorNotVmssInstance indicates an instance is not belonging to any vmss.
+	ErrorNotVmssInstance = errors.New("not a vmss instance")
+
+	// Regexps extracting the scale set name, resource group, and network
+	// IP-configuration components from ARM resource IDs / provider IDs.
+	scaleSetNameRE         = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
+	resourceGroupRE        = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`)
+	vmssIPConfigurationRE  = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(.+)/networkInterfaces(?:.*)`)
+	vmssPIPConfigurationRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(.+)/networkInterfaces/(.+)/ipConfigurations/(.+)/publicIPAddresses/(.+)`)
+	vmssVMProviderIDRE     = regexp.MustCompile(`azure:///subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(?:\d+)`)
+)
+
+// vmssMetaInfo contains the metadata for a VMSS: its name and the resource
+// group it lives in.
+type vmssMetaInfo struct {
+	vmssName      string
+	resourceGroup string
+}
+
+// nodeIdentity identifies a node within a subscription by the VMSS it belongs
+// to, that VMSS's resource group, and the node name.
+type nodeIdentity struct {
+	resourceGroup string
+	vmssName      string
+	nodeName      string
+}
+
+// ScaleSet implements VMSet interface for Azure scale set.
+type ScaleSet struct {
+	*Cloud
+
+	// availabilitySet is also required for scaleSet because some instances
+	// (e.g. control plane nodes) may not belong to any scale sets.
+	// this also allows for clusters with both VM and VMSS nodes.
+	availabilitySet VMSet
+
+	vmssCache                 *azcache.TimedCache
+	vmssVMCache               *sync.Map // [resourcegroup/vmssname]*azcache.TimedCache
+	availabilitySetNodesCache *azcache.TimedCache
+	// lockMap serializes per-cache-key work during cache refresh.
+	lockMap *lockMap
+}
+
+// newScaleSet creates a new ScaleSet, wiring in an availability-set VMSet for
+// nodes not managed by any scale set and initializing the VMSS caches.
+func newScaleSet(az *Cloud) (VMSet, error) {
+	// Apply the default VMSS VM cache TTL when unset.
+	if az.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 {
+		az.Config.VmssVirtualMachinesCacheTTLInSeconds = consts.VMSSVirtualMachinesCacheTTLDefaultInSeconds
+	}
+
+	var err error
+	as, err := newAvailabilitySet(az)
+	if err != nil {
+		return nil, err
+	}
+	ss := &ScaleSet{
+		Cloud:           az,
+		availabilitySet: as,
+		vmssVMCache:     &sync.Map{},
+		lockMap:         newLockMap(),
+	}
+
+	// The availability-set nodes cache is only needed when such nodes may exist.
+	if !ss.DisableAvailabilitySetNodes {
+		ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ss.vmssCache, err = ss.newVMSSCache()
+	if err != nil {
+		return nil, err
+	}
+
+	return ss, nil
+}
+
+// getVMSS returns the named VMSS from the cache. On a miss it invalidates the
+// cache once and retries; if still absent it returns
+// cloudprovider.InstanceNotFound.
+func (ss *ScaleSet) getVMSS(vmssName string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSet, error) {
+	getter := func(vmssName string) (*compute.VirtualMachineScaleSet, error) {
+		cached, err := ss.vmssCache.Get(consts.VMSSKey, crt)
+		if err != nil {
+			return nil, err
+		}
+
+		vmsses := cached.(*sync.Map)
+		if vmss, ok := vmsses.Load(vmssName); ok {
+			result := vmss.(*vmssEntry)
+			return result.vmss, nil
+		}
+
+		return nil, nil
+	}
+
+	vmss, err := getter(vmssName)
+	if err != nil {
+		return nil, err
+	}
+	if vmss != nil {
+		return vmss, nil
+	}
+
+	klog.V(2).Infof("Couldn't find VMSS with name %s, refreshing the cache", vmssName)
+	// Drop the cached entry so the next Get repopulates from the API.
+	_ = ss.vmssCache.Delete(consts.VMSSKey)
+	vmss, err = getter(vmssName)
+	if err != nil {
+		return nil, err
+	}
+
+	if vmss == nil {
+		return nil, cloudprovider.InstanceNotFound
+	}
+	return vmss, nil
+}
+
+// getVmssVMByNodeIdentity find virtualMachineScaleSetVM by nodeIdentity, using node's parent VMSS cache.
+// It returns the VMSS name, the instance ID, and the VM entry.
+// Returns cloudprovider.InstanceNotFound if the node does not belong to the scale set named in nodeIdentity.
+func (ss *ScaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
+	cacheKey, cache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName)
+	if err != nil {
+		return "", "", nil, err
+	}
+
+	getter := func(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, bool, error) {
+		var found bool
+		cached, err := cache.Get(cacheKey, crt)
+		if err != nil {
+			return "", "", nil, found, err
+		}
+
+		virtualMachines := cached.(*sync.Map)
+		if vm, ok := virtualMachines.Load(nodeName); ok {
+			result := vm.(*vmssVirtualMachinesEntry)
+			found = true
+			return result.vmssName, result.instanceID, result.virtualMachine, found, nil
+		}
+
+		return "", "", nil, found, nil
+	}
+
+	// Validate that the node name parses as a scale-set instance before hitting the cache.
+	_, err = getScaleSetVMInstanceID(node.nodeName)
+	if err != nil {
+		return "", "", nil, err
+	}
+
+	vmssName, instanceID, vm, found, err := getter(node.nodeName, crt)
+	if err != nil {
+		return "", "", nil, err
+	}
+
+	if !found {
+		// lock and try find nodeName from cache again, refresh cache if still not found
+		ss.lockMap.LockEntry(cacheKey)
+		defer ss.lockMap.UnlockEntry(cacheKey)
+		vmssName, instanceID, vm, found, err = getter(node.nodeName, crt)
+		if err == nil && found && vm != nil {
+			klog.V(2).Infof("found VMSS VM with nodeName %s after retry", node.nodeName)
+			return vmssName, instanceID, vm, nil
+		}
+
+		klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache(vmss: %s, rg: %s)", node.nodeName, node.vmssName, node.resourceGroup)
+		vmssName, instanceID, vm, found, err = getter(node.nodeName, azcache.CacheReadTypeForceRefresh)
+		if err != nil {
+			return "", "", nil, err
+		}
+	}
+
+	if found && vm != nil {
+		return vmssName, instanceID, vm, nil
+	}
+
+	if !found || vm == nil {
+		klog.Warningf("Unable to find node %s: %v", node.nodeName, cloudprovider.InstanceNotFound)
+		return "", "", nil, cloudprovider.InstanceNotFound
+	}
+	return vmssName, instanceID, vm, nil
+}
+
+// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache, first
+// resolving the node's identity (resource group + VMSS name).
+// Returns cloudprovider.InstanceNotFound if nodeName does not belong to any scale set.
+func (ss *ScaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
+	node, err := ss.getNodeIdentityByNodeName(nodeName, crt)
+	if err != nil {
+		return "", "", nil, err
+	}
+
+	return ss.getVmssVMByNodeIdentity(node, crt)
+}
+
+// GetPowerStatusByNodeName returns the power state of the specified node.
+// Nodes managed by an availability set are delegated to the availabilitySet
+// implementation.
+func (ss *ScaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+		return "", err
+	}
+	if managedByAS {
+		// vm is managed by availability set.
+		return ss.availabilitySet.GetPowerStatusByNodeName(name)
+	}
+
+	_, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault)
+	if err != nil {
+		return powerState, err
+	}
+
+	// Scan instance-view statuses for the PowerState/... code and strip the prefix.
+	if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
+		statuses := *vm.InstanceView.Statuses
+		for _, status := range statuses {
+			state := to.String(status.Code)
+			if strings.HasPrefix(state, vmPowerStatePrefix) {
+				return strings.TrimPrefix(state, vmPowerStatePrefix), nil
+			}
+		}
+	}
+
+	// vm.InstanceView or vm.InstanceView.Statuses are nil when the VM is under deleting.
+	klog.V(3).Infof("InstanceView for node %q is nil, assuming it's stopped", name)
+	return vmPowerStateStopped, nil
+}
+
+// GetProvisioningStateByNodeName returns the provisioningState for the
+// specified node, delegating to the availability-set implementation for nodes
+// not managed by any scale set. An empty string is returned when the VM has
+// no provisioning state populated.
+func (ss *ScaleSet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) {
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+		return "", err
+	}
+	if managedByAS {
+		// vm is managed by availability set.
+		return ss.availabilitySet.GetProvisioningStateByNodeName(name)
+	}
+
+	_, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault)
+	if err != nil {
+		return provisioningState, err
+	}
+
+	if vm.VirtualMachineScaleSetVMProperties == nil || vm.VirtualMachineScaleSetVMProperties.ProvisioningState == nil {
+		return provisioningState, nil
+	}
+
+	return to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), nil
+}
+
+// getVmssVMByInstanceID gets a scale-set VM from cache by resource group,
+// scale set name, and instance ID, force-refreshing the cache once on a miss.
+// The node must belong to one of scale sets; otherwise
+// cloudprovider.InstanceNotFound is returned.
+func (ss *ScaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSetVM, error) {
+	cacheKey, cache, err := ss.getVMSSVMCache(resourceGroup, scaleSetName)
+	if err != nil {
+		return nil, err
+	}
+
+	getter := func(crt azcache.AzureCacheReadType) (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
+		cached, err := cache.Get(cacheKey, crt)
+		if err != nil {
+			return nil, false, err
+		}
+
+		// Linear scan of the cached entries for a case-insensitive triple match.
+		virtualMachines := cached.(*sync.Map)
+		virtualMachines.Range(func(key, value interface{}) bool {
+			vmEntry := value.(*vmssVirtualMachinesEntry)
+			if strings.EqualFold(vmEntry.resourceGroup, resourceGroup) &&
+				strings.EqualFold(vmEntry.vmssName, scaleSetName) &&
+				strings.EqualFold(vmEntry.instanceID, instanceID) {
+				vm = vmEntry.virtualMachine
+				found = true
+				return false
+			}
+
+			return true
+		})
+
+		return vm, found, nil
+	}
+
+	vm, found, err := getter(crt)
+	if err != nil {
+		return nil, err
+	}
+	if !found {
+		klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID)
+		vm, found, err = getter(azcache.CacheReadTypeForceRefresh)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if found && vm != nil {
+		return vm, nil
+	}
+	if found && vm == nil {
+		klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache if it is expired", scaleSetName, instanceID)
+		vm, found, err = getter(azcache.CacheReadTypeDefault)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if !found || vm == nil {
+		return nil, cloudprovider.InstanceNotFound
+	}
+
+	return vm, nil
+}
+
+// GetInstanceIDByNodeName gets the cloud provider ID by node name.
+// It must return ("", cloudprovider.InstanceNotFound) if the instance does
+// not exist or is no longer running. Nodes managed by an availability set
+// are delegated to the availabilitySet implementation.
+func (ss *ScaleSet) GetInstanceIDByNodeName(name string) (string, error) {
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+		return "", err
+	}
+	if managedByAS {
+		// vm is managed by availability set.
+		return ss.availabilitySet.GetInstanceIDByNodeName(name)
+	}
+
+	_, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Unable to find node %s: %v", name, err)
+		return "", err
+	}
+
+	// Normalize the resource-group segment casing so IDs compare consistently.
+	resourceID := *vm.ID
+	convertedResourceID, err := convertResourceGroupNameToLower(resourceID)
+	if err != nil {
+		klog.Errorf("convertResourceGroupNameToLower failed with error: %v", err)
+		return "", err
+	}
+	return convertedResourceID, nil
+}
+
+// GetNodeNameByProviderID gets the node name by provider ID.
+// providerID example:
+// 	 1. vmas providerID: azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-27053986-0
+// 	 2. vmss providerID:
+//		azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/1
+//	    /subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/k8s-agentpool-36841236-vmss_1
+func (ss *ScaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
+	// NodeName is not part of providerID for vmss instances.
+	scaleSetName, err := extractScaleSetNameByProviderID(providerID)
+	if err != nil {
+		klog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is managed by availability set: %v", providerID, err)
+		return ss.availabilitySet.GetNodeNameByProviderID(providerID)
+	}
+
+	resourceGroup, err := extractResourceGroupByProviderID(providerID)
+	if err != nil {
+		return "", fmt.Errorf("error of extracting resource group for node %q", providerID)
+	}
+
+	instanceID, err := getLastSegment(providerID, "/")
+	if err != nil {
+		klog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is managed by availability set: %v", providerID, err)
+		return ss.availabilitySet.GetNodeNameByProviderID(providerID)
+	}
+
+	// instanceID contains scaleSetName (returned by disk.ManagedBy), e.g. k8s-agentpool-36841236-vmss_1
+	if strings.HasPrefix(strings.ToLower(instanceID), strings.ToLower(scaleSetName)) {
+		instanceID, err = getLastSegment(instanceID, "_")
+		if err != nil {
+			return "", err
+		}
+	}
+
+	vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Unable to find node by providerID %s: %v", providerID, err)
+		return "", err
+	}
+
+	// The node name is the lower-cased OS computer name when available.
+	if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
+		nodeName := strings.ToLower(*vm.OsProfile.ComputerName)
+		return types.NodeName(nodeName), nil
+	}
+
+	return "", nil
+}
+
+// GetInstanceTypeByNodeName gets the instance type (VM SKU name) by node
+// name, delegating to the availability-set implementation for nodes not
+// managed by any scale set. An empty string is returned when the SKU is not
+// populated.
+func (ss *ScaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+		return "", err
+	}
+	if managedByAS {
+		// vm is managed by availability set.
+		return ss.availabilitySet.GetInstanceTypeByNodeName(name)
+	}
+
+	_, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		return "", err
+	}
+
+	if vm.Sku != nil && vm.Sku.Name != nil {
+		return *vm.Sku.Name, nil
+	}
+
+	return "", nil
+}
+
+// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
+// with availability zone, then it returns fault domain. Nodes managed by an
+// availability set are delegated to the availabilitySet implementation.
+func (ss *ScaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+		return cloudprovider.Zone{}, err
+	}
+	if managedByAS {
+		// vm is managed by availability set.
+		return ss.availabilitySet.GetZoneByNodeName(name)
+	}
+
+	_, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		return cloudprovider.Zone{}, err
+	}
+
+	var failureDomain string
+	if vm.Zones != nil && len(*vm.Zones) > 0 {
+		// Get availability zone for the node.
+		zones := *vm.Zones
+		zoneID, err := strconv.Atoi(zones[0])
+		if err != nil {
+			return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %w", zones, err)
+		}
+
+		failureDomain = ss.makeZone(to.String(vm.Location), zoneID)
+	} else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
+		// Availability zone is not used for the node, falling back to fault domain.
+		failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain))
+	} else {
+		// Neither zone nor fault domain available: drop the stale cache entry
+		// so a later call re-reads the VM.
+		err = fmt.Errorf("failed to get zone info")
+		klog.Errorf("GetZoneByNodeName: got unexpected error %v", err)
+		_ = ss.deleteCacheForNode(name)
+		return cloudprovider.Zone{}, err
+	}
+
+	return cloudprovider.Zone{
+		FailureDomain: strings.ToLower(failureDomain),
+		Region:        strings.ToLower(to.String(vm.Location)),
+	}, nil
+}
+
+// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
+// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
+// For ScaleSet this is always the configured primary scale set name.
+func (ss *ScaleSet) GetPrimaryVMSetName() string {
+	return ss.Config.PrimaryScaleSetName
+}
+
+// GetIPByNodeName gets machine private IP and public IP by node name. The
+// public IP is resolved from the primary IP configuration's public-IP
+// resource ID when one is attached; otherwise it is returned empty.
+func (ss *ScaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
+	nic, err := ss.GetPrimaryInterface(nodeName)
+	if err != nil {
+		klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
+		return "", "", err
+	}
+
+	ipConfig, err := getPrimaryIPConfig(nic)
+	if err != nil {
+		klog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
+		return "", "", err
+	}
+
+	internalIP := *ipConfig.PrivateIPAddress
+	publicIP := ""
+	if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
+		// Decompose the VMSS public-IP resource ID into its components
+		// (6 capture groups + full match = 7 entries).
+		pipID := *ipConfig.PublicIPAddress.ID
+		matches := vmssPIPConfigurationRE.FindStringSubmatch(pipID)
+		if len(matches) == 7 {
+			resourceGroupName := matches[1]
+			virtualMachineScaleSetName := matches[2]
+			virtualMachineIndex := matches[3]
+			networkInterfaceName := matches[4]
+			IPConfigurationName := matches[5]
+			publicIPAddressName := matches[6]
+			pip, existsPip, err := ss.getVMSSPublicIPAddress(resourceGroupName, virtualMachineScaleSetName, virtualMachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName)
+			if err != nil {
+				klog.Errorf("ss.getVMSSPublicIPAddress() failed with error: %v", err)
+				return "", "", err
+			}
+			if existsPip && pip.IPAddress != nil {
+				publicIP = *pip.IPAddress
+			}
+		} else {
+			klog.Warningf("Failed to get VMSS Public IP with ID %s", pipID)
+		}
+	}
+
+	return internalIP, publicIP, nil
+}
+
// getVMSSPublicIPAddress fetches a VMSS instance-level public IP address.
// It returns (pip, exists, error); a NotFound response from ARM is reported
// as exists == false with a nil error rather than as a failure.
func (ss *ScaleSet) getVMSSPublicIPAddress(resourceGroupName string, virtualMachineScaleSetName string, virtualMachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string) (network.PublicIPAddress, bool, error) {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	pip, err := ss.PublicIPAddressesClient.GetVirtualMachineScaleSetPublicIPAddress(ctx, resourceGroupName, virtualMachineScaleSetName, virtualMachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, "")
	// checkResourceExistsFromError maps a NotFound error to (false, nil).
	exists, rerr := checkResourceExistsFromError(err)
	if rerr != nil {
		return pip, false, rerr.Error()
	}

	if !exists {
		klog.V(2).Infof("Public IP %q not found", publicIPAddressName)
		return pip, false, nil
	}

	return pip, exists, nil
}
+
+// returns a list of private ips assigned to node
+// TODO (khenidak): This should read all nics, not just the primary
+// allowing users to split ipv4/v6 on multiple nics
+func (ss *ScaleSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) {
+	ips := make([]string, 0)
+	nic, err := ss.GetPrimaryInterface(nodeName)
+	if err != nil {
+		klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
+		return ips, err
+	}
+
+	if nic.IPConfigurations == nil {
+		return ips, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
+	}
+
+	for _, ipConfig := range *(nic.IPConfigurations) {
+		if ipConfig.PrivateIPAddress != nil {
+			ips = append(ips, *(ipConfig.PrivateIPAddress))
+		}
+	}
+
+	return ips, nil
+}
+
+// This returns the full identifier of the primary NIC for the given VM.
+func (ss *ScaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) {
+	if machine.NetworkProfile == nil || machine.NetworkProfile.NetworkInterfaces == nil {
+		return "", fmt.Errorf("failed to find the network interfaces for vm %s", to.String(machine.Name))
+	}
+
+	if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
+		return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
+	}
+
+	for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
+		if to.Bool(ref.Primary) {
+			return *ref.ID, nil
+		}
+	}
+
+	return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", to.String(machine.Name))
+}
+
+// getVmssMachineID returns the full identifier of a vmss virtual machine.
+func (az *Cloud) getVmssMachineID(subscriptionID, resourceGroup, scaleSetName, instanceID string) string {
+	return fmt.Sprintf(
+		consts.VmssMachineIDTemplate,
+		subscriptionID,
+		strings.ToLower(resourceGroup),
+		scaleSetName,
+		instanceID)
+}
+
+// machineName is composed of computerNamePrefix and 36-based instanceID.
+// And instanceID part if in fixed length of 6 characters.
+// Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/.
+func getScaleSetVMInstanceID(machineName string) (string, error) {
+	nameLength := len(machineName)
+	if nameLength < 6 {
+		return "", ErrorNotVmssInstance
+	}
+
+	instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
+	if err != nil {
+		return "", ErrorNotVmssInstance
+	}
+
+	return fmt.Sprintf("%d", instanceID), nil
+}
+
+// extractScaleSetNameByProviderID extracts the scaleset name by vmss node's ProviderID.
+func extractScaleSetNameByProviderID(providerID string) (string, error) {
+	matches := scaleSetNameRE.FindStringSubmatch(providerID)
+	if len(matches) != 2 {
+		return "", ErrorNotVmssInstance
+	}
+
+	return matches[1], nil
+}
+
+// extractResourceGroupByProviderID extracts the resource group name by vmss node's ProviderID.
+func extractResourceGroupByProviderID(providerID string) (string, error) {
+	matches := resourceGroupRE.FindStringSubmatch(providerID)
+	if len(matches) != 2 {
+		return "", ErrorNotVmssInstance
+	}
+
+	return matches[1], nil
+}
+
// listScaleSets lists all scale sets with orchestrationMode ScaleSetVM.
// Scale sets with zero capacity or without a VirtualMachineProfile
// (i.e. VM-orchestrated scale sets) are filtered out.
func (ss *ScaleSet) listScaleSets(resourceGroup string) ([]string, error) {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup)
	if rerr != nil {
		klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr)
		return nil, rerr.Error()
	}

	ssNames := make([]string, 0)
	for _, vmss := range allScaleSets {
		// NOTE(review): assumes vmss.Name is non-nil for listed scale sets.
		name := *vmss.Name
		if vmss.Sku != nil && to.Int64(vmss.Sku.Capacity) == 0 {
			klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name)
			continue
		}

		// A nil VirtualMachineProfile indicates VirtualMachine orchestration mode.
		if vmss.VirtualMachineScaleSetProperties == nil || vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile == nil {
			klog.V(3).Infof("VMSS %q orchestrationMode is VirtualMachine, skipping", name)
			continue
		}

		ssNames = append(ssNames, name)
	}

	return ssNames, nil
}
+
// getNodeIdentityByNodeName use the VMSS cache to find a node's resourcegroup and vmss, returned in a nodeIdentity.
// It first tries the cache at the requested read type; if the node's VMSS is not
// found there, it forces a cache refresh and retries once before reporting
// cloudprovider.InstanceNotFound.
func (ss *ScaleSet) getNodeIdentityByNodeName(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) {
	// getter scans the cached VMSS map and matches the node name's prefix
	// (node name minus the 6-character instance suffix) against each scale
	// set's computer name prefix (falling back to the VMSS name).
	getter := func(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) {
		node := &nodeIdentity{
			nodeName: nodeName,
		}

		cached, err := ss.vmssCache.Get(consts.VMSSKey, crt)
		if err != nil {
			return nil, err
		}

		vmsses := cached.(*sync.Map)
		vmsses.Range(func(key, value interface{}) bool {
			v := value.(*vmssEntry)
			if v.vmss.Name == nil {
				return true
			}

			vmssPrefix := *v.vmss.Name
			if v.vmss.VirtualMachineProfile != nil &&
				v.vmss.VirtualMachineProfile.OsProfile != nil &&
				v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix != nil {
				vmssPrefix = *v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix
			}

			// The slice nodeName[:len(nodeName)-6] is safe here: the
			// getScaleSetVMInstanceID check below runs before getter and
			// rejects names shorter than 6 characters.
			if strings.EqualFold(vmssPrefix, nodeName[:len(nodeName)-6]) {
				node.vmssName = *v.vmss.Name
				node.resourceGroup = v.resourceGroup
				// Stop iterating once a match is found.
				return false
			}

			return true
		})
		return node, nil
	}

	// Reject names that cannot be VMSS instances before touching the cache.
	if _, err := getScaleSetVMInstanceID(nodeName); err != nil {
		return nil, err
	}

	node, err := getter(nodeName, crt)
	if err != nil {
		return nil, err
	}
	if node.vmssName != "" {
		return node, nil
	}

	// Cache miss: the VMSS may have been created recently; refresh and retry once.
	klog.V(2).Infof("Couldn't find VMSS for node %s, refreshing the cache", nodeName)
	node, err = getter(nodeName, azcache.CacheReadTypeForceRefresh)
	if err != nil {
		return nil, err
	}
	if node.vmssName == "" {
		klog.Warningf("Unable to find node %s: %v", nodeName, cloudprovider.InstanceNotFound)
		return nil, cloudprovider.InstanceNotFound
	}
	return node, nil
}
+
// listScaleSetVMs lists VMs belonging to the specified scale set.
// The instance view is requested so callers can inspect per-VM runtime state.
// A NotFound response is translated to cloudprovider.InstanceNotFound.
func (ss *ScaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compute.VirtualMachineScaleSetVM, error) {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	allVMs, rerr := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, string(compute.InstanceView))
	if rerr != nil {
		klog.Errorf("VirtualMachineScaleSetVMsClient.List(%s, %s) failed: %v", resourceGroup, scaleSetName, rerr)
		if rerr.IsNotFound() {
			return nil, cloudprovider.InstanceNotFound
		}
		return nil, rerr.Error()
	}

	return allVMs, nil
}
+
// getAgentPoolScaleSets lists the virtual machines for the resource group and then builds
// a list of scale sets that match the nodes available to k8s.
// Control-plane nodes, nodes excluded from load balancing, and nodes not
// belonging to any known scale set are skipped.
func (ss *ScaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
	agentPoolScaleSets := &[]string{}
	for nx := range nodes {
		if isControlPlaneNode(nodes[nx]) {
			continue
		}

		nodeName := nodes[nx].Name
		shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(nodeName)
		if err != nil {
			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
			return nil, err
		}
		if shouldExcludeLoadBalancer {
			continue
		}

		ssName, _, _, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
		if err != nil {
			return nil, err
		}

		// An empty scale set name means the node is not a VMSS instance.
		if ssName == "" {
			klog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
			continue
		}

		// NOTE: duplicate scale set names may appear when multiple nodes share
		// a scale set; callers appear to tolerate this.
		*agentPoolScaleSets = append(*agentPoolScaleSets, ssName)
	}

	return agentPoolScaleSets, nil
}
+
// GetVMSetNames selects all possible scale sets for service load balancer. If the service has
// no loadbalancer mode annotation returns the primary VMSet. If service annotation
// for loadbalancer exists then return the eligible VMSet.
func (ss *ScaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]string, error) {
	hasMode, isAuto, serviceVMSetName := ss.getServiceLoadBalancerMode(service)
	useSingleSLB := ss.useStandardLoadBalancer() && !ss.EnableMultipleStandardLoadBalancers
	if !hasMode || useSingleSLB {
		// no mode specified in service annotation or use single SLB mode
		// default to PrimaryScaleSetName
		scaleSetNames := &[]string{ss.Config.PrimaryScaleSetName}
		return scaleSetNames, nil
	}

	scaleSetNames, err := ss.getAgentPoolScaleSets(nodes)
	if err != nil {
		klog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
		return nil, err
	}
	if len(*scaleSetNames) == 0 {
		klog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
		return nil, fmt.Errorf("no scale sets found for nodes, node count(%d)", len(nodes))
	}

	// Explicit (non-auto) mode: the annotated scale set must be one of the
	// agent-pool scale sets; the case-normalized name from the pool is returned.
	if !isAuto {
		found := false
		for asx := range *scaleSetNames {
			if strings.EqualFold((*scaleSetNames)[asx], serviceVMSetName) {
				found = true
				serviceVMSetName = (*scaleSetNames)[asx]
				break
			}
		}
		if !found {
			klog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetName)
			return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetName)
		}
		return &[]string{serviceVMSetName}, nil
	}

	return scaleSetNames, nil
}
+
+// extractResourceGroupByVMSSNicID extracts the resource group name by vmss nicID.
+func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
+	matches := vmssIPConfigurationRE.FindStringSubmatch(nicID)
+	if len(matches) != 4 {
+		return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
+	}
+
+	return matches[1], nil
+}
+
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
// Availability-set nodes are delegated to ss.availabilitySet; for VMSS nodes the
// NIC is fetched through the scale-set interface API.
func (ss *ScaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, azcache.CacheReadTypeDefault)
	if err != nil {
		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
		return network.Interface{}, err
	}
	if managedByAS {
		// vm is managed by availability set.
		return ss.availabilitySet.GetPrimaryInterface(nodeName)
	}

	ssName, instanceID, vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
	if err != nil {
		// VM is availability set, but not cached yet in availabilitySetNodesCache.
		if errors.Is(err, ErrorNotVmssInstance) {
			return ss.availabilitySet.GetPrimaryInterface(nodeName)
		}

		klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
		return network.Interface{}, err
	}

	primaryInterfaceID, err := ss.getPrimaryInterfaceID(*vm)
	if err != nil {
		klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
		return network.Interface{}, err
	}

	// The NIC name is the last path segment of its resource ID.
	nicName, err := getLastSegment(primaryInterfaceID, "/")
	if err != nil {
		klog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
		return network.Interface{}, err
	}
	// The NIC may live in a different resource group than the scale set client's
	// default, so extract it from the interface ID itself.
	resourceGroup, err := extractResourceGroupByVMSSNicID(primaryInterfaceID)
	if err != nil {
		return network.Interface{}, err
	}

	ctx, cancel := getContextWithCancel()
	defer cancel()
	nic, rerr := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, ssName, instanceID, nicName, "")
	if rerr != nil {
		exists, realErr := checkResourceExistsFromError(rerr)
		if realErr != nil {
			klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, realErr)
			return network.Interface{}, realErr.Error()
		}

		if !exists {
			return network.Interface{}, cloudprovider.InstanceNotFound
		}
	}

	// Fix interface's location, which is required when updating the interface.
	// TODO: is this a bug of azure SDK?
	if nic.Location == nil || *nic.Location == "" {
		nic.Location = vm.Location
	}

	return nic, nil
}
+
+// getPrimaryNetworkInterfaceConfiguration gets primary network interface configuration for scale set virtual machine.
+func (ss *ScaleSet) getPrimaryNetworkInterfaceConfiguration(networkConfigurations []compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) {
+	if len(networkConfigurations) == 1 {
+		return &networkConfigurations[0], nil
+	}
+
+	for idx := range networkConfigurations {
+		networkConfig := &networkConfigurations[idx]
+		if networkConfig.Primary != nil && *networkConfig.Primary {
+			return networkConfig, nil
+		}
+	}
+
+	return nil, fmt.Errorf("failed to find a primary network configuration for the scale set VM %q", nodeName)
+}
+
+// getPrimaryNetworkInterfaceConfigurationForScaleSet gets primary network interface configuration for scale set.
+func (ss *ScaleSet) getPrimaryNetworkInterfaceConfigurationForScaleSet(networkConfigurations []compute.VirtualMachineScaleSetNetworkConfiguration, vmssName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) {
+	if len(networkConfigurations) == 1 {
+		return &networkConfigurations[0], nil
+	}
+
+	for idx := range networkConfigurations {
+		networkConfig := &networkConfigurations[idx]
+		if networkConfig.Primary != nil && *networkConfig.Primary {
+			return networkConfig, nil
+		}
+	}
+
+	return nil, fmt.Errorf("failed to find a primary network configuration for the scale set %q", vmssName)
+}
+
+func getPrimaryIPConfigFromVMSSNetworkConfig(config *compute.VirtualMachineScaleSetNetworkConfiguration) (*compute.VirtualMachineScaleSetIPConfiguration, error) {
+	ipConfigurations := *config.IPConfigurations
+	if len(ipConfigurations) == 1 {
+		return &ipConfigurations[0], nil
+	}
+
+	for idx := range ipConfigurations {
+		ipConfig := &ipConfigurations[idx]
+		if ipConfig.Primary != nil && *ipConfig.Primary {
+			return ipConfig, nil
+		}
+	}
+
+	return nil, fmt.Errorf("failed to find a primary IP configuration")
+}
+
// getConfigForScaleSetByIPFamily returns the first IP configuration whose
// private IP address version matches the requested family (IPv6 or IPv4).
// NOTE(review): this dereferences config.IPConfigurations without a nil check
// and would panic on a configuration without IP configurations — confirm
// callers only pass configurations that have them.
func (ss *ScaleSet) getConfigForScaleSetByIPFamily(config *compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string, IPv6 bool) (*compute.VirtualMachineScaleSetIPConfiguration, error) {
	ipConfigurations := *config.IPConfigurations

	var ipVersion compute.IPVersion
	if IPv6 {
		ipVersion = compute.IPv6
	} else {
		ipVersion = compute.IPv4
	}
	for idx := range ipConfigurations {
		ipConfig := &ipConfigurations[idx]
		if ipConfig.PrivateIPAddressVersion == ipVersion {
			return ipConfig, nil
		}
	}

	return nil, fmt.Errorf("failed to find a IPconfiguration(IPv6=%v) for the scale set VM %q", IPv6, nodeName)
}
+
// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool, which returns (resourceGroup, vmasName, instanceID, vmssVM, error).
// A nil vmssVM with a nil error means no update is needed (node not found, node
// in a different VM set, pool already present, or node already on another LB).
func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetNameOfLB string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
	vmName := mapNodeNameToVMName(nodeName)
	ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
	if err != nil {
		// A deleted node is not an error for this operation; skip it.
		if errors.Is(err, cloudprovider.InstanceNotFound) {
			klog.Infof("EnsureHostInPool: skipping node %s because it is not found", vmName)
			return "", "", "", nil, nil
		}

		klog.Errorf("EnsureHostInPool: failed to get VMSS VM %s: %v", vmName, err)
		return "", "", "", nil, err
	}

	klog.V(2).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, ssName, backendPoolID)

	// Check scale set name:
	// - For basic SKU load balancer, return nil if the node's scale set is mismatched with vmSetNameOfLB.
	// - For single standard SKU load balancer, backend could belong to multiple VMSS, so we
	//   don't check vmSet for it.
	// - For multiple standard SKU load balancers, the behavior is similar to the basic load balancer
	needCheck := false
	if !ss.useStandardLoadBalancer() {
		// need to check the vmSet name when using the basic LB
		needCheck = true
	} else if ss.EnableMultipleStandardLoadBalancers {
		// need to check the vmSet name when using multiple standard LBs
		needCheck = true
	}
	if vmSetNameOfLB != "" && needCheck && !strings.EqualFold(vmSetNameOfLB, ssName) {
		klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the ScaleSet %s", vmName, vmSetNameOfLB)
		return "", "", "", nil, nil
	}

	// Find primary network interface configuration.
	if vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil {
		klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vm %s, probably because the vm's being deleted", vmName)
		return "", "", "", nil, nil
	}

	networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations
	primaryNetworkInterfaceConfiguration, err := ss.getPrimaryNetworkInterfaceConfiguration(networkInterfaceConfigurations, vmName)
	if err != nil {
		return "", "", "", nil, err
	}

	var primaryIPConfiguration *compute.VirtualMachineScaleSetIPConfiguration
	ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
	// Find primary network interface configuration.
	if !ss.Cloud.ipv6DualStackEnabled && !ipv6 {
		// Find primary IP configuration.
		primaryIPConfiguration, err = getPrimaryIPConfigFromVMSSNetworkConfig(primaryNetworkInterfaceConfiguration)
		if err != nil {
			return "", "", "", nil, err
		}
	} else {
		// For IPv6 or dualstack service, we need to pick the right IP configuration based on the cluster ip family
		// IPv6 configuration is only supported as non-primary, so we need to fetch the ip configuration where the
		// privateIPAddressVersion matches the clusterIP family
		primaryIPConfiguration, err = ss.getConfigForScaleSetByIPFamily(primaryNetworkInterfaceConfiguration, vmName, ipv6)
		if err != nil {
			return "", "", "", nil, err
		}
	}

	// Update primary IP configuration's LoadBalancerBackendAddressPools.
	foundPool := false
	newBackendPools := []compute.SubResource{}
	if primaryIPConfiguration.LoadBalancerBackendAddressPools != nil {
		newBackendPools = *primaryIPConfiguration.LoadBalancerBackendAddressPools
	}
	for _, existingPool := range newBackendPools {
		if strings.EqualFold(backendPoolID, *existingPool.ID) {
			foundPool = true
			break
		}
	}

	// The backendPoolID has already been found from existing LoadBalancerBackendAddressPools.
	if foundPool {
		return "", "", "", nil, nil
	}

	if ss.useStandardLoadBalancer() && len(newBackendPools) > 0 {
		// Although standard load balancer supports backends from multiple scale
		// sets, the same network interface couldn't be added to more than one load balancer of
		// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
		// about this.
		newBackendPoolsIDs := make([]string, 0, len(newBackendPools))
		for _, pool := range newBackendPools {
			if pool.ID != nil {
				newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID)
			}
		}
		isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs)
		if err != nil {
			return "", "", "", nil, err
		}
		if !isSameLB {
			klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName)
			return "", "", "", nil, nil
		}
	}

	// Compose a new vmssVM with added backendPoolID.
	newBackendPools = append(newBackendPools,
		compute.SubResource{
			ID: to.StringPtr(backendPoolID),
		})
	primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
	// Only the fields required for the network update are populated; the caller
	// sends this as a partial VM update.
	newVM := &compute.VirtualMachineScaleSetVM{
		Location: vm.Location,
		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
			HardwareProfile: vm.HardwareProfile,
			NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{
				NetworkInterfaceConfigurations: &networkInterfaceConfigurations,
			},
		},
	}

	// Get the node resource group.
	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
	if err != nil {
		return "", "", "", nil, err
	}

	return nodeResourceGroup, ssName, instanceID, newVM, nil
}
+
+func getVmssAndResourceGroupNameByVMProviderID(providerID string) (string, string, error) {
+	matches := vmssVMProviderIDRE.FindStringSubmatch(providerID)
+	if len(matches) != 3 {
+		return "", "", ErrorNotVmssInstance
+	}
+	return matches[1], matches[2], nil
+}
+
+func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
+	klog.V(2).Infof("ensureVMSSInPool: ensuring VMSS with backendPoolID %s", backendPoolID)
+	vmssNamesMap := make(map[string]bool)
+
+	// the single standard load balancer supports multiple vmss in its backend while
+	// multiple standard load balancers and the basic load balancer doesn't
+	if ss.useStandardLoadBalancer() && !ss.EnableMultipleStandardLoadBalancers {
+		for _, node := range nodes {
+			if ss.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
+				continue
+			}
+
+			shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(node.Name)
+			if err != nil {
+				klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", node.Name, err)
+				return err
+			}
+			if shouldExcludeLoadBalancer {
+				klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name)
+				continue
+			}
+
+			// in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes
+			resourceGroupName, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID)
+			if err != nil {
+				klog.V(4).Infof("ensureVMSSInPool: found VMAS node %s, will skip checking and continue", node.Name)
+				continue
+			}
+			// only vmsses in the resource group same as it's in azure config are included
+			if strings.EqualFold(resourceGroupName, ss.ResourceGroup) {
+				vmssNamesMap[vmssName] = true
+			}
+		}
+	} else {
+		vmssNamesMap[vmSetNameOfLB] = true
+	}
+
+	klog.V(2).Infof("ensureVMSSInPool begins to update VMSS %v with backendPoolID %s", vmssNamesMap, backendPoolID)
+	for vmssName := range vmssNamesMap {
+		vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault)
+		if err != nil {
+			return err
+		}
+
+		// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
+		// Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
+		if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) {
+			klog.V(3).Infof("ensureVMSSInPool: found vmss %s being deleted, skipping", vmssName)
+			continue
+		}
+
+		if vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil {
+			klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration of vmss %s", vmssName)
+			continue
+		}
+		vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
+		primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
+		if err != nil {
+			return err
+		}
+		var primaryIPConfig *compute.VirtualMachineScaleSetIPConfiguration
+		ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
+		// Find primary network interface configuration.
+		if !ss.Cloud.ipv6DualStackEnabled && !ipv6 {
+			// Find primary IP configuration.
+			primaryIPConfig, err = getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
+			if err != nil {
+				return err
+			}
+		} else {
+			primaryIPConfig, err = ss.getConfigForScaleSetByIPFamily(primaryNIC, "", ipv6)
+			if err != nil {
+				return err
+			}
+		}
+
+		loadBalancerBackendAddressPools := []compute.SubResource{}
+		if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
+			loadBalancerBackendAddressPools = *primaryIPConfig.LoadBalancerBackendAddressPools
+		}
+
+		var found bool
+		for _, loadBalancerBackendAddressPool := range loadBalancerBackendAddressPools {
+			if strings.EqualFold(*loadBalancerBackendAddressPool.ID, backendPoolID) {
+				found = true
+				break
+			}
+		}
+		if found {
+			continue
+		}
+
+		if ss.useStandardLoadBalancer() && len(loadBalancerBackendAddressPools) > 0 {
+			// Although standard load balancer supports backends from multiple scale
+			// sets, the same network interface couldn't be added to more than one load balancer of
+			// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
+			// about this.
+			newBackendPoolsIDs := make([]string, 0, len(loadBalancerBackendAddressPools))
+			for _, pool := range loadBalancerBackendAddressPools {
+				if pool.ID != nil {
+					newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID)
+				}
+			}
+			isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs)
+			if err != nil {
+				return err
+			}
+			if !isSameLB {
+				klog.V(4).Infof("VMSS %q has already been added to LB %q, omit adding it to a new one", vmssName, oldLBName)
+				return nil
+			}
+		}
+
+		// Compose a new vmss with added backendPoolID.
+		loadBalancerBackendAddressPools = append(loadBalancerBackendAddressPools,
+			compute.SubResource{
+				ID: to.StringPtr(backendPoolID),
+			})
+		primaryIPConfig.LoadBalancerBackendAddressPools = &loadBalancerBackendAddressPools
+		newVMSS := compute.VirtualMachineScaleSet{
+			Location: vmss.Location,
+			VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
+				VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
+					NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
+						NetworkInterfaceConfigurations: &vmssNIC,
+					},
+				},
+			},
+		}
+
+		klog.V(2).Infof("ensureVMSSInPool begins to update vmss(%s) with new backendPoolID %s", vmssName, backendPoolID)
+		rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS)
+		if rerr != nil {
+			klog.Errorf("ensureVMSSInPool CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err)
+			return rerr.Error()
+		}
+	}
+	return nil
+}
+
// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
// Per-node updates are collected into batched UpdateVMs calls per (resource
// group, VMSS) pair, then the backend pool is also ensured on the VMSS model.
func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
	mc := metrics.NewMetricContext("services", "vmss_ensure_hosts_in_pool", ss.ResourceGroup, ss.SubscriptionID, service.Name)
	isOperationSucceeded := false
	defer func() {
		mc.ObserveOperationWithResult(isOperationSucceeded)
	}()

	hostUpdates := make([]func() error, 0, len(nodes))
	nodeUpdates := make(map[vmssMetaInfo]map[string]compute.VirtualMachineScaleSetVM)
	// NOTE(review): this local shadows the standard "errors" package within the
	// function body; legal, but errors.Is/As cannot be used below this point.
	errors := make([]error, 0)
	for _, node := range nodes {
		localNodeName := node.Name

		if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
			klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
			continue
		}

		shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(localNodeName)
		if err != nil {
			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
			return err
		}
		if shouldExcludeLoadBalancer {
			klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
			continue
		}

		// Check whether the node is VMAS virtual machine.
		managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, azcache.CacheReadTypeDefault)
		if err != nil {
			klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err)
			errors = append(errors, err)
			continue
		}

		if managedByAS {
			// VMAS nodes should also be added to the SLB backends.
			if ss.useStandardLoadBalancer() {
				hostUpdates = append(hostUpdates, func() error {
					_, _, _, _, err := ss.availabilitySet.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetNameOfLB)
					return err
				})
				continue
			}

			klog.V(3).Infof("EnsureHostsInPool skips node %s because VMAS nodes couldn't be added to basic LB with VMSS backends", localNodeName)
			continue
		}

		nodeResourceGroup, nodeVMSS, nodeInstanceID, nodeVMSSVM, err := ss.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetNameOfLB)
		if err != nil {
			klog.Errorf("EnsureHostInPool(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err)
			errors = append(errors, err)
			continue
		}

		// No need to update if nodeVMSSVM is nil.
		if nodeVMSSVM == nil {
			continue
		}

		// Group the per-instance updates by (resource group, VMSS) so they can
		// be sent in one batched UpdateVMs call per scale set.
		nodeVMSSMetaInfo := vmssMetaInfo{vmssName: nodeVMSS, resourceGroup: nodeResourceGroup}
		if v, ok := nodeUpdates[nodeVMSSMetaInfo]; ok {
			v[nodeInstanceID] = *nodeVMSSVM
		} else {
			nodeUpdates[nodeVMSSMetaInfo] = map[string]compute.VirtualMachineScaleSetVM{
				nodeInstanceID: *nodeVMSSVM,
			}
		}

		// Invalidate the cache since the VMSS VM would be updated.
		// The defers intentionally run at function return, after all batched
		// updates below have completed.
		defer func() {
			_ = ss.deleteCacheForNode(localNodeName)
		}()
	}

	// Update VMs with best effort that have already been added to nodeUpdates.
	for meta, update := range nodeUpdates {
		// create new instance of meta and update for passing to anonymous function
		meta := meta
		update := update
		hostUpdates = append(hostUpdates, func() error {
			ctx, cancel := getContextWithCancel()
			defer cancel()
			klog.V(2).Infof("EnsureHostInPool begins to UpdateVMs for VMSS(%s, %s) with new backendPoolID %s", meta.resourceGroup, meta.vmssName, backendPoolID)
			rerr := ss.VirtualMachineScaleSetVMsClient.UpdateVMs(ctx, meta.resourceGroup, meta.vmssName, update, "network_update", ss.getPutVMSSVMBatchSize())
			if rerr != nil {
				klog.Errorf("EnsureHostInPool UpdateVMs for VMSS(%s, %s) failed with error %v", meta.resourceGroup, meta.vmssName, rerr.Error())
				return rerr.Error()
			}

			return nil
		})
	}
	// Run all update closures concurrently and aggregate their failures.
	errs := utilerrors.AggregateGoroutines(hostUpdates...)
	if errs != nil {
		return utilerrors.Flatten(errs)
	}

	// Fail if there are other errors.
	if len(errors) > 0 {
		return utilerrors.Flatten(utilerrors.NewAggregate(errors))
	}

	// Ensure the backendPoolID is also added on VMSS itself.
	// Refer to issue kubernetes/kubernetes#80365 for detailed information
	err := ss.ensureVMSSInPool(service, nodes, backendPoolID, vmSetNameOfLB)
	if err != nil {
		return err
	}

	isOperationSucceeded = true
	return nil
}
+
+// ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted
+// from the specified node, which returns (resourceGroup, vmssName, instanceID, vmssVM, error).
+func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
+	ssName, instanceID, vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			klog.Infof("ensureBackendPoolDeletedFromNode: skipping node %s because it is not found", nodeName)
+			return "", "", "", nil, nil
+		}
+
+		return "", "", "", nil, err
+	}
+
+	// Find primary network interface configuration.
+	if vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil {
+		klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vm %s, probably because the vm's being deleted", nodeName)
+		return "", "", "", nil, nil
+	}
+	networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations
+	primaryNetworkInterfaceConfiguration, err := ss.getPrimaryNetworkInterfaceConfiguration(networkInterfaceConfigurations, nodeName)
+	if err != nil {
+		return "", "", "", nil, err
+	}
+
+	// Find primary IP configuration.
+	primaryIPConfiguration, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNetworkInterfaceConfiguration)
+	if err != nil {
+		return "", "", "", nil, err
+	}
+	if primaryIPConfiguration.LoadBalancerBackendAddressPools == nil || len(*primaryIPConfiguration.LoadBalancerBackendAddressPools) == 0 {
+		return "", "", "", nil, nil
+	}
+
+	// Construct new loadBalancerBackendAddressPools and remove backendAddressPools from primary IP configuration.
+	existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools
+	newBackendPools := []compute.SubResource{}
+	foundPool := false
+	for i := len(existingBackendPools) - 1; i >= 0; i-- {
+		curPool := existingBackendPools[i]
+		if strings.EqualFold(backendPoolID, *curPool.ID) {
+			klog.V(10).Infof("ensureBackendPoolDeletedFromNode gets unwanted backend pool %q for node %s", backendPoolID, nodeName)
+			foundPool = true
+			newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
+		}
+	}
+
+	// Pool not found, assume it has been already removed.
+	if !foundPool {
+		return "", "", "", nil, nil
+	}
+
+	// Compose a new vmssVM with the backendPoolID removed.
+	primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
+	newVM := &compute.VirtualMachineScaleSetVM{
+		Location: vm.Location,
+		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
+			HardwareProfile: vm.HardwareProfile,
+			NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{
+				NetworkInterfaceConfigurations: &networkInterfaceConfigurations,
+			},
+		},
+	}
+
+	// Get the node resource group.
+	nodeResourceGroup, err := ss.GetNodeResourceGroup(nodeName)
+	if err != nil {
+		return "", "", "", nil, err
+	}
+
+	return nodeResourceGroup, ssName, instanceID, newVM, nil
+}
+
+// GetNodeNameByIPConfigurationID gets the node name and the VMSS name by IP configuration ID.
+func (ss *ScaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
+	matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID)
+	if len(matches) != 4 {
+		klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set", ipConfigurationID)
+		name, rg, err := ss.availabilitySet.GetNodeNameByIPConfigurationID(ipConfigurationID)
+		if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) {
+			klog.Errorf("Unable to find node by IPConfigurationID %s: %v", ipConfigurationID, err)
+			return "", "", ErrorNotVmssInstance
+		}
+		return name, rg, nil
+	}
+
+	resourceGroup := matches[1]
+	scaleSetName := matches[2]
+	instanceID := matches[3]
+	vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Unable to find node by ipConfigurationID %s: %v", ipConfigurationID, err)
+		return "", "", err
+	}
+
+	if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
+		return strings.ToLower(*vm.OsProfile.ComputerName), scaleSetName, nil
+	}
+
+	return "", "", nil
+}
+
+func getScaleSetAndResourceGroupNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
+	matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID)
+	if len(matches) != 4 {
+		klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set", ipConfigurationID)
+		return "", "", ErrorNotVmssInstance
+	}
+
+	resourceGroup := matches[1]
+	scaleSetName := matches[2]
+	return scaleSetName, resourceGroup, nil
+}
+
+func (ss *ScaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backendPoolID, vmSetName string, ipConfigurationIDs []string) error {
+	vmssNamesMap := make(map[string]bool)
+
+	// the standard load balancer supports multiple vmss in its backend while the basic sku doesn't
+	if ss.useStandardLoadBalancer() {
+		for _, ipConfigurationID := range ipConfigurationIDs {
+			// in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes
+			vmssName, resourceGroupName, err := getScaleSetAndResourceGroupNameByIPConfigurationID(ipConfigurationID)
+			if err != nil {
+				klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: found VMAS ipcConfigurationID %s, will skip checking and continue", ipConfigurationID)
+				continue
+			}
+			// only vmsses in the same resource group as the one in the azure config are included
+			if strings.EqualFold(resourceGroupName, ss.ResourceGroup) {
+				vmssNamesMap[vmssName] = true
+			}
+		}
+	} else {
+		vmssNamesMap[vmSetName] = true
+	}
+
+	return ss.EnsureBackendPoolDeletedFromVMSets(vmssNamesMap, backendPoolID)
+}
+
+// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
+func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
+	// Returns nil if backend address pools already deleted.
+	if backendAddressPools == nil {
+		return nil
+	}
+
+	mc := metrics.NewMetricContext("services", "vmss_ensure_backend_pool_deleted", ss.ResourceGroup, ss.SubscriptionID, service.Name)
+	isOperationSucceeded := false
+	defer func() {
+		mc.ObserveOperationWithResult(isOperationSucceeded)
+	}()
+
+	ipConfigurationIDs := []string{}
+	for _, backendPool := range *backendAddressPools {
+		if strings.EqualFold(*backendPool.ID, backendPoolID) && backendPool.BackendIPConfigurations != nil {
+			for _, ipConf := range *backendPool.BackendIPConfigurations {
+				if ipConf.ID == nil {
+					continue
+				}
+
+				ipConfigurationIDs = append(ipConfigurationIDs, *ipConf.ID)
+			}
+		}
+	}
+
+	hostUpdates := make([]func() error, 0, len(ipConfigurationIDs))
+	nodeUpdates := make(map[vmssMetaInfo]map[string]compute.VirtualMachineScaleSetVM)
+	allErrs := make([]error, 0)
+	for i := range ipConfigurationIDs {
+		ipConfigurationID := ipConfigurationIDs[i]
+
+		var scaleSetName string
+		var err error
+		if scaleSetName, err = extractScaleSetNameByProviderID(ipConfigurationID); err == nil {
+			// Only remove nodes belonging to specified vmSet to basic LB backends.
+			if !ss.useStandardLoadBalancer() && !strings.EqualFold(scaleSetName, vmSetName) {
+				continue
+			}
+		}
+
+		nodeName, _, err := ss.GetNodeNameByIPConfigurationID(ipConfigurationID)
+		if err != nil {
+			if errors.Is(err, ErrorNotVmssInstance) { // Do nothing for the VMAS nodes.
+				continue
+			}
+
+			if errors.Is(err, cloudprovider.InstanceNotFound) {
+				klog.Infof("EnsureBackendPoolDeleted(%s): skipping ip config %s because the corresponding vmss vm is not found", getServiceName(service), ipConfigurationID)
+				continue
+			}
+
+			klog.Errorf("Failed to GetNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err)
+			allErrs = append(allErrs, err)
+			continue
+		}
+
+		nodeResourceGroup, nodeVMSS, nodeInstanceID, nodeVMSSVM, err := ss.ensureBackendPoolDeletedFromNode(nodeName, backendPoolID)
+		if err != nil {
+			if !errors.Is(err, ErrorNotVmssInstance) { // Do nothing for the VMAS nodes.
+				klog.Errorf("EnsureBackendPoolDeleted(%s): backendPoolID(%s) - failed with error %v", getServiceName(service), backendPoolID, err)
+				allErrs = append(allErrs, err)
+			}
+			continue
+		}
+
+		// No need to update if nodeVMSSVM is nil.
+		if nodeVMSSVM == nil {
+			continue
+		}
+
+		nodeVMSSMetaInfo := vmssMetaInfo{vmssName: nodeVMSS, resourceGroup: nodeResourceGroup}
+		if v, ok := nodeUpdates[nodeVMSSMetaInfo]; ok {
+			v[nodeInstanceID] = *nodeVMSSVM
+		} else {
+			nodeUpdates[nodeVMSSMetaInfo] = map[string]compute.VirtualMachineScaleSetVM{
+				nodeInstanceID: *nodeVMSSVM,
+			}
+		}
+
+		// Invalidate the cache since the VMSS VM would be updated.
+		defer func() {
+			_ = ss.deleteCacheForNode(nodeName)
+		}()
+	}
+
+	// Update VMs with best effort that have already been added to nodeUpdates.
+	for meta, update := range nodeUpdates {
+		// create new instance of meta and update for passing to anonymous function
+		meta := meta
+		update := update
+		hostUpdates = append(hostUpdates, func() error {
+			ctx, cancel := getContextWithCancel()
+			defer cancel()
+			klog.V(2).Infof("EnsureBackendPoolDeleted begins to UpdateVMs for VMSS(%s, %s) with backendPoolID %s", meta.resourceGroup, meta.vmssName, backendPoolID)
+			rerr := ss.VirtualMachineScaleSetVMsClient.UpdateVMs(ctx, meta.resourceGroup, meta.vmssName, update, "network_update", ss.getPutVMSSVMBatchSize())
+			if rerr != nil {
+				klog.Errorf("EnsureBackendPoolDeleted UpdateVMs for VMSS(%s, %s) failed with error %v", meta.resourceGroup, meta.vmssName, rerr.Error())
+				return rerr.Error()
+			}
+
+			return nil
+		})
+	}
+	errs := utilerrors.AggregateGoroutines(hostUpdates...)
+	if errs != nil {
+		return utilerrors.Flatten(errs)
+	}
+
+	// Fail if there are other errors.
+	if len(allErrs) > 0 {
+		return utilerrors.Flatten(utilerrors.NewAggregate(allErrs))
+	}
+
+	// Ensure the backendPoolID is also deleted on VMSS itself.
+	if deleteFromVMSet {
+		err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
+		if err != nil {
+			return err
+		}
+	}
+
+	isOperationSucceeded = true
+	return nil
+}
+
+// GetNodeCIDRMasksByProviderID returns the node CIDR subnet masks by provider ID.
+func (ss *ScaleSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, error) {
+	_, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(providerID)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	var ipv4Mask, ipv6Mask int
+	if v4, ok := vmss.Tags[consts.VMSetCIDRIPV4TagKey]; ok && v4 != nil {
+		ipv4Mask, err = strconv.Atoi(to.String(v4))
+		if err != nil {
+			klog.Errorf("GetNodeCIDRMasksByProviderID: error when paring the value of the ipv4 mask size %s: %v", to.String(v4), err)
+		}
+	}
+	if v6, ok := vmss.Tags[consts.VMSetCIDRIPV6TagKey]; ok && v6 != nil {
+		ipv6Mask, err = strconv.Atoi(to.String(v6))
+		if err != nil {
+			klog.Errorf("GetNodeCIDRMasksByProviderID: error when paring the value of the ipv6 mask size%s: %v", to.String(v6), err)
+		}
+	}
+
+	return ipv4Mask, ipv6Mask, nil
+}
+
+// EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS
+func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]bool, backendPoolID string) error {
+	vmssUpdaters := make([]func() error, 0, len(vmssNamesMap))
+	errors := make([]error, 0, len(vmssNamesMap))
+	for vmssName := range vmssNamesMap {
+		vmssName := vmssName
+		vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault)
+		if err != nil {
+			klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get VMSS %s: %v", vmssName, err)
+			errors = append(errors, err)
+			continue
+		}
+
+		// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
+		// Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
+		if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) {
+			klog.V(3).Infof("ensureVMSSInPool: found vmss %s being deleted, skipping", vmssName)
+			continue
+		}
+		if vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil {
+			klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vmss %s", vmssName)
+			continue
+		}
+		vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
+		primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
+		if err != nil {
+			klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get the primary network interface config of the VMSS %s: %v", vmssName, err)
+			errors = append(errors, err)
+			continue
+		}
+		primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
+		if err != nil {
+			klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to the primary IP config from the VMSS %s's network config : %v", vmssName, err)
+			errors = append(errors, err)
+			continue
+		}
+		loadBalancerBackendAddressPools := []compute.SubResource{}
+		if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
+			loadBalancerBackendAddressPools = *primaryIPConfig.LoadBalancerBackendAddressPools
+		}
+
+		var found bool
+		var newBackendPools []compute.SubResource
+		for i := len(loadBalancerBackendAddressPools) - 1; i >= 0; i-- {
+			curPool := loadBalancerBackendAddressPools[i]
+			if strings.EqualFold(backendPoolID, *curPool.ID) {
+				klog.V(10).Infof("ensureBackendPoolDeletedFromVMSS gets unwanted backend pool %q for VMSS %s", backendPoolID, vmssName)
+				found = true
+				newBackendPools = append(loadBalancerBackendAddressPools[:i], loadBalancerBackendAddressPools[i+1:]...)
+			}
+		}
+		if !found {
+			continue
+		}
+
+		vmssUpdaters = append(vmssUpdaters, func() error {
+			// Compose a new vmss with the backendPoolID removed.
+			primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
+			newVMSS := compute.VirtualMachineScaleSet{
+				Location: vmss.Location,
+				VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
+					VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
+						NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
+							NetworkInterfaceConfigurations: &vmssNIC,
+						},
+					},
+				},
+			}
+
+			klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID)
+			rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS)
+			if rerr != nil {
+				klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, rerr)
+				return rerr.Error()
+			}
+
+			return nil
+		})
+	}
+
+	errs := utilerrors.AggregateGoroutines(vmssUpdaters...)
+	if errs != nil {
+		return utilerrors.Flatten(errs)
+	}
+	// Fail if there are other errors.
+	if len(errors) > 0 {
+		return utilerrors.Flatten(utilerrors.NewAggregate(errors))
+	}
+
+	return nil
+}
+
+// GetAgentPoolVMSetNames returns all VMSS/VMAS names according to the nodes.
+// We need to include the VMAS here because some of the cluster provisioning tools
+// like capz allow mixed instance types.
+func (ss *ScaleSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
+	vmSetNames := make([]string, 0)
+	as := ss.availabilitySet.(*availabilitySet)
+
+	for _, node := range nodes {
+		var names *[]string
+		managedByAS, err := ss.isNodeManagedByAvailabilitySet(node.Name, azcache.CacheReadTypeDefault)
+		if err != nil {
+			return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to check if the node %s is managed by VMAS: %w", node.Name, err)
+		}
+		if managedByAS {
+			cached, err := ss.availabilitySetNodesCache.Get(consts.AvailabilitySetNodesKey, azcache.CacheReadTypeDefault)
+			if err != nil {
+				return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to get availabilitySetNodesCache")
+			}
+			vms := cached.(availabilitySetNodeEntry).vms
+			names, err = as.getAgentPoolAvailabilitySets(vms, []*v1.Node{node})
+			if err != nil {
+				return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to execute getAgentPoolAvailabilitySets: %w", err)
+			}
+			vmSetNames = append(vmSetNames, *names...)
+			continue
+		}
+
+		names, err = ss.getAgentPoolScaleSets([]*v1.Node{node})
+		if err != nil {
+			return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to execute getAgentPoolScaleSets: %w", err)
+		}
+		vmSetNames = append(vmSetNames, *names...)
+	}
+
+	return &vmSetNames, nil
+}
+
+func (ss *ScaleSet) GetNodeVMSetName(node *v1.Node) (string, error) {
+	providerID := node.Spec.ProviderID
+	_, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(providerID)
+	if err != nil {
+		klog.Warningf("ss.GetNodeVMSetName: the provider ID %s of node %s does not match the format of a VMSS instance, assuming it is managed by an availability set", providerID, node.Name)
+		return ss.availabilitySet.GetNodeVMSetName(node)
+	}
+
+	klog.V(4).Infof("ss.GetNodeVMSetName: found vmss name %s from node name %s", vmssName, node.Name)
+	return vmssName, nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..8666ef83a117bf46fd928ea37c7aadb65dfa8cf3
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go
@@ -0,0 +1,341 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog/v2"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+)
+
+type vmssVirtualMachinesEntry struct {
+	resourceGroup  string
+	vmssName       string
+	instanceID     string
+	virtualMachine *compute.VirtualMachineScaleSetVM
+	lastUpdate     time.Time
+}
+
+type vmssEntry struct {
+	vmss          *compute.VirtualMachineScaleSet
+	resourceGroup string
+	lastUpdate    time.Time
+}
+
+type availabilitySetNodeEntry struct {
+	vmNames   sets.String
+	nodeNames sets.String
+	vms       []compute.VirtualMachine
+}
+
+func (ss *ScaleSet) newVMSSCache() (*azcache.TimedCache, error) {
+	getter := func(key string) (interface{}, error) {
+		localCache := &sync.Map{} // [vmssName]*vmssEntry
+
+		allResourceGroups, err := ss.GetResourceGroups()
+		if err != nil {
+			return nil, err
+		}
+
+		for _, resourceGroup := range allResourceGroups.List() {
+			allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(context.Background(), resourceGroup)
+			if rerr != nil {
+				klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr)
+				return nil, rerr.Error()
+			}
+
+			for i := range allScaleSets {
+				scaleSet := allScaleSets[i]
+				if scaleSet.Name == nil || *scaleSet.Name == "" {
+					klog.Warning("failed to get the name of VMSS")
+					continue
+				}
+				localCache.Store(*scaleSet.Name, &vmssEntry{
+					vmss:          &scaleSet,
+					resourceGroup: resourceGroup,
+					lastUpdate:    time.Now().UTC(),
+				})
+			}
+		}
+
+		return localCache, nil
+	}
+
+	if ss.Config.VmssCacheTTLInSeconds == 0 {
+		ss.Config.VmssCacheTTLInSeconds = consts.VMSSCacheTTLDefaultInSeconds
+	}
+	return azcache.NewTimedcache(time.Duration(ss.Config.VmssCacheTTLInSeconds)*time.Second, getter)
+}
+
+func extractVmssVMName(name string) (string, string, error) {
+	split := strings.SplitAfter(name, consts.VMSSNameSeparator)
+	if len(split) < 2 {
+		klog.V(3).Infof("Failed to extract vmssVMName %q", name)
+		return "", "", ErrorNotVmssInstance
+	}
+
+	ssName := strings.Join(split[0:len(split)-1], "")
+	// removing the trailing `vmssNameSeparator` since we used SplitAfter
+	ssName = ssName[:len(ssName)-1]
+	instanceID := split[len(split)-1]
+	return ssName, instanceID, nil
+}
+
+// getVMSSVMCache returns an *azcache.TimedCache and cache key for a VMSS (creating that cache if new).
+func (ss *ScaleSet) getVMSSVMCache(resourceGroup, vmssName string) (string, *azcache.TimedCache, error) {
+	cacheKey := strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName))
+	if entry, ok := ss.vmssVMCache.Load(cacheKey); ok {
+		cache := entry.(*azcache.TimedCache)
+		return cacheKey, cache, nil
+	}
+
+	cache, err := ss.newVMSSVirtualMachinesCache(resourceGroup, vmssName, cacheKey)
+	if err != nil {
+		return "", nil, err
+	}
+	ss.vmssVMCache.Store(cacheKey, cache)
+	return cacheKey, cache, nil
+}
+
+// gcVMSSVMCache deletes stale VMSS VMs caches from deleted VMSSes.
+func (ss *ScaleSet) gcVMSSVMCache() error {
+	cached, err := ss.vmssCache.Get(consts.VMSSKey, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		return err
+	}
+
+	vmsses := cached.(*sync.Map)
+	removed := map[string]bool{}
+	ss.vmssVMCache.Range(func(key, value interface{}) bool {
+		cacheKey := key.(string)
+		vlistIdx := cacheKey[strings.LastIndex(cacheKey, "/")+1:]
+		if _, ok := vmsses.Load(vlistIdx); !ok {
+			removed[cacheKey] = true
+		}
+		return true
+	})
+
+	for key := range removed {
+		ss.vmssVMCache.Delete(key)
+	}
+
+	return nil
+}
+
+// newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS.
+func (ss *ScaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
+	vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second
+
+	getter := func(key string) (interface{}, error) {
+		localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry
+
+		oldCache := make(map[string]vmssVirtualMachinesEntry)
+
+		if vmssCache, ok := ss.vmssVMCache.Load(cacheKey); ok {
+			// get old cache before refreshing the cache
+			cache := vmssCache.(*azcache.TimedCache)
+			entry, exists, err := cache.Store.GetByKey(cacheKey)
+			if err != nil {
+				return nil, err
+			}
+			if exists {
+				cached := entry.(*azcache.AzureCacheEntry).Data
+				if cached != nil {
+					virtualMachines := cached.(*sync.Map)
+					virtualMachines.Range(func(key, value interface{}) bool {
+						oldCache[key.(string)] = *value.(*vmssVirtualMachinesEntry)
+						return true
+					})
+				}
+			}
+		}
+
+		vms, err := ss.listScaleSetVMs(vmssName, resourceGroupName)
+		if err != nil {
+			return nil, err
+		}
+
+		for i := range vms {
+			vm := vms[i]
+			if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
+				klog.Warningf("failed to get computerName for vmssVM (%q)", vmssName)
+				continue
+			}
+
+			computerName := strings.ToLower(*vm.OsProfile.ComputerName)
+			if vm.NetworkProfile == nil || vm.NetworkProfile.NetworkInterfaces == nil {
+				klog.Warningf("skip caching vmssVM %s since its network profile hasn't initialized yet (probably still under creating)", computerName)
+				continue
+			}
+
+			vmssVMCacheEntry := &vmssVirtualMachinesEntry{
+				resourceGroup:  resourceGroupName,
+				vmssName:       vmssName,
+				instanceID:     to.String(vm.InstanceID),
+				virtualMachine: &vm,
+				lastUpdate:     time.Now().UTC(),
+			}
+			// set cache entry to nil when the VM is under deleting.
+			if vm.VirtualMachineScaleSetVMProperties != nil &&
+				strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) {
+				klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName)
+				vmssVMCacheEntry.virtualMachine = nil
+			}
+			localCache.Store(computerName, vmssVMCacheEntry)
+
+			delete(oldCache, computerName)
+		}
+
+		// add old missing cache data with nil entries to prevent aggressive
+		// ARM calls during cache invalidation
+		for name, vmEntry := range oldCache {
+			// if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache
+			// then it should not be added back to the cache
+			if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > vmssVirtualMachinesCacheTTL {
+				klog.V(5).Infof("ignoring expired entries from old cache for %s", name)
+				continue
+			}
+			lastUpdate := time.Now().UTC()
+			if vmEntry.virtualMachine == nil {
+				// if this is already a nil entry then keep the time the nil
+				// entry was first created, so we can cleanup unwanted entries
+				lastUpdate = vmEntry.lastUpdate
+			}
+
+			klog.V(5).Infof("adding old entries to new cache for %s", name)
+			localCache.Store(name, &vmssVirtualMachinesEntry{
+				resourceGroup:  vmEntry.resourceGroup,
+				vmssName:       vmEntry.vmssName,
+				instanceID:     vmEntry.instanceID,
+				virtualMachine: nil,
+				lastUpdate:     lastUpdate,
+			})
+		}
+
+		return localCache, nil
+	}
+
+	return azcache.NewTimedcache(vmssVirtualMachinesCacheTTL, getter)
+}
+
+func (ss *ScaleSet) deleteCacheForNode(nodeName string) error {
+	node, err := ss.getNodeIdentityByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
+		return err
+	}
+
+	cacheKey, timedcache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName)
+	if err != nil {
+		klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
+		return err
+	}
+
+	vmcache, err := timedcache.Get(cacheKey, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
+		return err
+	}
+	virtualMachines := vmcache.(*sync.Map)
+	virtualMachines.Delete(nodeName)
+
+	if err := ss.gcVMSSVMCache(); err != nil {
+		klog.Errorf("deleteCacheForNode(%s) failed to gc stale vmss caches: %v", nodeName, err)
+	}
+
+	return nil
+}
+
+func (ss *ScaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error) {
+	getter := func(key string) (interface{}, error) {
+		vmNames := sets.NewString()
+		resourceGroups, err := ss.GetResourceGroups()
+		if err != nil {
+			return nil, err
+		}
+
+		vmList := make([]compute.VirtualMachine, 0)
+		for _, resourceGroup := range resourceGroups.List() {
+			vms, err := ss.Cloud.ListVirtualMachines(resourceGroup)
+			if err != nil {
+				return nil, fmt.Errorf("newAvailabilitySetNodesCache: failed to list vms in the resource group %s: %w", resourceGroup, err)
+			}
+			for _, vm := range vms {
+				if vm.Name != nil {
+					vmNames.Insert(to.String(vm.Name))
+					vmList = append(vmList, vm)
+				}
+			}
+		}
+
+		// store all the node names in the cluster when the cache data was created.
+		nodeNames, err := ss.GetNodeNames()
+		if err != nil {
+			return nil, err
+		}
+
+		localCache := availabilitySetNodeEntry{
+			vmNames:   vmNames,
+			nodeNames: nodeNames,
+			vms:       vmList,
+		}
+
+		return localCache, nil
+	}
+
+	if ss.Config.AvailabilitySetNodesCacheTTLInSeconds == 0 {
+		ss.Config.AvailabilitySetNodesCacheTTLInSeconds = consts.AvailabilitySetNodesCacheTTLDefaultInSeconds
+	}
+	return azcache.NewTimedcache(time.Duration(ss.Config.AvailabilitySetNodesCacheTTLInSeconds)*time.Second, getter)
+}
+
+func (ss *ScaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt azcache.AzureCacheReadType) (bool, error) {
+	// Assume all nodes are managed by VMSS when DisableAvailabilitySetNodes is enabled.
+	if ss.DisableAvailabilitySetNodes {
+		klog.V(2).Infof("Assuming node %q is managed by VMSS since DisableAvailabilitySetNodes is set to true", nodeName)
+		return false, nil
+	}
+
+	cached, err := ss.availabilitySetNodesCache.Get(consts.AvailabilitySetNodesKey, crt)
+	if err != nil {
+		return false, err
+	}
+
+	cachedNodes := cached.(availabilitySetNodeEntry).nodeNames
+	// if the node is not in the cache, assume the node has joined after the last cache refresh and attempt to refresh the cache.
+	if !cachedNodes.Has(nodeName) {
+		klog.V(2).Infof("Node %s has joined the cluster since the last VM cache refresh, refreshing the cache", nodeName)
+		cached, err = ss.availabilitySetNodesCache.Get(consts.AvailabilitySetNodesKey, azcache.CacheReadTypeForceRefresh)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	cachedVMs := cached.(availabilitySetNodeEntry).vmNames
+	return cachedVMs.Has(nodeName), nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go
new file mode 100644
index 0000000000000000000000000000000000000000..32b6fed7233d1dfcc852a7926cc20a20045be319
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go
@@ -0,0 +1,360 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"fmt"
+	"net/http"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	"k8s.io/apimachinery/pkg/types"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+var (
+	// Default TTLs (in seconds) used when the corresponding *CacheTTLInSeconds
+	// config field is left at its zero value.
+	vmCacheTTLDefaultInSeconds           = 60
+	loadBalancerCacheTTLDefaultInSeconds = 120
+	nsgCacheTTLDefaultInSeconds          = 120
+	routeTableCacheTTLDefaultInSeconds   = 120
+
+	// azureNodeProviderIDRE matches providerIDs of Azure-managed nodes:
+	// azure:///subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Compute/...
+	azureNodeProviderIDRE = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)
+	// azureResourceGroupNameRE captures the resource group segment of an ARM
+	// resource ID as its only submatch.
+	azureResourceGroupNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/(?:.*)`)
+)
+
+// checkResourceExistsFromError inspects an error and returns true if err is nil,
+// false (with a nil error) if err carries HTTP status 404 Not Found, and
+// otherwise returns false together with the original error.
+func checkResourceExistsFromError(err *retry.Error) (bool, *retry.Error) {
+	if err == nil {
+		return true, nil
+	}
+
+	// 404 means "resource does not exist" — not a failure of the lookup itself.
+	if err.HTTPStatusCode == http.StatusNotFound {
+		return false, nil
+	}
+
+	return false, err
+}
+
+// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache.
+// The service side has throttling control that delays responses if there are
+// multiple requests onto certain vm resource request in short period, so reads
+// go through az.vmCache. Returns cloudprovider.InstanceNotFound when the cache
+// getter resolved the VM to nil (not found, or being deleted).
+func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) {
+	vmName := string(nodeName)
+	cachedVM, err := az.vmCache.Get(vmName, crt)
+	if err != nil {
+		return vm, err
+	}
+
+	if cachedVM == nil {
+		klog.Warningf("Unable to find node %s: %v", nodeName, cloudprovider.InstanceNotFound)
+		return vm, cloudprovider.InstanceNotFound
+	}
+
+	return *(cachedVM.(*compute.VirtualMachine)), nil
+}
+
+// getRouteTable returns the configured route table via the timed cache.
+// exists is false (with a nil error) when the table was not found.
+func (az *Cloud) getRouteTable(crt azcache.AzureCacheReadType) (routeTable network.RouteTable, exists bool, err error) {
+	if len(az.RouteTableName) == 0 {
+		// NOTE(review): capitalized error string, contrary to Go convention;
+		// kept as-is since this is vendored upstream code.
+		return routeTable, false, fmt.Errorf("Route table name is not configured")
+	}
+
+	cachedRt, err := az.rtCache.Get(az.RouteTableName, crt)
+	if err != nil {
+		return routeTable, false, err
+	}
+
+	if cachedRt == nil {
+		return routeTable, false, nil
+	}
+
+	return *(cachedRt.(*network.RouteTable)), true, nil
+}
+
+// getPublicIPAddress fetches the named public IP directly from ARM (no cache).
+// pipResourceGroup overrides az.ResourceGroup when non-empty. The bool result
+// is false (with a nil error) when the IP does not exist.
+func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (network.PublicIPAddress, bool, error) {
+	resourceGroup := az.ResourceGroup
+	if pipResourceGroup != "" {
+		resourceGroup = pipResourceGroup
+	}
+
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+	pip, err := az.PublicIPAddressesClient.Get(ctx, resourceGroup, pipName, "")
+	exists, rerr := checkResourceExistsFromError(err)
+	if rerr != nil {
+		return pip, false, rerr.Error()
+	}
+
+	if !exists {
+		klog.V(2).Infof("Public IP %q not found", pipName)
+		return pip, false, nil
+	}
+
+	return pip, exists, nil
+}
+
+// getSubnet fetches a subnet of the given virtual network directly from ARM.
+// The vnet is looked up in az.VnetResourceGroup when set, otherwise in
+// az.ResourceGroup. The bool result is false (with a nil error) on 404.
+func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (network.Subnet, bool, error) {
+	var rg string
+	if len(az.VnetResourceGroup) > 0 {
+		rg = az.VnetResourceGroup
+	} else {
+		rg = az.ResourceGroup
+	}
+
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+	subnet, err := az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "")
+	exists, rerr := checkResourceExistsFromError(err)
+	if rerr != nil {
+		return subnet, false, rerr.Error()
+	}
+
+	if !exists {
+		klog.V(2).Infof("Subnet %q not found", subnetName)
+		return subnet, false, nil
+	}
+
+	return subnet, exists, nil
+}
+
+// getAzureLoadBalancer returns the named load balancer via the timed cache.
+// exists is false (with a nil error) when the LB was not found.
+func (az *Cloud) getAzureLoadBalancer(name string, crt azcache.AzureCacheReadType) (lb network.LoadBalancer, exists bool, err error) {
+	cachedLB, err := az.lbCache.Get(name, crt)
+	if err != nil {
+		return lb, false, err
+	}
+
+	if cachedLB == nil {
+		return lb, false, nil
+	}
+
+	return *(cachedLB.(*network.LoadBalancer)), true, nil
+}
+
+// getSecurityGroup returns the configured network security group via the timed
+// cache. Unlike the other getters it treats "not found" as an error, since the
+// NSG is expected to exist once configured.
+func (az *Cloud) getSecurityGroup(crt azcache.AzureCacheReadType) (network.SecurityGroup, error) {
+	nsg := network.SecurityGroup{}
+	if az.SecurityGroupName == "" {
+		return nsg, fmt.Errorf("securityGroupName is not configured")
+	}
+
+	securityGroup, err := az.nsgCache.Get(az.SecurityGroupName, crt)
+	if err != nil {
+		return nsg, err
+	}
+
+	if securityGroup == nil {
+		return nsg, fmt.Errorf("nsg %q not found", az.SecurityGroupName)
+	}
+
+	return *(securityGroup.(*network.SecurityGroup)), nil
+}
+
+// newVMCache builds the timed cache backing getVirtualMachine. The getter
+// resolves a node name (cache key) to a *compute.VirtualMachine, returning
+// (nil, nil) for VMs that are absent or being deleted so that negative results
+// are cached as well.
+func (az *Cloud) newVMCache() (*azcache.TimedCache, error) {
+	getter := func(key string) (interface{}, error) {
+		// Currently InstanceView request are used by azure_zones, while the calls come after non-InstanceView
+		// request. If we first send an InstanceView request and then a non InstanceView request, the second
+		// request will still hit throttling. This is what happens now for cloud controller manager: In this
+		// case we do get instance view every time to fulfill the azure_zones requirement without hitting
+		// throttling.
+		// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
+		ctx, cancel := getContextWithCancel()
+		defer cancel()
+
+		resourceGroup, err := az.GetNodeResourceGroup(key)
+		if err != nil {
+			return nil, err
+		}
+
+		vm, verr := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceView)
+		exists, rerr := checkResourceExistsFromError(verr)
+		if rerr != nil {
+			return nil, rerr.Error()
+		}
+
+		if !exists {
+			klog.V(2).Infof("Virtual machine %q not found", key)
+			return nil, nil
+		}
+
+		// Treat a VM in Deleting state the same as not found, so callers do
+		// not act on a machine that is about to disappear.
+		if vm.VirtualMachineProperties != nil &&
+			strings.EqualFold(to.String(vm.VirtualMachineProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) {
+			klog.V(2).Infof("Virtual machine %q is under deleting", key)
+			return nil, nil
+		}
+
+		return &vm, nil
+	}
+
+	if az.VMCacheTTLInSeconds == 0 {
+		az.VMCacheTTLInSeconds = vmCacheTTLDefaultInSeconds
+	}
+	return azcache.NewTimedcache(time.Duration(az.VMCacheTTLInSeconds)*time.Second, getter)
+}
+
+// newLBCache builds the timed cache backing getAzureLoadBalancer. The getter
+// resolves an LB name to a *network.LoadBalancer, returning (nil, nil) when
+// the LB does not exist so negative results are cached too.
+func (az *Cloud) newLBCache() (*azcache.TimedCache, error) {
+	getter := func(key string) (interface{}, error) {
+		ctx, cancel := getContextWithCancel()
+		defer cancel()
+
+		lb, err := az.LoadBalancerClient.Get(ctx, az.getLoadBalancerResourceGroup(), key, "")
+		exists, rerr := checkResourceExistsFromError(err)
+		if rerr != nil {
+			return nil, rerr.Error()
+		}
+
+		if !exists {
+			klog.V(2).Infof("Load balancer %q not found", key)
+			return nil, nil
+		}
+
+		return &lb, nil
+	}
+
+	if az.LoadBalancerCacheTTLInSeconds == 0 {
+		az.LoadBalancerCacheTTLInSeconds = loadBalancerCacheTTLDefaultInSeconds
+	}
+	return azcache.NewTimedcache(time.Duration(az.LoadBalancerCacheTTLInSeconds)*time.Second, getter)
+}
+
+// newNSGCache builds the timed cache backing getSecurityGroup. The getter
+// resolves an NSG name to a *network.SecurityGroup, returning (nil, nil) on
+// 404 so negative results are cached too.
+func (az *Cloud) newNSGCache() (*azcache.TimedCache, error) {
+	getter := func(key string) (interface{}, error) {
+		ctx, cancel := getContextWithCancel()
+		defer cancel()
+		nsg, err := az.SecurityGroupsClient.Get(ctx, az.SecurityGroupResourceGroup, key, "")
+		exists, rerr := checkResourceExistsFromError(err)
+		if rerr != nil {
+			return nil, rerr.Error()
+		}
+
+		if !exists {
+			klog.V(2).Infof("Security group %q not found", key)
+			return nil, nil
+		}
+
+		return &nsg, nil
+	}
+
+	if az.NsgCacheTTLInSeconds == 0 {
+		az.NsgCacheTTLInSeconds = nsgCacheTTLDefaultInSeconds
+	}
+	return azcache.NewTimedcache(time.Duration(az.NsgCacheTTLInSeconds)*time.Second, getter)
+}
+
+// newRouteTableCache builds the timed cache backing getRouteTable. The getter
+// resolves a route table name to a *network.RouteTable, returning (nil, nil)
+// on 404 so negative results are cached too.
+func (az *Cloud) newRouteTableCache() (*azcache.TimedCache, error) {
+	getter := func(key string) (interface{}, error) {
+		ctx, cancel := getContextWithCancel()
+		defer cancel()
+		rt, err := az.RouteTablesClient.Get(ctx, az.RouteTableResourceGroup, key, "")
+		exists, rerr := checkResourceExistsFromError(err)
+		if rerr != nil {
+			return nil, rerr.Error()
+		}
+
+		if !exists {
+			klog.V(2).Infof("Route table %q not found", key)
+			return nil, nil
+		}
+
+		return &rt, nil
+	}
+
+	if az.RouteTableCacheTTLInSeconds == 0 {
+		az.RouteTableCacheTTLInSeconds = routeTableCacheTTLDefaultInSeconds
+	}
+	return azcache.NewTimedcache(time.Duration(az.RouteTableCacheTTLInSeconds)*time.Second, getter)
+}
+
+// useStandardLoadBalancer reports whether the configured LB SKU is Standard
+// (case-insensitive comparison against consts.LoadBalancerSkuStandard).
+func (az *Cloud) useStandardLoadBalancer() bool {
+	return strings.EqualFold(az.LoadBalancerSku, consts.LoadBalancerSkuStandard)
+}
+
+// excludeMasterNodesFromStandardLB reports whether ExcludeMasterFromStandardLB
+// is explicitly set to true (the field is a *bool; nil means unset/false).
+func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
+	return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
+}
+
+// disableLoadBalancerOutboundSNAT reports whether outbound SNAT should be
+// disabled. It is only honored on the Standard LB SKU; otherwise (or when the
+// DisableOutboundSNAT option is unset) it returns false.
+func (az *Cloud) disableLoadBalancerOutboundSNAT() bool {
+	if !az.useStandardLoadBalancer() || az.DisableOutboundSNAT == nil {
+		return false
+	}
+
+	return *az.DisableOutboundSNAT
+}
+
+// IsNodeUnmanaged returns true if the node is not managed by Azure cloud provider.
+// Those nodes includes on-prem or VMs from other clouds. They will not be added to load balancer
+// backends. Azure routes and managed disks are also not supported for them.
+// Membership is decided against the set returned by GetUnmanagedNodes.
+func (az *Cloud) IsNodeUnmanaged(nodeName string) (bool, error) {
+	unmanagedNodes, err := az.GetUnmanagedNodes()
+	if err != nil {
+		return false, err
+	}
+
+	return unmanagedNodes.Has(nodeName), nil
+}
+
+// IsNodeUnmanagedByProviderID returns true if the node is not managed by Azure cloud provider.
+// All managed node's providerIDs are in format 'azure:///subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Compute/.*',
+// so any providerID not matching azureNodeProviderIDRE is treated as unmanaged.
+func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool {
+	return !azureNodeProviderIDRE.Match([]byte(providerID))
+}
+
+// convertResourceGroupNameToLower converts the resource group name in the
+// resource ID to be lowered. Returns an error when resourceID does not match
+// the ARM resource ID format captured by azureResourceGroupNameRE.
+func convertResourceGroupNameToLower(resourceID string) (string, error) {
+	matches := azureResourceGroupNameRE.FindStringSubmatch(resourceID)
+	if len(matches) != 2 {
+		return "", fmt.Errorf("%q isn't in Azure resource ID format %q", resourceID, azureResourceGroupNameRE.String())
+	}
+
+	resourceGroup := matches[1]
+	// NOTE(review): replaces only the FIRST occurrence of the group name in the
+	// ID; if the same substring appeared earlier (e.g. inside the subscription
+	// segment) the wrong span would be lowered — presumably never the case for
+	// real ARM IDs, but worth confirming upstream.
+	return strings.Replace(resourceID, resourceGroup, strings.ToLower(resourceGroup), 1), nil
+}
+
+// isBackendPoolOnSameLB checks whether newBackendPoolID is on the same load balancer as existingBackendPools.
+// Since both public and internal LBs are supported, lbName and lbName-internal are treated as same.
+// If not same, the lbName for existingBackendPools would also be returned.
+// An error is returned when any backend pool ID fails to match backendPoolIDRE.
+func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []string) (bool, string, error) {
+	matches := backendPoolIDRE.FindStringSubmatch(newBackendPoolID)
+	if len(matches) != 2 {
+		return false, "", fmt.Errorf("new backendPoolID %q is in wrong format", newBackendPoolID)
+	}
+
+	// Strip the "-internal" suffix so the public/internal pair of one LB
+	// compares equal.
+	newLBName := matches[1]
+	newLBNameTrimmed := strings.TrimSuffix(newLBName, consts.InternalLoadBalancerNameSuffix)
+	for _, backendPool := range existingBackendPools {
+		matches := backendPoolIDRE.FindStringSubmatch(backendPool)
+		if len(matches) != 2 {
+			return false, "", fmt.Errorf("existing backendPoolID %q is in wrong format", backendPool)
+		}
+
+		lbName := matches[1]
+		if !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), newLBNameTrimmed) {
+			return false, lbName, nil
+		}
+	}
+
+	return true, "", nil
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go
new file mode 100644
index 0000000000000000000000000000000000000000..52e178c0135be2d4919e23fdb4e420add0b72878
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go
@@ -0,0 +1,219 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/klog/v2"
+
+	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+)
+
+// refreshZones invokes refreshFunc every consts.ZoneFetchingInterval. It
+// blocks forever, so it is meant to run in its own goroutine; errors from
+// refreshFunc are deliberately ignored (best-effort periodic refresh).
+func (az *Cloud) refreshZones(refreshFunc func() error) {
+	ticker := time.NewTicker(consts.ZoneFetchingInterval)
+	defer ticker.Stop()
+
+	for range ticker.C {
+		_ = refreshFunc()
+	}
+}
+
+// syncRegionZonesMap fetches the available zones for every region of the
+// subscription via ZoneClient and merges them into az.regionZonesMap.
+// An empty result is logged but not treated as an error.
+func (az *Cloud) syncRegionZonesMap() error {
+	klog.V(2).Infof("syncRegionZonesMap: starting to fetch all available zones for the subscription %s", az.SubscriptionID)
+	zones, rerr := az.ZoneClient.GetZones(context.Background(), az.SubscriptionID)
+	if rerr != nil {
+		klog.Warningf("syncRegionZonesMap: error when get zones: %s, will retry after %s", rerr.Error().Error(), consts.ZoneFetchingInterval.String())
+		return rerr.Error()
+	}
+	if len(zones) == 0 {
+		klog.Warning("syncRegionZonesMap: empty zone list")
+	}
+
+	az.updateRegionZonesMap(zones)
+
+	return nil
+}
+
+// updateRegionZonesMap merges the given region→zones mapping into
+// az.regionZonesMap under the write lock, lazily initializing the map.
+// Existing entries for the same region are overwritten.
+func (az *Cloud) updateRegionZonesMap(zones map[string][]string) {
+	az.refreshZonesLock.Lock()
+	defer az.refreshZonesLock.Unlock()
+
+	if az.regionZonesMap == nil {
+		az.regionZonesMap = make(map[string][]string)
+	}
+
+	for region, z := range zones {
+		az.regionZonesMap[region] = z
+	}
+}
+
+// getRegionZonesBackoff returns the zones of the given region from
+// az.regionZonesMap, fetching them from ARM with exponential backoff when the
+// map has not been populated yet. On Azure Stack it short-circuits since zones
+// are unsupported there.
+func (az *Cloud) getRegionZonesBackoff(region string) ([]string, error) {
+	if az.isStackCloud() {
+		// Azure Stack does not support zone at the moment
+		// https://docs.microsoft.com/en-us/azure-stack/user/azure-stack-network-differences?view=azs-2102
+		klog.V(3).Infof("getRegionZonesMapWrapper: Azure Stack does not support Zones at the moment, skipping")
+		return az.regionZonesMap[region], nil
+	}
+
+	// NOTE(review): this len() check reads the map before taking the lock;
+	// presumably benign in practice, but it races with updateRegionZonesMap —
+	// worth confirming upstream.
+	if len(az.regionZonesMap) != 0 {
+		az.refreshZonesLock.RLock()
+		defer az.refreshZonesLock.RUnlock()
+
+		return az.regionZonesMap[region], nil
+	}
+
+	klog.V(2).Infof("getRegionZonesMapWrapper: the region-zones map is not initialized successfully, retrying immediately")
+
+	var (
+		zones map[string][]string
+		rerr  *retry.Error
+	)
+	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (done bool, err error) {
+		zones, rerr = az.ZoneClient.GetZones(context.Background(), az.SubscriptionID)
+		if rerr != nil {
+			klog.Errorf("getRegionZonesMapWrapper: failed to fetch zones information: %v", rerr.Error())
+			return false, nil
+		}
+
+		return true, nil
+	})
+
+	// Backoff exhausted: surface the last ARM error.
+	if errors.Is(err, wait.ErrWaitTimeout) {
+		return []string{}, rerr.Error()
+	}
+
+	az.updateRegionZonesMap(zones)
+
+	if len(az.regionZonesMap) != 0 {
+		az.refreshZonesLock.RLock()
+		defer az.refreshZonesLock.RUnlock()
+
+		return az.regionZonesMap[region], nil
+	}
+
+	return []string{}, nil
+}
+
+// makeZone returns the zone value in format of <region>-<zone-id>,
+// e.g. "eastus2-1". The location is lowercased to normalize the region part.
+func (az *Cloud) makeZone(location string, zoneID int) string {
+	return fmt.Sprintf("%s-%d", strings.ToLower(location), zoneID)
+}
+
+// isAvailabilityZone returns true if the zone is in format of <region>-<zone-id>
+// for this cloud's configured region. The prefix check is case-sensitive
+// against az.Location.
+func (az *Cloud) isAvailabilityZone(zone string) bool {
+	return strings.HasPrefix(zone, fmt.Sprintf("%s-", az.Location))
+}
+
+// GetZoneID returns the ID of zone from node's zone label, i.e. the part after
+// the "<region>-" prefix. It returns "" when the label is not an availability
+// zone of this cloud's region.
+func (az *Cloud) GetZoneID(zoneLabel string) string {
+	if !az.isAvailabilityZone(zoneLabel) {
+		return ""
+	}
+
+	return strings.TrimPrefix(zoneLabel, fmt.Sprintf("%s-", az.Location))
+}
+
+// GetZone returns the Zone containing the current availability zone and locality region that the program is running in.
+// If the node is not running with availability zones, then it will fall back to fault domain.
+// When UseInstanceMetadata is set, the zone is read from IMDS; otherwise the
+// node's hostname is resolved through the VMSet via ARM.
+func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
+	if az.UseInstanceMetadata {
+		metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeUnsafe)
+		if err != nil {
+			return cloudprovider.Zone{}, err
+		}
+
+		if metadata.Compute == nil {
+			// Evict the bad entry so the next call re-fetches from IMDS.
+			_ = az.Metadata.imsCache.Delete(consts.MetadataCacheKey)
+			return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata")
+		}
+
+		zone := ""
+		location := metadata.Compute.Location
+		if metadata.Compute.Zone != "" {
+			// IMDS reports the zone as a numeric string; normalize it to
+			// "<region>-<id>" via makeZone.
+			zoneID, err := strconv.Atoi(metadata.Compute.Zone)
+			if err != nil {
+				return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %w", metadata.Compute.Zone, err)
+			}
+			zone = az.makeZone(location, zoneID)
+		} else {
+			klog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain")
+			zone = metadata.Compute.FaultDomain
+		}
+
+		return cloudprovider.Zone{
+			FailureDomain: strings.ToLower(zone),
+			Region:        strings.ToLower(location),
+		}, nil
+	}
+	// if UseInstanceMetadata is false, get Zone name by calling ARM
+	hostname, err := os.Hostname()
+	if err != nil {
+		return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel")
+	}
+	return az.VMSet.GetZoneByNodeName(strings.ToLower(hostname))
+}
+
+// GetZoneByProviderID implements Zones.GetZoneByProviderID
+// This is particularly useful in external cloud providers where the kubelet
+// does not initialize node data.
+// An empty providerID yields errNodeNotInitialized; unmanaged nodes yield a
+// zero Zone with a nil error.
+func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
+	if providerID == "" {
+		return cloudprovider.Zone{}, errNodeNotInitialized
+	}
+
+	// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
+	if az.IsNodeUnmanagedByProviderID(providerID) {
+		klog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID)
+		return cloudprovider.Zone{}, nil
+	}
+
+	nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
+	if err != nil {
+		return cloudprovider.Zone{}, err
+	}
+
+	return az.GetZoneByNodeName(ctx, nodeName)
+}
+
+// GetZoneByNodeName implements Zones.GetZoneByNodeName
+// This is particularly useful in external cloud providers where the kubelet
+// does not initialize node data.
+// Unmanaged nodes yield a zero Zone with a nil error; managed nodes are
+// resolved through the VMSet.
+func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
+	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
+	unmanaged, err := az.IsNodeUnmanaged(string(nodeName))
+	if err != nil {
+		return cloudprovider.Zone{}, err
+	}
+	if unmanaged {
+		klog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName)
+		return cloudprovider.Zone{}, nil
+	}
+
+	return az.VMSet.GetZoneByNodeName(string(nodeName))
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/doc.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb46d8c807f6ecdbd1647f2d02d2b1fdc4931523
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package provider is an implementation of CloudProvider Interface, LoadBalancer
+// and Instances for Azure.
+package provider // import "sigs.k8s.io/cloud-provider-azure/pkg/provider"