diff --git a/cluster-autoscaler/core/scale_down.go b/cluster-autoscaler/core/scale_down.go
index 57183841cb9abfd78790027c0a4f537f84ac4789..b116ba34844c06fdd7087dcc49cb90756937a928 100644
--- a/cluster-autoscaler/core/scale_down.go
+++ b/cluster-autoscaler/core/scale_down.go
@@ -758,14 +758,17 @@ func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface
 		allGone = true
 		for _, pod := range pods {
 			podreturned, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
-			if err == nil {
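+			// The pod only blocks the drain while it is still bound to this
+			// node; a same-name pod rescheduled elsewhere counts as deleted.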
+			if err == nil && (podreturned == nil || podreturned.Spec.NodeName == node.Name) {
 				glog.Errorf("Not deleted yet %v", podreturned)
 				allGone = false
 				break
 			}
-			if !kube_errors.IsNotFound(err) {
+			if err != nil && !kube_errors.IsNotFound(err) {
 				glog.Errorf("Failed to check pod %s/%s: %v", pod.Namespace, pod.Name, err)
 				allGone = false
+				break
 			}
 		}
 		if allGone {
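
The scale_down.go hunk above tightens drainNode's post-eviction polling in two ways: a pod whose Get succeeds now blocks the drain only if it is still bound to the node being drained, so a pod recreated under the same name but scheduled elsewhere no longer stalls the loop, and a non-NotFound Get error breaks out of the pass instead of falling through to the next pod. Below is a minimal sketch of the first rule in isolation; the helper name podGone and the standalone package are assumptions for illustration, while the real logic stays inline in drainNode:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	kube_errors "k8s.io/apimachinery/pkg/api/errors"
)

// podGone is a hypothetical helper mirroring the patched check: it reports
// whether a pod evicted from nodeName should stop blocking the drain, given
// the result of re-Getting it from the API server.
func podGone(returned *apiv1.Pod, getErr error, nodeName string) (bool, error) {
	if getErr != nil {
		if kube_errors.IsNotFound(getErr) {
			return true, nil // fully deleted: the eviction completed
		}
		return false, getErr // any other API error aborts this polling pass
	}
	// Get succeeded: the pod blocks the drain only while it is still bound
	// to the node being drained; a same-name pod recreated elsewhere is gone.
	return returned != nil && returned.Spec.NodeName != nodeName, nil
}

func main() {
	rescheduled := &apiv1.Pod{}
	rescheduled.Spec.NodeName = "n2"
	gone, _ := podGone(rescheduled, nil, "n1")
	fmt.Println(gone) // true: same pod name, but now bound to n2
}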
diff --git a/cluster-autoscaler/core/scale_down_test.go b/cluster-autoscaler/core/scale_down_test.go
index 0acfbde9489542e2c53247a721029e0d814f1ea6..f06324ff8ded2c1b451a0720dd8c3cd19d25c40d 100644
--- a/cluster-autoscaler/core/scale_down_test.go
+++ b/cluster-autoscaler/core/scale_down_test.go
@@ -531,6 +531,51 @@ func TestDrainNode(t *testing.T) {
 	assert.Equal(t, p2.Name, deleted[1])
 }
 
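+// TestDrainNodeWithRescheduled checks that a pod recreated under the same
+// name but bound to a different node does not block the drain.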
+func TestDrainNodeWithRescheduled(t *testing.T) {
+	deletedPods := make(chan string, 10)
+	fakeClient := &fake.Clientset{}
+
+	p1 := BuildTestPod("p1", 100, 0)
+	p2 := BuildTestPod("p2", 300, 0)
+	p2Rescheduled := BuildTestPod("p2", 300, 0)
+	p2Rescheduled.Spec.NodeName = "n2"
+	n1 := BuildTestNode("n1", 1000, 1000)
+	SetNodeReadyState(n1, true, time.Time{})
+
+	fakeClient.Fake.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
+		getAction, ok := action.(core.GetAction)
+		if !ok {
+			return false, nil, nil
+		}
+		if getAction.GetName() == "p2" {
+			return true, p2Rescheduled, nil
+		}
+		return true, nil, errors.NewNotFound(apiv1.Resource("pod"), "whatever")
+	})
+	fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
+		createAction, ok := action.(core.CreateAction)
+		if !ok {
+			return false, nil, nil
+		}
+		eviction, ok := createAction.GetObject().(*policyv1.Eviction)
+		if !ok || eviction == nil {
+			return false, nil, nil
+		}
+		deletedPods <- eviction.Name
+		return true, nil, nil
+	})
+	err := drainNode(n1, []*apiv1.Pod{p1, p2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second)
+	assert.NoError(t, err)
+	deleted := make([]string, 0)
+	deleted = append(deleted, getStringFromChan(deletedPods))
+	deleted = append(deleted, getStringFromChan(deletedPods))
+	sort.Strings(deleted)
+	assert.Equal(t, p1.Name, deleted[0])
+	assert.Equal(t, p2.Name, deleted[1])
+}
+
 func TestDrainNodeWithRetries(t *testing.T) {
 	deletedPods := make(chan string, 10)
 	// Simulate a pdb of size 1 by making the goroutines succeed sequentially
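
TestDrainNodeWithRescheduled exercises that branch through client-go's fake clientset: the "get" reactor answers p2 with a same-name copy bound to node n2 and everything else with NotFound, while the "create" reactor records eviction names, so drainNode must treat both pods as successfully drained. The sketch below shows the reactor pattern on its own; the pod fixtures are illustrative, and the Get signature follows the pre-context client-go vintage used in this patch (newer releases take a context.Context first):

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	kube_errors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	client := &fake.Clientset{}
	// A reactor intercepts API calls by verb and resource; returning
	// handled=true short-circuits the default object tracker.
	client.Fake.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
		get, ok := action.(core.GetAction)
		if !ok {
			return false, nil, nil
		}
		if get.GetName() == "p2" {
			rescheduled := &apiv1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"}}
			rescheduled.Spec.NodeName = "n2" // same name, different node
			return true, rescheduled, nil
		}
		return true, nil, kube_errors.NewNotFound(apiv1.Resource("pod"), get.GetName())
	})

	_, err := client.CoreV1().Pods("default").Get("p1", metav1.GetOptions{})
	fmt.Println(kube_errors.IsNotFound(err)) // true: p1 reads as already gone
	p2, _ := client.CoreV1().Pods("default").Get("p2", metav1.GetOptions{})
	fmt.Println(p2.Spec.NodeName) // "n2": same-name pod on another node
}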
diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go
index 9fcda2df211feb247b8194f3879ecec0e28ba9e0..e75ba5a2cdea98757394945ba1de394414a0a832 100644
--- a/cluster-autoscaler/core/static_autoscaler.go
+++ b/cluster-autoscaler/core/static_autoscaler.go
@@ -74,6 +74,8 @@ func NewStaticAutoscaler(opts AutoscalingOptions, predicateChecker *simulator.Pr
 func (a *StaticAutoscaler) CleanUp() {
 	// CA can die at any time. Removing taints that might have been left from the previous run.
 	if readyNodes, err := a.ReadyNodeLister().List(); err != nil {
+		glog.Errorf("Failed to list ready nodes, not cleaning up taints: %v", err)
+	} else {
 		cleanToBeDeleted(readyNodes, a.AutoscalingContext.ClientSet, a.Recorder)
 	}
 }
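
Before this hunk, CleanUp invoked cleanToBeDeleted only inside the err != nil branch, i.e. exactly when there was no node list to operate on, and silently did nothing when List succeeded. The fix logs the failure and moves the cleanup into an else branch, which works because variables declared in an if initializer stay in scope for the whole if/else chain. A toy sketch of that scoping; listNodes and cleanup are hypothetical stand-ins:

package main

import "fmt"

func listNodes() ([]string, error) { return []string{"n1", "n2"}, nil }

func cleanup(nodes []string) { fmt.Println("cleaning taints on", nodes) }

func main() {
	// nodes and err are scoped to the whole if/else chain, so the
	// success path in the else branch can still use nodes.
	if nodes, err := listNodes(); err != nil {
		fmt.Println("failed to list ready nodes, not cleaning up taints:", err)
	} else {
		cleanup(nodes)
	}
}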
diff --git a/cluster-autoscaler/utils/deletetaint/delete.go b/cluster-autoscaler/utils/deletetaint/delete.go
index 422d75ba235a9c88c7a3afc2e0c888e05ff8089d..f17ccda0a7f6fc56124e5c389b356727beabdea6 100644
--- a/cluster-autoscaler/utils/deletetaint/delete.go
+++ b/cluster-autoscaler/utils/deletetaint/delete.go
@@ -57,7 +57,7 @@ func MarkToBeDeleted(node *apiv1.Node, client kube_client.Interface) error {
 func addToBeDeletedTaint(node *apiv1.Node) (bool, error) {
 	for _, taint := range node.Spec.Taints {
 		if taint.Key == ToBeDeletedTaint {
-			glog.V(2).Infof("ToBeDeletedTaint already present on on node %v", taint, node.Name)
+			glog.V(2).Infof("ToBeDeletedTaint already present on node %v, taint: %v", node.Name, taint)
 			return false, nil
 		}
 	}
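
The last hunk repairs a log call that doubled "on" and supplied two arguments (taint, node.Name) for a single %v verb, so the taint printed where the node name belonged and node.Name was appended as %!(EXTRA ...) noise; go vet's printf check catches this class of mismatch for fmt-style calls. A short demonstration with fmt.Printf, which pairs verbs and arguments the same way glog's Infof does:

package main

import "fmt"

func main() {
	taint, node := "ToBeDeletedByClusterAutoscaler", "n1"
	// One %v verb, two arguments: the taint fills the verb and the node
	// name is appended as %!(EXTRA string=n1) noise.
	fmt.Printf("ToBeDeletedTaint already present on on node %v\n", taint, node)
	// Fixed: each verb has a matching argument in the intended order.
	fmt.Printf("ToBeDeletedTaint already present on node %v, taint: %v\n", node, taint)
}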