diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go
index 0aee3cd64fe31d835fff6cf33c8095036e80327f..88f87636f5b359aa0ec711eab901468d67a30294 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go
@@ -50,41 +50,41 @@ func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *core.Po
 
 func addResourcesPatch(idx int) resource_admission.PatchRecord {
 	return resource_admission.PatchRecord{
-		"add",
-		fmt.Sprintf("/spec/containers/%d/resources", idx),
-		core.ResourceRequirements{},
+		Op:    "add",
+		Path:  fmt.Sprintf("/spec/containers/%d/resources", idx),
+		Value: core.ResourceRequirements{},
 	}
 }
 
 func addRequestsPatch(idx int) resource_admission.PatchRecord {
 	return resource_admission.PatchRecord{
-		"add",
-		fmt.Sprintf("/spec/containers/%d/resources/requests", idx),
-		core.ResourceList{},
+		Op:    "add",
+		Path:  fmt.Sprintf("/spec/containers/%d/resources/requests", idx),
+		Value: core.ResourceList{},
 	}
 }
 
 func addLimitsPatch(idx int) resource_admission.PatchRecord {
 	return resource_admission.PatchRecord{
-		"add",
-		fmt.Sprintf("/spec/containers/%d/resources/limits", idx),
-		core.ResourceList{},
+		Op:    "add",
+		Path:  fmt.Sprintf("/spec/containers/%d/resources/limits", idx),
+		Value: core.ResourceList{},
 	}
 }
 
 func addResourceRequestPatch(index int, res, amount string) resource_admission.PatchRecord {
 	return resource_admission.PatchRecord{
-		"add",
-		fmt.Sprintf("/spec/containers/%d/resources/requests/%s", index, res),
-		resource.MustParse(amount),
+		Op:    "add",
+		Path:  fmt.Sprintf("/spec/containers/%d/resources/requests/%s", index, res),
+		Value: resource.MustParse(amount),
 	}
 }
 
 func addResourceLimitPatch(index int, res, amount string) resource_admission.PatchRecord {
 	return resource_admission.PatchRecord{
-		"add",
-		fmt.Sprintf("/spec/containers/%d/resources/limits/%s", index, res),
-		resource.MustParse(amount),
+		Op:    "add",
+		Path:  fmt.Sprintf("/spec/containers/%d/resources/limits/%s", index, res),
+		Value: resource.MustParse(amount),
 	}
 }
 
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler_test.go
index f40291a7844263caafd6732aa6ad587cc2205741..cacadfa445234af5105c749e73ee26945be629b5 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler_test.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler_test.go
@@ -27,8 +27,7 @@ import (
 )
 
 const (
-	cpu    = apiv1.ResourceCPU
-	memory = apiv1.ResourceMemory
+	cpu = apiv1.ResourceCPU
 )
 
 func TestValidateVPA(t *testing.T) {
diff --git a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go
index ea9fc36f58859207a673bfbbd7184a0c75d846fd..c7bcedf7ecab703a030bc6e7b889889d4b059713 100644
--- a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go
@@ -31,12 +31,21 @@ import (
 
 // TODO: Extract these constants to a common test module.
 var (
-	testPodID1       = model.PodID{"namespace-1", "pod-1"}
-	testContainerID1 = model.ContainerID{testPodID1, "container-1"}
-	testVpaID1       = model.VpaID{"namespace-1", "vpa-1"}
-	testLabels       = map[string]string{"label-1": "value-1"}
-	testSelectorStr  = "label-1 = value-1"
-	testRequest      = model.Resources{
+	testPodID1 = model.PodID{
+		Namespace: "namespace-1",
+		PodName:   "pod-1",
+	}
+	testContainerID1 = model.ContainerID{
+		PodID:         testPodID1,
+		ContainerName: "container-1",
+	}
+	testVpaID1 = model.VpaID{
+		Namespace: "namespace-1",
+		VpaName:   "vpa-1",
+	}
+	testLabels      = map[string]string{"label-1": "value-1"}
+	testSelectorStr = "label-1 = value-1"
+	testRequest     = model.Resources{
 		model.ResourceCPU:    model.CPUAmountFromCores(3.14),
 		model.ResourceMemory: model.MemoryAmountFromBytes(3.14e9),
 	}
@@ -65,7 +74,11 @@ func TestMergeContainerStateForCheckpointDropsRecentMemoryPeak(t *testing.T) {
 
 	timeNow := time.Unix(1, 0)
 	container.AddSample(&model.ContainerUsageSample{
-		timeNow, model.MemoryAmountFromBytes(1024 * 1024 * 1024), testRequest[model.ResourceMemory], model.ResourceMemory})
+		MeasureStart: timeNow,
+		Usage:        model.MemoryAmountFromBytes(1024 * 1024 * 1024),
+		Request:      testRequest[model.ResourceMemory],
+		Resource:     model.ResourceMemory,
+	})
 	vpa := addVpa(t, cluster, testVpaID1, testSelectorStr)
 
 	// Verify that the current peak is excluded from the aggregation.
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
index 6c21ba6cf401c8fcc6a2960f55298c376e5d6bc7..bfce561a3a8614aa4572a3c8fab66488c0758353 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
@@ -400,7 +400,9 @@ func (feeder *clusterStateFeeder) LoadVPAs() {
 	for vpaID := range feeder.clusterState.Vpas {
 		if _, exists := vpaKeys[vpaID]; !exists {
 			klog.V(3).Infof("Deleting VPA %v", vpaID)
-			feeder.clusterState.DeleteVpa(vpaID)
+			if err := feeder.clusterState.DeleteVpa(vpaID); err != nil {
+				klog.Errorf("Deleting VPA %v failed: %v", vpaID, err)
+			}
 		}
 	}
 	feeder.clusterState.ObservedVpas = vpaCRDs
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go
index 360cc2e7b0f324a55bc3af16289ba6557ffaee65..234815ae4b57333158ef465e23ebe2a8ba893778 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go
@@ -52,13 +52,11 @@ func parseLabelSelector(selector string) labels.Selector {
 }
 
 var (
-	unsupportedConditionNoLongerSupported = "Label selector is no longer supported, please migrate to targetRef"
-	unsupportedConditionTextFromFetcher   = "Cannot read targetRef. Reason: targetRef not defined"
-	unsupportedConditionNoExtraText       = "Cannot read targetRef"
-	unsupportedConditionBothDefined       = "Both targetRef and label selector defined. Please remove label selector"
-	unsupportedConditionNoTargetRef       = "Cannot read targetRef"
-	unsupportedConditionMudaMudaMuda      = "Error checking if target is a topmost well-known or scalable controller: muda muda muda"
-	unsupportedTargetRefHasParent         = "The targetRef controller has a parent but it should point to a topmost well-known or scalable controller"
+	unsupportedConditionTextFromFetcher = "Cannot read targetRef. Reason: targetRef not defined"
+	unsupportedConditionNoExtraText     = "Cannot read targetRef"
+	unsupportedConditionNoTargetRef     = "Cannot read targetRef"
+	unsupportedConditionMudaMudaMuda    = "Error checking if target is a topmost well-known or scalable controller: muda muda muda"
+	unsupportedTargetRefHasParent       = "The targetRef controller has a parent but it should point to a topmost well-known or scalable controller"
 )
 
 const (
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_cache_storage_test.go b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_cache_storage_test.go
index 0ee7b8d38965da29d33fd151b03ae43058735580..b9241945631656d143ed86401659734f2ae5d7b3 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_cache_storage_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_cache_storage_test.go
@@ -125,7 +125,8 @@ func TestControllerCache_GetRefreshesDeleteAfter(t *testing.T) {
 	assert.Equal(t, startTime.Add(10*time.Second), c.cache[key].deleteAfter)
 
 	timeNow = startTime.Add(5 * time.Second)
-	c.Get(key.namespace, key.groupResource, key.name)
+	_, _, err := c.Get(key.namespace, key.groupResource, key.name)
+	assert.NoError(t, err)
 	assert.Equal(t, startTime.Add(15*time.Second), c.cache[key].deleteAfter)
 }
 
@@ -154,7 +155,8 @@ func TestControllerCache_GetChangesLifeTimeNotFreshness(t *testing.T) {
 	assertTimeBetween(t, firstRefreshAfter, startTime.Add(time.Second), startTime.Add(2*time.Second))
 
 	timeNow = startTime.Add(5 * time.Second)
-	c.Get(key.namespace, key.groupResource, key.name)
+	_, _, err := c.Get(key.namespace, key.groupResource, key.name)
+	assert.NoError(t, err)
 	cacheEntry = c.cache[key]
 	// scheduled to delete 10s after get (15s after insert)
 	assert.Equal(t, startTime.Add(15*time.Second), cacheEntry.deleteAfter)
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher.go b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher.go
index 8aa18ac89f50a6116ebbe3d5a71d42fcf105c251..d257222aa6ac3d6ab1e00391bb5907bbe7cc5a43 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher.go
@@ -180,52 +180,24 @@ func getParentOfWellKnownController(informer cache.SharedIndexInformer, controll
 	if !exists {
 		return nil, fmt.Errorf("%s %s/%s does not exist", kind, namespace, name)
 	}
-	switch obj.(type) {
+	switch apiObj := obj.(type) {
 	case (*appsv1.DaemonSet):
-		apiObj, ok := obj.(*appsv1.DaemonSet)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return getOwnerController(apiObj.OwnerReferences, namespace), nil
 	case (*appsv1.Deployment):
-		apiObj, ok := obj.(*appsv1.Deployment)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return getOwnerController(apiObj.OwnerReferences, namespace), nil
 	case (*appsv1.StatefulSet):
-		apiObj, ok := obj.(*appsv1.StatefulSet)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return getOwnerController(apiObj.OwnerReferences, namespace), nil
 	case (*appsv1.ReplicaSet):
-		apiObj, ok := obj.(*appsv1.ReplicaSet)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return getOwnerController(apiObj.OwnerReferences, namespace), nil
 	case (*batchv1.Job):
-		apiObj, ok := obj.(*batchv1.Job)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return getOwnerController(apiObj.OwnerReferences, namespace), nil
 	case (*batchv1beta1.CronJob):
-		apiObj, ok := obj.(*batchv1beta1.CronJob)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return getOwnerController(apiObj.OwnerReferences, namespace), nil
 	case (*corev1.ReplicationController):
-		apiObj, ok := obj.(*corev1.ReplicationController)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return getOwnerController(apiObj.OwnerReferences, namespace), nil
 	}
 
-	return nil, fmt.Errorf("Don't know how to read owner controller")
+	return nil, fmt.Errorf("don't know how to read owner controller")
 }
 
 func (f *controllerFetcher) getParentOfController(controllerKey ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) {
@@ -376,34 +348,3 @@ func (f *controllerFetcher) FindTopMostWellKnownOrScalable(key *ControllerKeyWit
 		key = owner
 	}
 }
-
-type identityControllerFetcher struct {
-}
-
-func (f *identityControllerFetcher) FindTopMostWellKnownOrScalable(controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) {
-	return controller, nil
-}
-
-type constControllerFetcher struct {
-	ControllerKeyWithAPIVersion *ControllerKeyWithAPIVersion
-}
-
-func (f *constControllerFetcher) FindTopMostWellKnownOrScalable(controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) {
-	return f.ControllerKeyWithAPIVersion, nil
-}
-
-type mockControllerFetcher struct {
-	expected *ControllerKeyWithAPIVersion
-	result   *ControllerKeyWithAPIVersion
-}
-
-func (f *mockControllerFetcher) FindTopMostWellKnownOrScalable(controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) {
-	if controller == nil && f.expected == nil {
-		return f.result, nil
-	}
-	if controller == nil || *controller != *f.expected {
-		return nil, fmt.Errorf("Unexpected argument: %v", controller)
-	}
-
-	return f.result, nil
-}
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher_test.go b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher_test.go
index 8c5e284b5ab5864c99e47d81bcadb8f8fb3d4eb0..705a02238957464138679178b7b75305f6152e19 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher_test.go
@@ -98,18 +98,18 @@ func simpleControllerFetcher() *controllerFetcher {
 	return &f
 }
 
-func addController(controller *controllerFetcher, obj runtime.Object) {
+func addController(t *testing.T, controller *controllerFetcher, obj runtime.Object) {
 	kind := wellKnownController(obj.GetObjectKind().GroupVersionKind().Kind)
 	_, ok := controller.informersMap[kind]
 	if ok {
-		controller.informersMap[kind].GetStore().Add(obj)
+		err := controller.informersMap[kind].GetStore().Add(obj)
+		assert.NoError(t, err)
 	}
 }
 
 func TestControllerFetcher(t *testing.T) {
 	type testCase struct {
 		name          string
-		apiVersion    string
 		key           *ControllerKeyWithAPIVersion
 		objects       []runtime.Object
 		expectedKey   *ControllerKeyWithAPIVersion
@@ -383,7 +383,7 @@ func TestControllerFetcher(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			f := simpleControllerFetcher()
 			for _, obj := range tc.objects {
-				addController(f, obj)
+				addController(t, f, obj)
 			}
 			topMostWellKnownOrScalableController, err := f.FindTopMostWellKnownOrScalable(tc.key)
 			if tc.expectedKey == nil {
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider.go b/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider.go
index a7f4082567d624ecc145a3bead6b7aad867ae03b..087244329d069300354d6d549baef9aca189777c 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider.go
@@ -289,6 +289,10 @@ func (p *prometheusHistoryProvider) GetClusterHistory() (map[model.PodID]*PodHis
 			sort.Slice(samples, func(i, j int) bool { return samples[i].MeasureStart.Before(samples[j].MeasureStart) })
 		}
 	}
-	p.readLastLabels(res, p.config.PodLabelsMetricName)
+	err = p.readLastLabels(res, p.config.PodLabelsMetricName)
+	if err != nil {
+		return nil, fmt.Errorf("cannot read last labels: %v", err)
+	}
+
 	return res, nil
 }
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider_test.go b/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider_test.go
index 83ea4393cffa20c5acdc801939efe3781dc1deb0..51e20a7e9ae3e17b0d06836851b6488df39c6d25 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider_test.go
@@ -59,59 +59,59 @@ type mockPrometheusAPI struct {
 	mock.Mock
 }
 
-func (m mockPrometheusAPI) AlertManagers(ctx context.Context) (prometheusv1.AlertManagersResult, error) {
+func (m *mockPrometheusAPI) AlertManagers(ctx context.Context) (prometheusv1.AlertManagersResult, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) Alerts(ctx context.Context) (prometheusv1.AlertsResult, error) {
+func (m *mockPrometheusAPI) Alerts(ctx context.Context) (prometheusv1.AlertsResult, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) CleanTombstones(ctx context.Context) error {
+func (m *mockPrometheusAPI) CleanTombstones(ctx context.Context) error {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) Config(ctx context.Context) (prometheusv1.ConfigResult, error) {
+func (m *mockPrometheusAPI) Config(ctx context.Context) (prometheusv1.ConfigResult, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error {
+func (m *mockPrometheusAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) Flags(ctx context.Context) (prometheusv1.FlagsResult, error) {
+func (m *mockPrometheusAPI) Flags(ctx context.Context) (prometheusv1.FlagsResult, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) LabelNames(ctx context.Context) ([]string, error) {
+func (m *mockPrometheusAPI) LabelNames(ctx context.Context) ([]string, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) LabelValues(ctx context.Context, label string) (prommodel.LabelValues, error) {
+func (m *mockPrometheusAPI) LabelValues(ctx context.Context, label string) (prommodel.LabelValues, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]prommodel.LabelSet, api.Warnings, error) {
+func (m *mockPrometheusAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]prommodel.LabelSet, api.Warnings, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) Rules(ctx context.Context) (prometheusv1.RulesResult, error) {
+func (m *mockPrometheusAPI) Rules(ctx context.Context) (prometheusv1.RulesResult, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) Snapshot(ctx context.Context, skipHead bool) (prometheusv1.SnapshotResult, error) {
+func (m *mockPrometheusAPI) Snapshot(ctx context.Context, skipHead bool) (prometheusv1.SnapshotResult, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) Targets(ctx context.Context) (prometheusv1.TargetsResult, error) {
+func (m *mockPrometheusAPI) Targets(ctx context.Context) (prometheusv1.TargetsResult, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) TargetsMetadata(ctx context.Context, _, _, _ string) ([]prometheusv1.MetricMetadata, error) {
+func (m *mockPrometheusAPI) TargetsMetadata(ctx context.Context, _, _, _ string) ([]prometheusv1.MetricMetadata, error) {
 	panic("not implemented")
 }
 
-func (m mockPrometheusAPI) Query(ctx context.Context, query string, ts time.Time) (prommodel.Value, api.Warnings, error) {
+func (m *mockPrometheusAPI) Query(ctx context.Context, query string, ts time.Time) (prommodel.Value, api.Warnings, error) {
 	args := m.Called(ctx, query, ts)
 	var returnArg prommodel.Value
 	if args.Get(0) != nil {
@@ -120,7 +120,7 @@ func (m mockPrometheusAPI) Query(ctx context.Context, query string, ts time.Time
 	return returnArg, nil, args.Error(1)
 }
 
-func (m mockPrometheusAPI) QueryRange(ctx context.Context, query string, r prometheusv1.Range) (prommodel.Value, api.Warnings, error) {
+func (m *mockPrometheusAPI) QueryRange(ctx context.Context, query string, r prometheusv1.Range) (prommodel.Value, api.Warnings, error) {
 	args := m.Called(ctx, query, r)
 	var returnArg prommodel.Value
 	if args.Get(0) != nil {
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
index f40df0f1e395692c16e81c1df84e92b874b7395d..7e572540e2f40dd0f6aff21e53a0b2dd8049cda1 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
@@ -138,11 +138,11 @@ func (*observer) OnAdd(obj interface{}) {}
 // passess it to the ObservedOomsChannel
 func (o *observer) OnUpdate(oldObj, newObj interface{}) {
 	oldPod, ok := oldObj.(*apiv1.Pod)
-	if oldPod == nil || !ok {
+	if !ok {
 		klog.Errorf("OOM observer received invalid oldObj: %v", oldObj)
 	}
 	newPod, ok := newObj.(*apiv1.Pod)
-	if newPod == nil || !ok {
+	if !ok {
 		klog.Errorf("OOM observer received invalid newObj: %v", newObj)
 	}
 
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go
index d45ac6ba84ebb64defa1a25ddee76e651119d311..1319beb75d4442e6ffb9dbe9d0f9ae1851d1008a 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
 )
 
@@ -32,7 +33,7 @@ var scheme = runtime.NewScheme()
 var codecs = serializer.NewCodecFactory(scheme)
 
 func init() {
-	v1.AddToScheme(scheme)
+	utilruntime.Must(v1.AddToScheme(scheme))
 }
 
 const pod1Yaml = `
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go
index 6118579a7e9dbbf47dbbaa0c7b3248d21a442efd..ccb6aabe4b2e5de982814039b2c03f73e6dadf89 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
 	v1lister "k8s.io/client-go/listers/core/v1"
 )
@@ -33,7 +34,7 @@ var scheme = runtime.NewScheme()
 var codecs = serializer.NewCodecFactory(scheme)
 
 func init() {
-	v1.AddToScheme(scheme)
+	utilruntime.Must(v1.AddToScheme(scheme))
 }
 
 const pod1Yaml = `
diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go b/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go
index 30a280d850af45db7f86ee118abdfb74bd517762..4e83e448ad7a2a71ae34bdaa0ed656d301651b63 100644
--- a/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go
@@ -71,14 +71,22 @@ func TestConfidenceMultiplier(t *testing.T) {
 		model.ResourceCPU:    model.CPUAmountFromCores(3.14),
 		model.ResourceMemory: model.MemoryAmountFromBytes(3.14e9),
 	})
-	testedEstimator := &confidenceMultiplier{0.1, 2.0, baseEstimator}
+	testedEstimator := &confidenceMultiplier{
+		multiplier:    0.1,
+		exponent:      2.0,
+		baseEstimator: baseEstimator,
+	}
 
 	s := model.NewAggregateContainerState()
 	// Add 9 CPU samples at the frequency of 1/(2 mins).
 	timestamp := anyTime
 	for i := 1; i <= 9; i++ {
 		s.AddSample(&model.ContainerUsageSample{
-			timestamp, model.CPUAmountFromCores(1.0), testRequest[model.ResourceCPU], model.ResourceCPU})
+			MeasureStart: timestamp,
+			Usage:        model.CPUAmountFromCores(1.0),
+			Request:      testRequest[model.ResourceCPU],
+			Resource:     model.ResourceCPU,
+		})
 		timestamp = timestamp.Add(time.Minute * 2)
 	}
 
diff --git a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go
index 2b4383b88a79988fdb3c1600576c613028a21333..0067f807450aa75e10bd6ea141573ee2c8346681 100644
--- a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go
@@ -29,10 +29,9 @@ import (
 )
 
 var (
-	testPodID1       = PodID{"namespace-1", "pod-1"}
-	testPodID2       = PodID{"namespace-1", "pod-2"}
-	testContainerID1 = ContainerID{testPodID1, "container-1"}
-	testRequest      = Resources{
+	testPodID1  = PodID{"namespace-1", "pod-1"}
+	testPodID2  = PodID{"namespace-1", "pod-2"}
+	testRequest = Resources{
 		ResourceCPU:    CPUAmountFromCores(3.14),
 		ResourceMemory: MemoryAmountFromBytes(3.14e9),
 	}
@@ -141,7 +140,7 @@ func TestAggregateContainerStateSaveToCheckpoint(t *testing.T) {
 
 	assert.NoError(t, err)
 
-	assert.True(t, time.Now().Sub(checkpoint.LastUpdateTime.Time) < 10*time.Second)
+	assert.True(t, time.Since(checkpoint.LastUpdateTime.Time) < 10*time.Second)
 	assert.Equal(t, t1, checkpoint.FirstSampleStart.Time)
 	assert.Equal(t, t2, checkpoint.LastSampleStart.Time)
 	assert.Equal(t, 10, checkpoint.TotalSamplesCount)
diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go
index bfe05b2c0511dfa3beba8c4e50ba2ecf4a63191c..043a6870ef8a92c114e55f83a35015e77b8a8e21 100644
--- a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go
@@ -236,7 +236,7 @@ func TestAddSampleAfterAggregateContainerStateGCed(t *testing.T) {
 	cluster := NewClusterState(testGcPeriod)
 	vpa := addTestVpa(cluster)
 	pod := addTestPod(cluster)
-	addTestContainer(cluster)
+	addTestContainer(t, cluster)
 
 	assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest))
 	usageSample := makeTestUsageSample()
@@ -352,8 +352,9 @@ func addTestPod(cluster *ClusterState) *PodState {
 	return cluster.Pods[testPodID]
 }
 
-func addTestContainer(cluster *ClusterState) *ContainerState {
-	cluster.AddOrUpdateContainer(testContainerID, testRequest)
+func addTestContainer(t *testing.T, cluster *ClusterState) *ContainerState {
+	err := cluster.AddOrUpdateContainer(testContainerID, testRequest)
+	assert.NoError(t, err)
 	return cluster.GetContainer(testContainerID)
 }
 
@@ -364,7 +365,7 @@ func TestAddVpaThenAddPod(t *testing.T) {
 	vpa := addTestVpa(cluster)
 	assert.Empty(t, vpa.aggregateContainerStates)
 	addTestPod(cluster)
-	addTestContainer(cluster)
+	addTestContainer(t, cluster)
 	aggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID)
 	assert.Contains(t, vpa.aggregateContainerStates, aggregateStateKey)
 }
@@ -374,7 +375,7 @@ func TestAddVpaThenAddPod(t *testing.T) {
 func TestAddPodThenAddVpa(t *testing.T) {
 	cluster := NewClusterState(testGcPeriod)
 	addTestPod(cluster)
-	addTestContainer(cluster)
+	addTestContainer(t, cluster)
 	vpa := addTestVpa(cluster)
 	aggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID)
 	assert.Contains(t, vpa.aggregateContainerStates, aggregateStateKey)
@@ -387,7 +388,7 @@ func TestChangePodLabels(t *testing.T) {
 	cluster := NewClusterState(testGcPeriod)
 	vpa := addTestVpa(cluster)
 	addTestPod(cluster)
-	addTestContainer(cluster)
+	addTestContainer(t, cluster)
 	aggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID)
 	assert.Contains(t, vpa.aggregateContainerStates, aggregateStateKey)
 	// Update Pod labels to no longer match the VPA.
@@ -418,12 +419,11 @@ func TestUpdateAnnotations(t *testing.T) {
 // between the pod and the VPA are updated correctly each time.
 func TestUpdatePodSelector(t *testing.T) {
 	cluster := NewClusterState(testGcPeriod)
-	vpa := addTestVpa(cluster)
 	addTestPod(cluster)
-	addTestContainer(cluster)
+	addTestContainer(t, cluster)
 
 	// Update the VPA selector such that it still matches the Pod.
-	vpa = addVpa(cluster, testVpaID, testAnnotations, "label-1 in (value-1,value-2)", testTargetRef)
+	vpa := addVpa(cluster, testVpaID, testAnnotations, "label-1 in (value-1,value-2)", testTargetRef)
 	assert.Contains(t, vpa.aggregateContainerStates, cluster.aggregateStateKeyForContainerID(testContainerID))
 
 	// Update the VPA selector to no longer match the Pod.
@@ -549,7 +549,7 @@ func TestAddOrUpdateVPAPolicies(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			cluster := NewClusterState(testGcPeriod)
 			addTestPod(cluster)
-			addTestContainer(cluster)
+			addTestContainer(t, cluster)
 			if tc.oldVpa != nil {
 				oldVpa := addVpaObject(cluster, testVpaID, tc.oldVpa, testSelectorStr)
 				if !assert.Contains(t, cluster.Vpas, testVpaID) {
@@ -596,8 +596,10 @@ func TestTwoPodsWithSameLabels(t *testing.T) {
 	cluster := NewClusterState(testGcPeriod)
 	cluster.AddOrUpdatePod(podID1, testLabels, apiv1.PodRunning)
 	cluster.AddOrUpdatePod(podID2, testLabels, apiv1.PodRunning)
-	cluster.AddOrUpdateContainer(containerID1, testRequest)
-	cluster.AddOrUpdateContainer(containerID2, testRequest)
+	err := cluster.AddOrUpdateContainer(containerID1, testRequest)
+	assert.NoError(t, err)
+	err = cluster.AddOrUpdateContainer(containerID2, testRequest)
+	assert.NoError(t, err)
 
 	// Expect only one aggregation to be created.
 	assert.Equal(t, 1, len(cluster.aggregateStateMap))
@@ -613,8 +615,10 @@ func TestTwoPodsWithDifferentNamespaces(t *testing.T) {
 	cluster := NewClusterState(testGcPeriod)
 	cluster.AddOrUpdatePod(podID1, testLabels, apiv1.PodRunning)
 	cluster.AddOrUpdatePod(podID2, testLabels, apiv1.PodRunning)
-	cluster.AddOrUpdateContainer(containerID1, testRequest)
-	cluster.AddOrUpdateContainer(containerID2, testRequest)
+	err := cluster.AddOrUpdateContainer(containerID1, testRequest)
+	assert.NoError(t, err)
+	err = cluster.AddOrUpdateContainer(containerID2, testRequest)
+	assert.NoError(t, err)
 
 	// Expect two separate aggregations to be created.
 	assert.Equal(t, 2, len(cluster.aggregateStateMap))
diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go
index df76b307e587009fbb3bb0e39aff8b2322fc0a63..5a4fbf3064e27dd1611506ccd6540cd1191a30a0 100644
--- a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go
+++ b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go
@@ -151,7 +151,9 @@ func getCappedRecommendation(vpaID model.VpaID, resources logic.RecommendedPodRe
 			UncappedTarget: model.ResourcesAsResourceList(res.Target),
 		})
 	}
-	recommendation := &vpa_types.RecommendedPodResources{containerResources}
+	recommendation := &vpa_types.RecommendedPodResources{
+		ContainerRecommendations: containerResources,
+	}
 	cappedRecommendation, err := vpa_utils.ApplyVPAPolicy(recommendation, policy)
 	if err != nil {
 		klog.Errorf("Failed to apply policy for VPA %v/%v: %v", vpaID.Namespace, vpaID.VpaName, err)
@@ -166,7 +168,7 @@ func (r *recommender) MaintainCheckpoints(ctx context.Context, minCheckpointsPer
 		if err := r.checkpointWriter.StoreCheckpoints(ctx, now, minCheckpointsPerRun); err != nil {
 			klog.Warningf("Failed to store checkpoints. Reason: %+v", err)
 		}
-		if time.Now().Sub(r.lastCheckpointGC) > r.checkpointsGCInterval {
+		if time.Since(r.lastCheckpointGC) > r.checkpointsGCInterval {
 			r.lastCheckpointGC = now
 			r.clusterStateFeeder.GarbageCollectCheckpoints()
 		}
diff --git a/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go b/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go
index 71f4b4ad2a9ec9a82d0c10da55eaafba639ee5ac..94cdb3dbd12f084c133cbd9f2e0ee319f75c9702 100644
--- a/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go
@@ -163,7 +163,8 @@ func TestDecayingHistogramLoadFromCheckpoint(t *testing.T) {
 		halfLife:           time.Hour,
 		referenceTimestamp: time.Time{},
 	}
-	d.LoadFromCheckpoint(&checkpoint)
+	err := d.LoadFromCheckpoint(&checkpoint)
+	assert.NoError(t, err)
 
 	assert.False(t, d.histogram.IsEmpty())
 	assert.Equal(t, timestamp, d.referenceTimestamp)
diff --git a/vertical-pod-autoscaler/pkg/target/fetcher.go b/vertical-pod-autoscaler/pkg/target/fetcher.go
index dd8ac7b9c18018f875de10d9a9cddeb249dfe531..a26848d8827e0b49137518701a8f23a6e22b828a 100644
--- a/vertical-pod-autoscaler/pkg/target/fetcher.go
+++ b/vertical-pod-autoscaler/pkg/target/fetcher.go
@@ -154,51 +154,23 @@ func getLabelSelector(informer cache.SharedIndexInformer, kind, namespace, name
 	if !exists {
 		return nil, fmt.Errorf("%s %s/%s does not exist", kind, namespace, name)
 	}
-	switch obj.(type) {
+	switch apiObj := obj.(type) {
 	case (*appsv1.DaemonSet):
-		apiObj, ok := obj.(*appsv1.DaemonSet)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector)
 	case (*appsv1.Deployment):
-		apiObj, ok := obj.(*appsv1.Deployment)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector)
 	case (*appsv1.StatefulSet):
-		apiObj, ok := obj.(*appsv1.StatefulSet)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector)
 	case (*appsv1.ReplicaSet):
-		apiObj, ok := obj.(*appsv1.ReplicaSet)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector)
 	case (*batchv1.Job):
-		apiObj, ok := obj.(*batchv1.Job)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector)
 	case (*batchv1beta1.CronJob):
-		apiObj, ok := obj.(*batchv1beta1.CronJob)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return metav1.LabelSelectorAsSelector(metav1.SetAsLabelSelector(apiObj.Spec.JobTemplate.Spec.Template.Labels))
 	case (*corev1.ReplicationController):
-		apiObj, ok := obj.(*corev1.ReplicationController)
-		if !ok {
-			return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name)
-		}
 		return metav1.LabelSelectorAsSelector(metav1.SetAsLabelSelector(apiObj.Spec.Selector))
 	}
-	return nil, fmt.Errorf("Don't know how to read label seletor")
+	return nil, fmt.Errorf("don't know how to read label selector")
 }
 
 func (f *vpaTargetSelectorFetcher) getLabelSelectorFromResource(
diff --git a/vertical-pod-autoscaler/pkg/updater/main.go b/vertical-pod-autoscaler/pkg/updater/main.go
index ea2342d6757e5d64a6ab72e72cc896c70b5698f1..6f738c25ce7117d31af16ba1e0cd19c7a09f585d 100644
--- a/vertical-pod-autoscaler/pkg/updater/main.go
+++ b/vertical-pod-autoscaler/pkg/updater/main.go
@@ -113,9 +113,9 @@ func main() {
 		klog.Fatalf("Failed to create updater: %v", err)
 	}
 	ticker := time.Tick(*updaterInterval)
 	for range ticker {
-		ctx, cancel := context.WithTimeout(context.Background(), *updaterInterval)
-		defer cancel()
+		ctx, cancel := context.WithTimeout(context.Background(), *updaterInterval)
 		updater.RunOnce(ctx)
+		cancel()
 		healthCheck.UpdateLastActivity()
 	}
diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/admission/admission.go b/vertical-pod-autoscaler/pkg/utils/metrics/admission/admission.go
index a3f01147d5fe7340e9aa0ce188b3ce3f1b0fb886..a043939b251fd44bd4ccddaab6fa46fba210294f 100644
--- a/vertical-pod-autoscaler/pkg/utils/metrics/admission/admission.go
+++ b/vertical-pod-autoscaler/pkg/utils/metrics/admission/admission.go
@@ -100,5 +100,5 @@ func NewAdmissionLatency() *AdmissionLatency {
 
 // Observe measures the execution time from when the AdmissionLatency was created
 func (t *AdmissionLatency) Observe(status AdmissionStatus, resource AdmissionResource) {
-	(*t.histo).WithLabelValues(string(status), string(resource)).Observe(time.Now().Sub(t.start).Seconds())
+	(*t.histo).WithLabelValues(string(status), string(resource)).Observe(time.Since(t.start).Seconds())
 }
diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go b/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go
index e72f5c9265d4dd1a82c04d0d1aee9becb909f8ab..78417dc13789a2aeec71b5be83a1a2101b873743 100644
--- a/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go
+++ b/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go
@@ -21,6 +21,8 @@ import (
 	"net/http"
 	"sync"
 	"time"
+
+	"k8s.io/klog/v2"
 )
 
 // HealthCheck contains information about last activity time of the monitored component.
@@ -59,11 +61,13 @@ func (hc *HealthCheck) checkLastActivity() (bool, time.Duration) {
 func (hc *HealthCheck) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	timedOut, ago := hc.checkLastActivity()
 	if timedOut {
-		w.WriteHeader(500)
-		w.Write([]byte(fmt.Sprintf("Error: last activity more than %v ago", ago)))
+		http.Error(w, fmt.Sprintf("Error: last activity more than %v ago", ago), http.StatusInternalServerError)
 	} else {
 		w.WriteHeader(200)
-		w.Write([]byte("OK"))
+		_, err := w.Write([]byte("OK"))
+		if err != nil {
+			klog.Errorf("Failed to write response message: %v", err)
+		}
 	}
 }
 
diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go b/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go
index 4c0718ebb3a0ddc011fd7ce0155b3ca59b82d866..fa8329025aac1f2ccf1e39e27efb59e085a75e09 100644
--- a/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go
+++ b/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go
@@ -77,7 +77,7 @@ func (t *ExecutionTimer) ObserveStep(step string) {
 
 // ObserveTotal measures the execution time from the creation of the ExecutionTimer
 func (t *ExecutionTimer) ObserveTotal() {
-	(*t.histo).WithLabelValues("total").Observe(time.Now().Sub(t.start).Seconds())
+	(*t.histo).WithLabelValues("total").Observe(time.Since(t.start).Seconds())
 }
 
 // CreateExecutionTimeMetric prepares a new histogram labeled with execution step
diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go b/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go
index 83dca2ce569d230c3a5064bda1b6090996906c91..c8d564585c013f70ac22481440eccba88403af37 100644
--- a/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go
+++ b/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go
@@ -98,7 +98,7 @@ func NewExecutionTimer() *metrics.ExecutionTimer {
 
 // ObserveRecommendationLatency observes the time it took for the first recommendation to appear
 func ObserveRecommendationLatency(created time.Time) {
-	recommendationLatency.Observe(time.Now().Sub(created).Seconds())
+	recommendationLatency.Observe(time.Since(created).Seconds())
 }
 
 // RecordAggregateContainerStatesCount records the number of containers being tracked by the recommender
diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_pod.go b/vertical-pod-autoscaler/pkg/utils/test/test_pod.go
index 0a85523753453001eafec24cc121f8642d9bdc75..e5f66e81a5649e0e3399173fe981300bb2ea5a0d 100644
--- a/vertical-pod-autoscaler/pkg/utils/test/test_pod.go
+++ b/vertical-pod-autoscaler/pkg/utils/test/test_pod.go
@@ -87,7 +87,9 @@ func (pb *podBuilderImpl) WithPhase(phase apiv1.PodPhase) PodBuilder {
 }
 
 func (pb *podBuilderImpl) Get() *apiv1.Pod {
-	startTime := metav1.Time{testTimestamp}
+	startTime := metav1.Time{
+		Time: testTimestamp,
+	}
 	pod := &apiv1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "default",
diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go b/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go
index 1da3a076aff359f4e02c2ab8d99b024d344f3639..f75a078d29658a52cbf81217344f73eb0181cc74 100644
--- a/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go
+++ b/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go
@@ -183,9 +183,7 @@ func (b *verticalPodAutoscalerBuilder) Get() *vpa_types.VerticalPodAutoscaler {
 	}}}
 
 	recommendation := b.recommendation.WithContainer(b.containerName).Get()
-	for _, rec := range b.appendedRecommendations {
-		recommendation.ContainerRecommendations = append(recommendation.ContainerRecommendations, rec)
-	}
+	recommendation.ContainerRecommendations = append(recommendation.ContainerRecommendations, b.appendedRecommendations...)
 
 	return &vpa_types.VerticalPodAutoscaler{
 		ObjectMeta: meta.ObjectMeta{
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go
index 19db3ab2787727364e4b249106386a31ac58dd83..bd13dc8c7595d14cc8066b9d1dfe3819dcd64540 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go
@@ -40,8 +40,8 @@ var (
 )
 
 func init() {
-	flag.Set("alsologtostderr", "true")
-	flag.Set("v", "5")
+	flag.Set("alsologtostderr", "true") //nolint:errcheck
+	flag.Set("v", "5")                  //nolint:errcheck
 }
 
 func parseLabelSelector(selector string) labels.Selector {
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
index a791d137391276c4d9db34e6b0c45bee7271f4f0..4fbb4bc7d24719094bde8a276a38d60268e18185 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
@@ -26,11 +26,6 @@ import (
 	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
 )
 
-var (
-	requestsAndLimits vpa_types.ContainerControlledValues = vpa_types.ContainerControlledValuesRequestsAndLimits
-	requestsOnly      vpa_types.ContainerControlledValues = vpa_types.ContainerControlledValuesRequestsOnly
-)
-
 func TestRecommendationNotAvailable(t *testing.T) {
 	pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get()
 	podRecommendation := vpa_types.RecommendedPodResources{
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go
index 7262dd6d2e7986a9c7fcaf8b6b1a613f06f98b90..c743d2fd4f4719fce1407fa6a0637905dfdaa806 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go
@@ -31,13 +31,6 @@ type ContainerResources struct {
 	Requests core.ResourceList
 }
 
-func newContainerResources() ContainerResources {
-	return ContainerResources{
-		Requests: core.ResourceList{},
-		Limits:   core.ResourceList{},
-	}
-}
-
 // GetProportionalLimit returns limit that will be in the same proportion to recommended request as original limit had to original request.
 func GetProportionalLimit(originalLimit, originalRequest, recommendation, defaultLimit core.ResourceList) (core.ResourceList, []string) {
 	annotations := []string{}