From 0a960e6384b824751cd40e6bb46a0769c48775d4 Mon Sep 17 00:00:00 2001 From: Mike Fedosin <mfedosin@redhat.com> Date: Tue, 15 Feb 2022 14:53:07 +0100 Subject: [PATCH] Fix lint issues in vertical pod autoscaler Issues reported by: golangci-lint run --timeout 15m --- .../pod/patch/resource_updates_test.go | 30 ++++----- .../resource/vpa/handler_test.go | 3 +- .../checkpoint/checkpoint_writer_test.go | 27 +++++--- .../pkg/recommender/input/cluster_feeder.go | 4 +- .../recommender/input/cluster_feeder_test.go | 12 ++-- .../controller_cache_storage_test.go | 6 +- .../controller_fetcher/controller_fetcher.go | 63 +------------------ .../controller_fetcher_test.go | 8 +-- .../input/history/history_provider.go | 6 +- .../input/history/history_provider_test.go | 30 ++++----- .../pkg/recommender/input/oom/observer.go | 4 +- .../recommender/input/oom/observer_test.go | 3 +- .../input/spec/spec_client_test_util.go | 3 +- .../pkg/recommender/logic/estimator_test.go | 12 +++- .../model/aggregate_container_state_test.go | 9 ++- .../pkg/recommender/model/cluster_test.go | 32 +++++----- .../pkg/recommender/routines/recommender.go | 6 +- .../util/decaying_histogram_test.go | 3 +- vertical-pod-autoscaler/pkg/target/fetcher.go | 32 +--------- vertical-pod-autoscaler/pkg/updater/main.go | 4 +- .../pkg/utils/metrics/admission/admission.go | 2 +- .../pkg/utils/metrics/healthcheck.go | 10 ++- .../pkg/utils/metrics/metrics.go | 2 +- .../utils/metrics/recommender/recommender.go | 2 +- .../pkg/utils/test/test_pod.go | 4 +- .../pkg/utils/test/test_vpa.go | 4 +- .../pkg/utils/vpa/api_test.go | 4 +- .../pkg/utils/vpa/capping_test.go | 5 -- .../utils/vpa/limit_and_request_scaling.go | 7 --- 29 files changed, 138 insertions(+), 199 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go index 0aee3cd64f..88f87636f5 100644 --- 
a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go @@ -50,41 +50,41 @@ func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *core.Po func addResourcesPatch(idx int) resource_admission.PatchRecord { return resource_admission.PatchRecord{ - "add", - fmt.Sprintf("/spec/containers/%d/resources", idx), - core.ResourceRequirements{}, + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources", idx), + Value: core.ResourceRequirements{}, } } func addRequestsPatch(idx int) resource_admission.PatchRecord { return resource_admission.PatchRecord{ - "add", - fmt.Sprintf("/spec/containers/%d/resources/requests", idx), - core.ResourceList{}, + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources/requests", idx), + Value: core.ResourceList{}, } } func addLimitsPatch(idx int) resource_admission.PatchRecord { return resource_admission.PatchRecord{ - "add", - fmt.Sprintf("/spec/containers/%d/resources/limits", idx), - core.ResourceList{}, + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources/limits", idx), + Value: core.ResourceList{}, } } func addResourceRequestPatch(index int, res, amount string) resource_admission.PatchRecord { return resource_admission.PatchRecord{ - "add", - fmt.Sprintf("/spec/containers/%d/resources/requests/%s", index, res), - resource.MustParse(amount), + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources/requests/%s", index, res), + Value: resource.MustParse(amount), } } func addResourceLimitPatch(index int, res, amount string) resource_admission.PatchRecord { return resource_admission.PatchRecord{ - "add", - fmt.Sprintf("/spec/containers/%d/resources/limits/%s", index, res), - resource.MustParse(amount), + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources/limits/%s", index, res), + Value: resource.MustParse(amount), } } diff --git 
a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler_test.go index f40291a784..cacadfa445 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler_test.go @@ -27,8 +27,7 @@ import ( ) const ( - cpu = apiv1.ResourceCPU - memory = apiv1.ResourceMemory + cpu = apiv1.ResourceCPU ) func TestValidateVPA(t *testing.T) { diff --git a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go index ea9fc36f58..c7bcedf7ec 100644 --- a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go @@ -31,12 +31,21 @@ import ( // TODO: Extract these constants to a common test module. var ( - testPodID1 = model.PodID{"namespace-1", "pod-1"} - testContainerID1 = model.ContainerID{testPodID1, "container-1"} - testVpaID1 = model.VpaID{"namespace-1", "vpa-1"} - testLabels = map[string]string{"label-1": "value-1"} - testSelectorStr = "label-1 = value-1" - testRequest = model.Resources{ + testPodID1 = model.PodID{ + Namespace: "namespace-1", + PodName: "pod-1", + } + testContainerID1 = model.ContainerID{ + PodID: testPodID1, + ContainerName: "container-1", + } + testVpaID1 = model.VpaID{ + Namespace: "namespace-1", + VpaName: "vpa-1", + } + testLabels = map[string]string{"label-1": "value-1"} + testSelectorStr = "label-1 = value-1" + testRequest = model.Resources{ model.ResourceCPU: model.CPUAmountFromCores(3.14), model.ResourceMemory: model.MemoryAmountFromBytes(3.14e9), } @@ -65,7 +74,11 @@ func TestMergeContainerStateForCheckpointDropsRecentMemoryPeak(t *testing.T) { timeNow := time.Unix(1, 0) container.AddSample(&model.ContainerUsageSample{ - timeNow, model.MemoryAmountFromBytes(1024 
* 1024 * 1024), testRequest[model.ResourceMemory], model.ResourceMemory}) + MeasureStart: timeNow, + Usage: model.MemoryAmountFromBytes(1024 * 1024 * 1024), + Request: testRequest[model.ResourceMemory], + Resource: model.ResourceMemory, + }) vpa := addVpa(t, cluster, testVpaID1, testSelectorStr) // Verify that the current peak is excluded from the aggregation. diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go index 6c21ba6cf4..bfce561a3a 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go @@ -400,7 +400,9 @@ func (feeder *clusterStateFeeder) LoadVPAs() { for vpaID := range feeder.clusterState.Vpas { if _, exists := vpaKeys[vpaID]; !exists { klog.V(3).Infof("Deleting VPA %v", vpaID) - feeder.clusterState.DeleteVpa(vpaID) + if err := feeder.clusterState.DeleteVpa(vpaID); err != nil { + klog.Errorf("Deleting VPA %v failed: %v", vpaID, err) + } } } feeder.clusterState.ObservedVpas = vpaCRDs diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go index 360cc2e7b0..234815ae4b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go @@ -52,13 +52,11 @@ func parseLabelSelector(selector string) labels.Selector { } var ( - unsupportedConditionNoLongerSupported = "Label selector is no longer supported, please migrate to targetRef" - unsupportedConditionTextFromFetcher = "Cannot read targetRef. Reason: targetRef not defined" - unsupportedConditionNoExtraText = "Cannot read targetRef" - unsupportedConditionBothDefined = "Both targetRef and label selector defined. 
Please remove label selector" - unsupportedConditionNoTargetRef = "Cannot read targetRef" - unsupportedConditionMudaMudaMuda = "Error checking if target is a topmost well-known or scalable controller: muda muda muda" - unsupportedTargetRefHasParent = "The targetRef controller has a parent but it should point to a topmost well-known or scalable controller" + unsupportedConditionTextFromFetcher = "Cannot read targetRef. Reason: targetRef not defined" + unsupportedConditionNoExtraText = "Cannot read targetRef" + unsupportedConditionNoTargetRef = "Cannot read targetRef" + unsupportedConditionMudaMudaMuda = "Error checking if target is a topmost well-known or scalable controller: muda muda muda" + unsupportedTargetRefHasParent = "The targetRef controller has a parent but it should point to a topmost well-known or scalable controller" ) const ( diff --git a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_cache_storage_test.go b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_cache_storage_test.go index 0ee7b8d389..b924194563 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_cache_storage_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_cache_storage_test.go @@ -125,7 +125,8 @@ func TestControllerCache_GetRefreshesDeleteAfter(t *testing.T) { assert.Equal(t, startTime.Add(10*time.Second), c.cache[key].deleteAfter) timeNow = startTime.Add(5 * time.Second) - c.Get(key.namespace, key.groupResource, key.name) + _, _, err := c.Get(key.namespace, key.groupResource, key.name) + assert.NoError(t, err) assert.Equal(t, startTime.Add(15*time.Second), c.cache[key].deleteAfter) } @@ -154,7 +155,8 @@ func TestControllerCache_GetChangesLifeTimeNotFreshness(t *testing.T) { assertTimeBetween(t, firstRefreshAfter, startTime.Add(time.Second), startTime.Add(2*time.Second)) timeNow = startTime.Add(5 * time.Second) - c.Get(key.namespace, key.groupResource, 
key.name) + _, _, err := c.Get(key.namespace, key.groupResource, key.name) + assert.NoError(t, err) cacheEntry = c.cache[key] // scheduled to delete 10s after get (15s after insert) assert.Equal(t, startTime.Add(15*time.Second), cacheEntry.deleteAfter) diff --git a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher.go b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher.go index 8aa18ac89f..d257222aa6 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher.go @@ -180,52 +180,24 @@ func getParentOfWellKnownController(informer cache.SharedIndexInformer, controll if !exists { return nil, fmt.Errorf("%s %s/%s does not exist", kind, namespace, name) } - switch obj.(type) { + switch apiObj := obj.(type) { case (*appsv1.DaemonSet): - apiObj, ok := obj.(*appsv1.DaemonSet) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return getOwnerController(apiObj.OwnerReferences, namespace), nil case (*appsv1.Deployment): - apiObj, ok := obj.(*appsv1.Deployment) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return getOwnerController(apiObj.OwnerReferences, namespace), nil case (*appsv1.StatefulSet): - apiObj, ok := obj.(*appsv1.StatefulSet) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return getOwnerController(apiObj.OwnerReferences, namespace), nil case (*appsv1.ReplicaSet): - apiObj, ok := obj.(*appsv1.ReplicaSet) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return getOwnerController(apiObj.OwnerReferences, namespace), nil case (*batchv1.Job): - apiObj, ok := obj.(*batchv1.Job) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return 
getOwnerController(apiObj.OwnerReferences, namespace), nil case (*batchv1beta1.CronJob): - apiObj, ok := obj.(*batchv1beta1.CronJob) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return getOwnerController(apiObj.OwnerReferences, namespace), nil case (*corev1.ReplicationController): - apiObj, ok := obj.(*corev1.ReplicationController) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return getOwnerController(apiObj.OwnerReferences, namespace), nil } - return nil, fmt.Errorf("Don't know how to read owner controller") + return nil, fmt.Errorf("don't know how to read owner controller") } func (f *controllerFetcher) getParentOfController(controllerKey ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { @@ -376,34 +348,3 @@ func (f *controllerFetcher) FindTopMostWellKnownOrScalable(key *ControllerKeyWit key = owner } } - -type identityControllerFetcher struct { -} - -func (f *identityControllerFetcher) FindTopMostWellKnownOrScalable(controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { - return controller, nil -} - -type constControllerFetcher struct { - ControllerKeyWithAPIVersion *ControllerKeyWithAPIVersion -} - -func (f *constControllerFetcher) FindTopMostWellKnownOrScalable(controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { - return f.ControllerKeyWithAPIVersion, nil -} - -type mockControllerFetcher struct { - expected *ControllerKeyWithAPIVersion - result *ControllerKeyWithAPIVersion -} - -func (f *mockControllerFetcher) FindTopMostWellKnownOrScalable(controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { - if controller == nil && f.expected == nil { - return f.result, nil - } - if controller == nil || *controller != *f.expected { - return nil, fmt.Errorf("Unexpected argument: %v", controller) - } - - return f.result, nil -} diff --git 
a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher_test.go b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher_test.go index 8c5e284b5a..705a022389 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher/controller_fetcher_test.go @@ -98,18 +98,18 @@ func simpleControllerFetcher() *controllerFetcher { return &f } -func addController(controller *controllerFetcher, obj runtime.Object) { +func addController(t *testing.T, controller *controllerFetcher, obj runtime.Object) { kind := wellKnownController(obj.GetObjectKind().GroupVersionKind().Kind) _, ok := controller.informersMap[kind] if ok { - controller.informersMap[kind].GetStore().Add(obj) + err := controller.informersMap[kind].GetStore().Add(obj) + assert.NoError(t, err) } } func TestControllerFetcher(t *testing.T) { type testCase struct { name string - apiVersion string key *ControllerKeyWithAPIVersion objects []runtime.Object expectedKey *ControllerKeyWithAPIVersion @@ -383,7 +383,7 @@ func TestControllerFetcher(t *testing.T) { t.Run(tc.name, func(t *testing.T) { f := simpleControllerFetcher() for _, obj := range tc.objects { - addController(f, obj) + addController(t, f, obj) } topMostWellKnownOrScalableController, err := f.FindTopMostWellKnownOrScalable(tc.key) if tc.expectedKey == nil { diff --git a/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider.go b/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider.go index a7f4082567..087244329d 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider.go @@ -289,6 +289,10 @@ func (p *prometheusHistoryProvider) GetClusterHistory() (map[model.PodID]*PodHis sort.Slice(samples, func(i, j int) bool { return 
samples[i].MeasureStart.Before(samples[j].MeasureStart) }) } } - p.readLastLabels(res, p.config.PodLabelsMetricName) + err = p.readLastLabels(res, p.config.PodLabelsMetricName) + if err != nil { + return nil, fmt.Errorf("cannot read last labels: %v", err) + } + return res, nil } diff --git a/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider_test.go b/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider_test.go index 83ea4393cf..51e20a7e9a 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/history/history_provider_test.go @@ -59,59 +59,59 @@ type mockPrometheusAPI struct { mock.Mock } -func (m mockPrometheusAPI) AlertManagers(ctx context.Context) (prometheusv1.AlertManagersResult, error) { +func (m *mockPrometheusAPI) AlertManagers(ctx context.Context) (prometheusv1.AlertManagersResult, error) { panic("not implemented") } -func (m mockPrometheusAPI) Alerts(ctx context.Context) (prometheusv1.AlertsResult, error) { +func (m *mockPrometheusAPI) Alerts(ctx context.Context) (prometheusv1.AlertsResult, error) { panic("not implemented") } -func (m mockPrometheusAPI) CleanTombstones(ctx context.Context) error { +func (m *mockPrometheusAPI) CleanTombstones(ctx context.Context) error { panic("not implemented") } -func (m mockPrometheusAPI) Config(ctx context.Context) (prometheusv1.ConfigResult, error) { +func (m *mockPrometheusAPI) Config(ctx context.Context) (prometheusv1.ConfigResult, error) { panic("not implemented") } -func (m mockPrometheusAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error { +func (m *mockPrometheusAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error { panic("not implemented") } -func (m mockPrometheusAPI) Flags(ctx context.Context) (prometheusv1.FlagsResult, error) { +func (m *mockPrometheusAPI) Flags(ctx context.Context) 
(prometheusv1.FlagsResult, error) { panic("not implemented") } -func (m mockPrometheusAPI) LabelNames(ctx context.Context) ([]string, error) { +func (m *mockPrometheusAPI) LabelNames(ctx context.Context) ([]string, error) { panic("not implemented") } -func (m mockPrometheusAPI) LabelValues(ctx context.Context, label string) (prommodel.LabelValues, error) { +func (m *mockPrometheusAPI) LabelValues(ctx context.Context, label string) (prommodel.LabelValues, error) { panic("not implemented") } -func (m mockPrometheusAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]prommodel.LabelSet, api.Warnings, error) { +func (m *mockPrometheusAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]prommodel.LabelSet, api.Warnings, error) { panic("not implemented") } -func (m mockPrometheusAPI) Rules(ctx context.Context) (prometheusv1.RulesResult, error) { +func (m *mockPrometheusAPI) Rules(ctx context.Context) (prometheusv1.RulesResult, error) { panic("not implemented") } -func (m mockPrometheusAPI) Snapshot(ctx context.Context, skipHead bool) (prometheusv1.SnapshotResult, error) { +func (m *mockPrometheusAPI) Snapshot(ctx context.Context, skipHead bool) (prometheusv1.SnapshotResult, error) { panic("not implemented") } -func (m mockPrometheusAPI) Targets(ctx context.Context) (prometheusv1.TargetsResult, error) { +func (m *mockPrometheusAPI) Targets(ctx context.Context) (prometheusv1.TargetsResult, error) { panic("not implemented") } -func (m mockPrometheusAPI) TargetsMetadata(ctx context.Context, _, _, _ string) ([]prometheusv1.MetricMetadata, error) { +func (m *mockPrometheusAPI) TargetsMetadata(ctx context.Context, _, _, _ string) ([]prometheusv1.MetricMetadata, error) { panic("not implemented") } -func (m mockPrometheusAPI) Query(ctx context.Context, query string, ts time.Time) (prommodel.Value, api.Warnings, error) { +func (m *mockPrometheusAPI) Query(ctx context.Context, query string, ts 
time.Time) (prommodel.Value, api.Warnings, error) { args := m.Called(ctx, query, ts) var returnArg prommodel.Value if args.Get(0) != nil { @@ -120,7 +120,7 @@ func (m mockPrometheusAPI) Query(ctx context.Context, query string, ts time.Time return returnArg, nil, args.Error(1) } -func (m mockPrometheusAPI) QueryRange(ctx context.Context, query string, r prometheusv1.Range) (prommodel.Value, api.Warnings, error) { +func (m *mockPrometheusAPI) QueryRange(ctx context.Context, query string, r prometheusv1.Range) (prommodel.Value, api.Warnings, error) { args := m.Called(ctx, query, r) var returnArg prommodel.Value if args.Get(0) != nil { diff --git a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go index f40df0f1e3..7e572540e2 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go @@ -138,11 +138,11 @@ func (*observer) OnAdd(obj interface{}) {} // passess it to the ObservedOomsChannel func (o *observer) OnUpdate(oldObj, newObj interface{}) { oldPod, ok := oldObj.(*apiv1.Pod) - if oldPod == nil || !ok { + if !ok { klog.Errorf("OOM observer received invalid oldObj: %v", oldObj) } newPod, ok := newObj.(*apiv1.Pod) - if newPod == nil || !ok { + if !ok { klog.Errorf("OOM observer received invalid newObj: %v", newObj) } diff --git a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go index d45ac6ba84..1319beb75d 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" ) @@ -32,7 
+33,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) func init() { - v1.AddToScheme(scheme) + utilruntime.Must(v1.AddToScheme(scheme)) } const pod1Yaml = ` diff --git a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go index 6118579a7e..ccb6aabe4b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" v1lister "k8s.io/client-go/listers/core/v1" ) @@ -33,7 +34,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) func init() { - v1.AddToScheme(scheme) + utilruntime.Must(v1.AddToScheme(scheme)) } const pod1Yaml = ` diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go b/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go index 30a280d850..4e83e448ad 100644 --- a/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go @@ -71,14 +71,22 @@ func TestConfidenceMultiplier(t *testing.T) { model.ResourceCPU: model.CPUAmountFromCores(3.14), model.ResourceMemory: model.MemoryAmountFromBytes(3.14e9), }) - testedEstimator := &confidenceMultiplier{0.1, 2.0, baseEstimator} + testedEstimator := &confidenceMultiplier{ + multiplier: 0.1, + exponent: 2.0, + baseEstimator: baseEstimator, + } s := model.NewAggregateContainerState() // Add 9 CPU samples at the frequency of 1/(2 mins). 
timestamp := anyTime for i := 1; i <= 9; i++ { s.AddSample(&model.ContainerUsageSample{ - timestamp, model.CPUAmountFromCores(1.0), testRequest[model.ResourceCPU], model.ResourceCPU}) + MeasureStart: timestamp, + Usage: model.CPUAmountFromCores(1.0), + Request: testRequest[model.ResourceCPU], + Resource: model.ResourceCPU, + }) timestamp = timestamp.Add(time.Minute * 2) } diff --git a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go index 2b4383b88a..0067f80745 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go @@ -29,10 +29,9 @@ import ( ) var ( - testPodID1 = PodID{"namespace-1", "pod-1"} - testPodID2 = PodID{"namespace-1", "pod-2"} - testContainerID1 = ContainerID{testPodID1, "container-1"} - testRequest = Resources{ + testPodID1 = PodID{"namespace-1", "pod-1"} + testPodID2 = PodID{"namespace-1", "pod-2"} + testRequest = Resources{ ResourceCPU: CPUAmountFromCores(3.14), ResourceMemory: MemoryAmountFromBytes(3.14e9), } @@ -141,7 +140,7 @@ func TestAggregateContainerStateSaveToCheckpoint(t *testing.T) { assert.NoError(t, err) - assert.True(t, time.Now().Sub(checkpoint.LastUpdateTime.Time) < 10*time.Second) + assert.True(t, time.Since(checkpoint.LastUpdateTime.Time) < 10*time.Second) assert.Equal(t, t1, checkpoint.FirstSampleStart.Time) assert.Equal(t, t2, checkpoint.LastSampleStart.Time) assert.Equal(t, 10, checkpoint.TotalSamplesCount) diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go index bfe05b2c05..043a6870ef 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go @@ -236,7 +236,7 @@ func TestAddSampleAfterAggregateContainerStateGCed(t *testing.T) { 
cluster := NewClusterState(testGcPeriod) vpa := addTestVpa(cluster) pod := addTestPod(cluster) - addTestContainer(cluster) + addTestContainer(t, cluster) assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest)) usageSample := makeTestUsageSample() @@ -352,8 +352,9 @@ func addTestPod(cluster *ClusterState) *PodState { return cluster.Pods[testPodID] } -func addTestContainer(cluster *ClusterState) *ContainerState { - cluster.AddOrUpdateContainer(testContainerID, testRequest) +func addTestContainer(t *testing.T, cluster *ClusterState) *ContainerState { + err := cluster.AddOrUpdateContainer(testContainerID, testRequest) + assert.NoError(t, err) return cluster.GetContainer(testContainerID) } @@ -364,7 +365,7 @@ func TestAddVpaThenAddPod(t *testing.T) { vpa := addTestVpa(cluster) assert.Empty(t, vpa.aggregateContainerStates) addTestPod(cluster) - addTestContainer(cluster) + addTestContainer(t, cluster) aggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID) assert.Contains(t, vpa.aggregateContainerStates, aggregateStateKey) } @@ -374,7 +375,7 @@ func TestAddVpaThenAddPod(t *testing.T) { func TestAddPodThenAddVpa(t *testing.T) { cluster := NewClusterState(testGcPeriod) addTestPod(cluster) - addTestContainer(cluster) + addTestContainer(t, cluster) vpa := addTestVpa(cluster) aggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID) assert.Contains(t, vpa.aggregateContainerStates, aggregateStateKey) @@ -387,7 +388,7 @@ func TestChangePodLabels(t *testing.T) { cluster := NewClusterState(testGcPeriod) vpa := addTestVpa(cluster) addTestPod(cluster) - addTestContainer(cluster) + addTestContainer(t, cluster) aggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID) assert.Contains(t, vpa.aggregateContainerStates, aggregateStateKey) // Update Pod labels to no longer match the VPA. 
@@ -418,12 +419,11 @@ func TestUpdateAnnotations(t *testing.T) { // between the pod and the VPA are updated correctly each time. func TestUpdatePodSelector(t *testing.T) { cluster := NewClusterState(testGcPeriod) - vpa := addTestVpa(cluster) addTestPod(cluster) - addTestContainer(cluster) + addTestContainer(t, cluster) // Update the VPA selector such that it still matches the Pod. - vpa = addVpa(cluster, testVpaID, testAnnotations, "label-1 in (value-1,value-2)", testTargetRef) + vpa := addVpa(cluster, testVpaID, testAnnotations, "label-1 in (value-1,value-2)", testTargetRef) assert.Contains(t, vpa.aggregateContainerStates, cluster.aggregateStateKeyForContainerID(testContainerID)) // Update the VPA selector to no longer match the Pod. @@ -549,7 +549,7 @@ func TestAddOrUpdateVPAPolicies(t *testing.T) { t.Run(tc.name, func(t *testing.T) { cluster := NewClusterState(testGcPeriod) addTestPod(cluster) - addTestContainer(cluster) + addTestContainer(t, cluster) if tc.oldVpa != nil { oldVpa := addVpaObject(cluster, testVpaID, tc.oldVpa, testSelectorStr) if !assert.Contains(t, cluster.Vpas, testVpaID) { @@ -596,8 +596,10 @@ func TestTwoPodsWithSameLabels(t *testing.T) { cluster := NewClusterState(testGcPeriod) cluster.AddOrUpdatePod(podID1, testLabels, apiv1.PodRunning) cluster.AddOrUpdatePod(podID2, testLabels, apiv1.PodRunning) - cluster.AddOrUpdateContainer(containerID1, testRequest) - cluster.AddOrUpdateContainer(containerID2, testRequest) + err := cluster.AddOrUpdateContainer(containerID1, testRequest) + assert.NoError(t, err) + err = cluster.AddOrUpdateContainer(containerID2, testRequest) + assert.NoError(t, err) // Expect only one aggregation to be created. 
assert.Equal(t, 1, len(cluster.aggregateStateMap)) @@ -613,8 +615,10 @@ func TestTwoPodsWithDifferentNamespaces(t *testing.T) { cluster := NewClusterState(testGcPeriod) cluster.AddOrUpdatePod(podID1, testLabels, apiv1.PodRunning) cluster.AddOrUpdatePod(podID2, testLabels, apiv1.PodRunning) - cluster.AddOrUpdateContainer(containerID1, testRequest) - cluster.AddOrUpdateContainer(containerID2, testRequest) + err := cluster.AddOrUpdateContainer(containerID1, testRequest) + assert.NoError(t, err) + err = cluster.AddOrUpdateContainer(containerID2, testRequest) + assert.NoError(t, err) // Expect two separate aggregations to be created. assert.Equal(t, 2, len(cluster.aggregateStateMap)) diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go index df76b307e5..5a4fbf3064 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go @@ -151,7 +151,9 @@ func getCappedRecommendation(vpaID model.VpaID, resources logic.RecommendedPodRe UncappedTarget: model.ResourcesAsResourceList(res.Target), }) } - recommendation := &vpa_types.RecommendedPodResources{containerResources} + recommendation := &vpa_types.RecommendedPodResources{ + ContainerRecommendations: containerResources, + } cappedRecommendation, err := vpa_utils.ApplyVPAPolicy(recommendation, policy) if err != nil { klog.Errorf("Failed to apply policy for VPA %v/%v: %v", vpaID.Namespace, vpaID.VpaName, err) @@ -166,7 +168,7 @@ func (r *recommender) MaintainCheckpoints(ctx context.Context, minCheckpointsPer if err := r.checkpointWriter.StoreCheckpoints(ctx, now, minCheckpointsPerRun); err != nil { klog.Warningf("Failed to store checkpoints. 
Reason: %+v", err) } - if time.Now().Sub(r.lastCheckpointGC) > r.checkpointsGCInterval { + if time.Since(r.lastCheckpointGC) > r.checkpointsGCInterval { r.lastCheckpointGC = now r.clusterStateFeeder.GarbageCollectCheckpoints() } diff --git a/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go b/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go index 71f4b4ad2a..94cdb3dbd1 100644 --- a/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go @@ -163,7 +163,8 @@ func TestDecayingHistogramLoadFromCheckpoint(t *testing.T) { halfLife: time.Hour, referenceTimestamp: time.Time{}, } - d.LoadFromCheckpoint(&checkpoint) + err := d.LoadFromCheckpoint(&checkpoint) + assert.NoError(t, err) assert.False(t, d.histogram.IsEmpty()) assert.Equal(t, timestamp, d.referenceTimestamp) diff --git a/vertical-pod-autoscaler/pkg/target/fetcher.go b/vertical-pod-autoscaler/pkg/target/fetcher.go index dd8ac7b9c1..a26848d882 100644 --- a/vertical-pod-autoscaler/pkg/target/fetcher.go +++ b/vertical-pod-autoscaler/pkg/target/fetcher.go @@ -154,51 +154,23 @@ func getLabelSelector(informer cache.SharedIndexInformer, kind, namespace, name if !exists { return nil, fmt.Errorf("%s %s/%s does not exist", kind, namespace, name) } - switch obj.(type) { + switch apiObj := obj.(type) { case (*appsv1.DaemonSet): - apiObj, ok := obj.(*appsv1.DaemonSet) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector) case (*appsv1.Deployment): - apiObj, ok := obj.(*appsv1.Deployment) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector) case (*appsv1.StatefulSet): - apiObj, ok := obj.(*appsv1.StatefulSet) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } 
return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector) case (*appsv1.ReplicaSet): - apiObj, ok := obj.(*appsv1.ReplicaSet) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector) case (*batchv1.Job): - apiObj, ok := obj.(*batchv1.Job) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return metav1.LabelSelectorAsSelector(apiObj.Spec.Selector) case (*batchv1beta1.CronJob): - apiObj, ok := obj.(*batchv1beta1.CronJob) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return metav1.LabelSelectorAsSelector(metav1.SetAsLabelSelector(apiObj.Spec.JobTemplate.Spec.Template.Labels)) case (*corev1.ReplicationController): - apiObj, ok := obj.(*corev1.ReplicationController) - if !ok { - return nil, fmt.Errorf("Failed to parse %s %s/%s", kind, namespace, name) - } return metav1.LabelSelectorAsSelector(metav1.SetAsLabelSelector(apiObj.Spec.Selector)) } - return nil, fmt.Errorf("Don't know how to read label seletor") + return nil, fmt.Errorf("don't know how to read label seletor") } func (f *vpaTargetSelectorFetcher) getLabelSelectorFromResource( diff --git a/vertical-pod-autoscaler/pkg/updater/main.go b/vertical-pod-autoscaler/pkg/updater/main.go index ea2342d675..6f738c25ce 100644 --- a/vertical-pod-autoscaler/pkg/updater/main.go +++ b/vertical-pod-autoscaler/pkg/updater/main.go @@ -113,9 +113,9 @@ func main() { klog.Fatalf("Failed to create updater: %v", err) } ticker := time.Tick(*updaterInterval) + ctx, cancel := context.WithTimeout(context.Background(), *updaterInterval) + defer cancel() for range ticker { - ctx, cancel := context.WithTimeout(context.Background(), *updaterInterval) - defer cancel() updater.RunOnce(ctx) healthCheck.UpdateLastActivity() } diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/admission/admission.go b/vertical-pod-autoscaler/pkg/utils/metrics/admission/admission.go 
index a3f01147d5..a043939b25 100644 --- a/vertical-pod-autoscaler/pkg/utils/metrics/admission/admission.go +++ b/vertical-pod-autoscaler/pkg/utils/metrics/admission/admission.go @@ -100,5 +100,5 @@ func NewAdmissionLatency() *AdmissionLatency { // Observe measures the execution time from when the AdmissionLatency was created func (t *AdmissionLatency) Observe(status AdmissionStatus, resource AdmissionResource) { - (*t.histo).WithLabelValues(string(status), string(resource)).Observe(time.Now().Sub(t.start).Seconds()) + (*t.histo).WithLabelValues(string(status), string(resource)).Observe(time.Since(t.start).Seconds()) } diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go b/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go index e72f5c9265..78417dc137 100644 --- a/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go +++ b/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go @@ -21,6 +21,8 @@ import ( "net/http" "sync" "time" + + "k8s.io/klog/v2" ) // HealthCheck contains information about last activity time of the monitored component. 
@@ -59,11 +61,13 @@ func (hc *HealthCheck) checkLastActivity() (bool, time.Duration) { func (hc *HealthCheck) ServeHTTP(w http.ResponseWriter, r *http.Request) { timedOut, ago := hc.checkLastActivity() if timedOut { - w.WriteHeader(500) - w.Write([]byte(fmt.Sprintf("Error: last activity more than %v ago", ago))) + http.Error(w, fmt.Sprintf("Error: last activity more than %v ago", ago), http.StatusInternalServerError) } else { w.WriteHeader(200) - w.Write([]byte("OK")) + _, err := w.Write([]byte("OK")) + if err != nil { + klog.Errorf("Failed to write response message: %v", err) + } } } diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go b/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go index 4c0718ebb3..fa8329025a 100644 --- a/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go +++ b/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go @@ -77,7 +77,7 @@ func (t *ExecutionTimer) ObserveStep(step string) { // ObserveTotal measures the execution time from the creation of the ExecutionTimer func (t *ExecutionTimer) ObserveTotal() { - (*t.histo).WithLabelValues("total").Observe(time.Now().Sub(t.start).Seconds()) + (*t.histo).WithLabelValues("total").Observe(time.Since(t.start).Seconds()) } // CreateExecutionTimeMetric prepares a new histogram labeled with execution step diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go b/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go index 83dca2ce56..c8d564585c 100644 --- a/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go +++ b/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go @@ -98,7 +98,7 @@ func NewExecutionTimer() *metrics.ExecutionTimer { // ObserveRecommendationLatency observes the time it took for the first recommendation to appear func ObserveRecommendationLatency(created time.Time) { - recommendationLatency.Observe(time.Now().Sub(created).Seconds()) + recommendationLatency.Observe(time.Since(created).Seconds()) }
RecordAggregateContainerStatesCount records the number of containers being tracked by the recommender diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_pod.go b/vertical-pod-autoscaler/pkg/utils/test/test_pod.go index 0a85523753..e5f66e81a5 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_pod.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_pod.go @@ -87,7 +87,9 @@ func (pb *podBuilderImpl) WithPhase(phase apiv1.PodPhase) PodBuilder { } func (pb *podBuilderImpl) Get() *apiv1.Pod { - startTime := metav1.Time{testTimestamp} + startTime := metav1.Time{ + Time: testTimestamp, + } pod := &apiv1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go b/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go index 1da3a076af..f75a078d29 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go @@ -183,9 +183,7 @@ func (b *verticalPodAutoscalerBuilder) Get() *vpa_types.VerticalPodAutoscaler { }}} recommendation := b.recommendation.WithContainer(b.containerName).Get() - for _, rec := range b.appendedRecommendations { - recommendation.ContainerRecommendations = append(recommendation.ContainerRecommendations, rec) - } + recommendation.ContainerRecommendations = append(recommendation.ContainerRecommendations, b.appendedRecommendations...) 
return &vpa_types.VerticalPodAutoscaler{ ObjectMeta: meta.ObjectMeta{ diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go index 19db3ab278..bd13dc8c75 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go @@ -40,8 +40,8 @@ var ( ) func init() { - flag.Set("alsologtostderr", "true") - flag.Set("v", "5") + flag.Set("alsologtostderr", "true") //nolint:errcheck + flag.Set("v", "5") //nolint:errcheck } func parseLabelSelector(selector string) labels.Selector { diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index a791d13739..4fbb4bc7d2 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -26,11 +26,6 @@ import ( "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" ) -var ( - requestsAndLimits vpa_types.ContainerControlledValues = vpa_types.ContainerControlledValuesRequestsAndLimits - requestsOnly vpa_types.ContainerControlledValues = vpa_types.ContainerControlledValuesRequestsOnly -) - func TestRecommendationNotAvailable(t *testing.T) { pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get() podRecommendation := vpa_types.RecommendedPodResources{ diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go index 7262dd6d2e..c743d2fd4f 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go @@ -31,13 +31,6 @@ type ContainerResources struct { Requests core.ResourceList } -func newContainerResources() ContainerResources { - return ContainerResources{ - Requests: core.ResourceList{}, - Limits: core.ResourceList{}, - } -} - // GetProportionalLimit returns limit that will 
be in the same proportion to recommended request as original limit had to original request. func GetProportionalLimit(originalLimit, originalRequest, recommendation, defaultLimit core.ResourceList) (core.ResourceList, []string) { annotations := []string{} -- GitLab