diff --git a/pkg/controllers/federatedhpa/config/types_test.go b/pkg/controllers/federatedhpa/config/types_test.go new file mode 100644 index 000000000000..d9af9317b949 --- /dev/null +++ b/pkg/controllers/federatedhpa/config/types_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "testing" + + "github.com/spf13/pflag" +) + +// TestHPAControllerConfiguration_AddFlags tests that AddFlags adds all expected flags +func TestHPAControllerConfiguration_AddFlags(t *testing.T) { + config := &HPAControllerConfiguration{} + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + config.AddFlags(fs) + + expectedFlags := []string{ + "horizontal-pod-autoscaler-sync-period", + "horizontal-pod-autoscaler-upscale-delay", + "horizontal-pod-autoscaler-downscale-stabilization", + "horizontal-pod-autoscaler-downscale-delay", + "horizontal-pod-autoscaler-tolerance", + "horizontal-pod-autoscaler-cpu-initialization-period", + "horizontal-pod-autoscaler-initial-readiness-delay", + } + + for _, flagName := range expectedFlags { + if fs.Lookup(flagName) == nil { + t.Errorf("Expected flag %s not found", flagName) + } + } +} + +// TestHPAControllerConfiguration_AddFlags_NilReceiver tests AddFlags with a nil receiver +func TestHPAControllerConfiguration_AddFlags_NilReceiver(t *testing.T) { + var config *HPAControllerConfiguration + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + config.AddFlags(fs) + + if fs.HasFlags() { + t.Error("Expected no flags to be added when receiver is nil, but flags were added") + } +} diff --git a/pkg/controllers/federatedhpa/federatedhpa_controller_test.go b/pkg/controllers/federatedhpa/federatedhpa_controller_test.go new file mode 100644 index 000000000000..f23c8818146b --- /dev/null +++ b/pkg/controllers/federatedhpa/federatedhpa_controller_test.go @@ -0,0 +1,1704 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package federatedhpa + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util/lifted/selectors" +) + +type MockClient struct { + mock.Mock +} + +func (m *MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + args := m.Called(ctx, key, obj, opts) + return args.Error(0) +} + +func (m *MockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + args := m.Called(ctx, list, opts) + return args.Error(0) +} + +func (m *MockClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +func (m *MockClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Status() client.StatusWriter { + args := m.Called() + return args.Get(0).(client.StatusWriter) +} + +func (m *MockClient) Scheme() *runtime.Scheme { + args := m.Called() + return args.Get(0).(*runtime.Scheme) +} + +func (m *MockClient) SubResource(subResource string) client.SubResourceClient { + args := m.Called(subResource) + return args.Get(0).(client.SubResourceClient) +} + +func (m *MockClient) RESTMapper() meta.RESTMapper { + args := m.Called() + return args.Get(0).(meta.RESTMapper) +} + +func (m *MockClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + args := m.Called(obj) + return args.Get(0).(schema.GroupVersionKind), args.Error(1) +} + +func (m *MockClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + args := m.Called(obj) + return args.Bool(0), args.Error(1) +} + +// TestGetBindingByLabel verifies the behavior of getBindingByLabel function +func TestGetBindingByLabel(t *testing.T) { + tests := []struct { + name string + resourceLabel map[string]string + resourceRef autoscalingv2.CrossVersionObjectReference + bindingList *workv1alpha2.ResourceBindingList + expectedError string + }{ + { + name: "Successful retrieval", + resourceLabel: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "test-policy-id", + }, + resourceRef: 
autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + APIVersion: "apps/v1", + }, + bindingList: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{ + { + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + }, + }, + }, + }, + { + name: "Empty resource label", + resourceLabel: map[string]string{}, + expectedError: "target resource has no label", + }, + { + name: "No matching bindings", + resourceLabel: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "test-policy-id", + }, + resourceRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "non-existent-deployment", + APIVersion: "apps/v1", + }, + bindingList: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{}, + }, + expectedError: "length of binding list is zero", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := new(MockClient) + controller := &FHPAController{ + Client: mockClient, + } + + ctx := context.Background() + + if tt.bindingList != nil { + mockClient.On("List", ctx, mock.AnythingOfType("*v1alpha2.ResourceBindingList"), mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + arg := args.Get(1).(*workv1alpha2.ResourceBindingList) + *arg = *tt.bindingList + }) + } + + binding, err := controller.getBindingByLabel(ctx, tt.resourceLabel, tt.resourceRef) + + if tt.expectedError != "" { + assert.EqualError(t, err, tt.expectedError) + } else { + assert.NoError(t, err) + assert.NotNil(t, binding) + assert.Equal(t, tt.resourceRef.Name, binding.Spec.Resource.Name) + } + + mockClient.AssertExpectations(t) + }) + } +} + +// TestGetTargetCluster checks the getTargetCluster function's handling of various cluster states +func TestGetTargetCluster(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + clusters map[string]*clusterv1alpha1.Cluster + getErrors map[string]error + expectedClusters []string + expectedError string + }{ + { + name: "Two clusters, one ready and one not ready", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + {Name: "cluster2"}, + }, + }, + }, + clusters: map[string]*clusterv1alpha1.Cluster{ + "cluster1": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + "cluster2": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + }, + expectedClusters: []string{"cluster1"}, + }, + { + name: "Empty binding.Spec.Clusters", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{}, + }, + }, + expectedError: "binding has no schedulable clusters", + }, + { + name: "Client.Get returns error", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + }, + }, + }, + getErrors: map[string]error{ + "cluster1": errors.New("get error"), + }, + expectedError: "get error", + }, + { + name: "Multiple ready and not ready clusters", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + 
Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + {Name: "cluster2"}, + {Name: "cluster3"}, + {Name: "cluster4"}, + {Name: "cluster5"}, + }, + }, + }, + clusters: map[string]*clusterv1alpha1.Cluster{ + "cluster1": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + "cluster2": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + "cluster3": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + "cluster4": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + "cluster5": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + expectedClusters: []string{"cluster1", "cluster3", "cluster5"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := new(MockClient) + controller := &FHPAController{ + Client: mockClient, + } + + ctx := context.Background() + + for _, targetCluster := range tt.binding.Spec.Clusters { + var err error + if tt.getErrors != nil { + err = tt.getErrors[targetCluster.Name] + } + + mockClient.On("Get", ctx, types.NamespacedName{Name: targetCluster.Name}, mock.AnythingOfType("*v1alpha1.Cluster"), mock.Anything). + Return(err). + Run(func(args mock.Arguments) { + if tt.clusters != nil { + arg := args.Get(2).(*clusterv1alpha1.Cluster) + *arg = *tt.clusters[targetCluster.Name] + } + }) + } + + clusters, err := controller.getTargetCluster(ctx, tt.binding) + + if tt.expectedError != "" { + assert.EqualError(t, err, tt.expectedError) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expectedClusters, clusters) + } + + mockClient.AssertExpectations(t) + + for _, targetCluster := range tt.binding.Spec.Clusters { + mockClient.AssertCalled(t, "Get", ctx, types.NamespacedName{Name: targetCluster.Name}, mock.AnythingOfType("*v1alpha1.Cluster"), mock.Anything) + } + }) + } +} + +// TestValidateAndParseSelector ensures proper parsing and validation of selectors +func TestValidateAndParseSelector(t *testing.T) { + tests := []struct { + name string + selector string + expectedError bool + }{ + { + name: "Valid selector", + selector: "app=myapp", + expectedError: false, + }, + { + name: "Invalid selector", + selector: "invalid=selector=format", + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + hpaSelectors: selectors.NewBiMultimap(), + hpaSelectorsMux: sync.Mutex{}, + EventRecorder: &record.FakeRecorder{}, + } + + hpa := &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "default", + }, + } + + parsedSelector, err := controller.validateAndParseSelector(hpa, tt.selector, []*corev1.Pod{}) + + if tt.expectedError { + assert.Error(t, err) + assert.Nil(t, parsedSelector) + assert.Contains(t, err.Error(), "couldn't convert selector into a corresponding internal selector object") + } else { + assert.NoError(t, err) + assert.NotNil(t, parsedSelector) + } + }) + } +} + +// TestRecordInitialRecommendation verifies 
correct recording of initial recommendations +func TestRecordInitialRecommendation(t *testing.T) { + tests := []struct { + name string + key string + currentReplicas int32 + initialRecs []timestampedRecommendation + expectedCount int + expectedReplicas int32 + }{ + { + name: "New recommendation", + key: "test-hpa-1", + currentReplicas: 3, + initialRecs: nil, + expectedCount: 1, + expectedReplicas: 3, + }, + { + name: "Existing recommendations", + key: "test-hpa-2", + currentReplicas: 5, + initialRecs: []timestampedRecommendation{ + {recommendation: 3, timestamp: time.Now().Add(-1 * time.Minute)}, + }, + expectedCount: 1, + expectedReplicas: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + } + + if tt.initialRecs != nil { + controller.recommendations[tt.key] = tt.initialRecs + } + + controller.recordInitialRecommendation(tt.currentReplicas, tt.key) + + assert.Len(t, controller.recommendations[tt.key], tt.expectedCount) + assert.Equal(t, tt.expectedReplicas, controller.recommendations[tt.key][0].recommendation) + + if tt.initialRecs == nil { + assert.WithinDuration(t, time.Now(), controller.recommendations[tt.key][0].timestamp, 2*time.Second) + } else { + assert.Equal(t, tt.initialRecs[0].timestamp, controller.recommendations[tt.key][0].timestamp) + } + }) + } +} + +// TestStabilizeRecommendation checks the stabilization logic for recommendations +func TestStabilizeRecommendation(t *testing.T) { + tests := []struct { + name string + key string + initialRecommendations []timestampedRecommendation + newRecommendation int32 + expectedStabilized int32 + expectedStoredCount int + }{ + { + name: "No previous recommendations", + key: "test-hpa-1", + initialRecommendations: []timestampedRecommendation{}, + newRecommendation: 5, + expectedStabilized: 5, + expectedStoredCount: 1, + }, + { + name: "With previous recommendations within window", + key: "test-hpa-2", + initialRecommendations: []timestampedRecommendation{ + {recommendation: 3, timestamp: time.Now().Add(-30 * time.Second)}, + {recommendation: 4, timestamp: time.Now().Add(-45 * time.Second)}, + }, + newRecommendation: 2, + expectedStabilized: 4, + expectedStoredCount: 3, + }, + { + name: "With old recommendation outside window", + key: "test-hpa-3", + initialRecommendations: []timestampedRecommendation{ + {recommendation: 7, timestamp: time.Now().Add(-2 * time.Minute)}, + {recommendation: 4, timestamp: time.Now().Add(-45 * time.Second)}, + }, + newRecommendation: 5, + expectedStabilized: 5, + expectedStoredCount: 2, + }, + { + name: "All recommendations outside window", + key: "test-hpa-4", + initialRecommendations: []timestampedRecommendation{ + {recommendation: 7, timestamp: time.Now().Add(-2 * time.Minute)}, + {recommendation: 8, timestamp: time.Now().Add(-3 * time.Minute)}, + }, + newRecommendation: 3, + expectedStabilized: 3, + expectedStoredCount: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + DownscaleStabilisationWindow: time.Minute, + } + controller.recommendations[tt.key] = tt.initialRecommendations + + stabilized := controller.stabilizeRecommendation(tt.key, tt.newRecommendation) + + assert.Equal(t, tt.expectedStabilized, stabilized, "Unexpected stabilized recommendation") + assert.Len(t, controller.recommendations[tt.key], tt.expectedStoredCount, "Unexpected number of 
stored recommendations") + assert.True(t, containsRecommendation(controller.recommendations[tt.key], tt.newRecommendation), "New recommendation not found in stored recommendations") + + oldCount := countOldRecommendations(controller.recommendations[tt.key], controller.DownscaleStabilisationWindow) + assert.LessOrEqual(t, oldCount, 1, "Too many recommendations older than stabilization window") + }) + } +} + +// TestNormalizeDesiredReplicas verifies the normalization of desired replicas +func TestNormalizeDesiredReplicas(t *testing.T) { + testCases := []struct { + name string + currentReplicas int32 + desiredReplicas int32 + minReplicas int32 + maxReplicas int32 + recommendations []timestampedRecommendation + expectedReplicas int32 + expectedAbleToScale autoscalingv2.HorizontalPodAutoscalerConditionType + expectedAbleToScaleReason string + expectedScalingLimited corev1.ConditionStatus + expectedScalingLimitedReason string + }{ + { + name: "scale up within limits", + currentReplicas: 2, + desiredReplicas: 4, + minReplicas: 1, + maxReplicas: 10, + recommendations: []timestampedRecommendation{{4, time.Now()}}, + expectedReplicas: 4, + expectedAbleToScale: autoscalingv2.AbleToScale, + expectedAbleToScaleReason: "ReadyForNewScale", + expectedScalingLimited: corev1.ConditionFalse, + expectedScalingLimitedReason: "DesiredWithinRange", + }, + { + name: "scale down stabilized", + currentReplicas: 5, + desiredReplicas: 3, + minReplicas: 1, + maxReplicas: 10, + recommendations: []timestampedRecommendation{{4, time.Now().Add(-1 * time.Minute)}, {3, time.Now()}}, + expectedReplicas: 4, + expectedAbleToScale: autoscalingv2.AbleToScale, + expectedAbleToScaleReason: "ScaleDownStabilized", + expectedScalingLimited: corev1.ConditionFalse, + expectedScalingLimitedReason: "DesiredWithinRange", + }, + { + name: "at min replicas", + currentReplicas: 2, + desiredReplicas: 1, + minReplicas: 2, + maxReplicas: 10, + recommendations: []timestampedRecommendation{{1, time.Now()}}, + expectedReplicas: 2, + expectedAbleToScale: autoscalingv2.AbleToScale, + expectedAbleToScaleReason: "ReadyForNewScale", + expectedScalingLimited: corev1.ConditionTrue, + expectedScalingLimitedReason: "TooFewReplicas", + }, + { + name: "at max replicas", + currentReplicas: 10, + desiredReplicas: 12, + minReplicas: 1, + maxReplicas: 10, + recommendations: []timestampedRecommendation{{12, time.Now()}}, + expectedReplicas: 10, + expectedAbleToScale: autoscalingv2.AbleToScale, + expectedAbleToScaleReason: "ReadyForNewScale", + expectedScalingLimited: corev1.ConditionTrue, + expectedScalingLimitedReason: "TooManyReplicas", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + DownscaleStabilisationWindow: 5 * time.Minute, + } + controller.recommendations["test-hpa"] = tc.recommendations + + hpa := &autoscalingv1alpha1.FederatedHPA{ + Spec: autoscalingv1alpha1.FederatedHPASpec{ + MinReplicas: &tc.minReplicas, + MaxReplicas: tc.maxReplicas, + }, + } + + normalized := controller.normalizeDesiredReplicas(hpa, "test-hpa", tc.currentReplicas, tc.desiredReplicas, tc.minReplicas) + + assert.Equal(t, tc.expectedReplicas, normalized, "Unexpected normalized replicas") + + ableToScaleCondition := getCondition(hpa.Status.Conditions, autoscalingv2.AbleToScale) + assert.NotNil(t, ableToScaleCondition, "AbleToScale condition not found") + assert.Equal(t, corev1.ConditionTrue, ableToScaleCondition.Status, "Unexpected AbleToScale condition 
status") + assert.Equal(t, tc.expectedAbleToScaleReason, ableToScaleCondition.Reason, "Unexpected AbleToScale condition reason") + + scalingLimitedCondition := getCondition(hpa.Status.Conditions, autoscalingv2.ScalingLimited) + assert.NotNil(t, scalingLimitedCondition, "ScalingLimited condition not found") + assert.Equal(t, tc.expectedScalingLimited, scalingLimitedCondition.Status, "Unexpected ScalingLimited condition status") + assert.Equal(t, tc.expectedScalingLimitedReason, scalingLimitedCondition.Reason, "Unexpected ScalingLimited condition reason") + }) + } +} + +// TestNormalizeDesiredReplicasWithBehaviors checks replica normalization with scaling behaviors +func TestNormalizeDesiredReplicasWithBehaviors(t *testing.T) { + defaultStabilizationWindowSeconds := int32(300) + defaultSelectPolicy := autoscalingv2.MaxChangePolicySelect + + tests := []struct { + name string + hpa *autoscalingv1alpha1.FederatedHPA + key string + currentReplicas int32 + prenormalizedReplicas int32 + expectedReplicas int32 + }{ + { + name: "Scale up with behavior", + hpa: createTestHPA(1, 10, &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: createTestScalingRules(&defaultStabilizationWindowSeconds, &defaultSelectPolicy, []autoscalingv2.HPAScalingPolicy{{Type: autoscalingv2.PercentScalingPolicy, Value: 200, PeriodSeconds: 60}}), + ScaleDown: createTestScalingRules(&defaultStabilizationWindowSeconds, &defaultSelectPolicy, []autoscalingv2.HPAScalingPolicy{{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 60}}), + }), + key: "test-hpa", + currentReplicas: 5, + prenormalizedReplicas: 15, + expectedReplicas: 10, + }, + { + name: "Scale down with behavior", + hpa: createTestHPA(1, 10, &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: createTestScalingRules(&defaultStabilizationWindowSeconds, &defaultSelectPolicy, []autoscalingv2.HPAScalingPolicy{{Type: autoscalingv2.PercentScalingPolicy, Value: 200, PeriodSeconds: 60}}), + ScaleDown: createTestScalingRules(&defaultStabilizationWindowSeconds, &defaultSelectPolicy, []autoscalingv2.HPAScalingPolicy{{Type: autoscalingv2.PercentScalingPolicy, Value: 50, PeriodSeconds: 60}}), + }), + key: "test-hpa", + currentReplicas: 8, + prenormalizedReplicas: 2, + expectedReplicas: 4, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + DownscaleStabilisationWindow: 5 * time.Minute, + } + + normalized := controller.normalizeDesiredReplicasWithBehaviors(tt.hpa, tt.key, tt.currentReplicas, tt.prenormalizedReplicas, *tt.hpa.Spec.MinReplicas) + assert.Equal(t, tt.expectedReplicas, normalized, "Unexpected normalized replicas") + }) + } +} + +// TestGetReplicasChangePerPeriod ensures correct calculation of replica changes over time +func TestGetReplicasChangePerPeriod(t *testing.T) { + now := time.Now() + tests := []struct { + name string + periodSeconds int32 + scaleEvents []timestampedScaleEvent + expectedChange int32 + }{ + { + name: "No events", + periodSeconds: 60, + scaleEvents: []timestampedScaleEvent{}, + expectedChange: 0, + }, + { + name: "Single event within period", + periodSeconds: 60, + scaleEvents: []timestampedScaleEvent{ + {replicaChange: 3, timestamp: now.Add(-30 * time.Second)}, + }, + expectedChange: 3, + }, + { + name: "Multiple events, some outside period", + periodSeconds: 60, + scaleEvents: []timestampedScaleEvent{ + {replicaChange: 3, timestamp: now.Add(-30 * time.Second)}, + {replicaChange: 2, timestamp: 
now.Add(-45 * time.Second)}, + {replicaChange: 1, timestamp: now.Add(-70 * time.Second)}, + }, + expectedChange: 5, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + change := getReplicasChangePerPeriod(tt.periodSeconds, tt.scaleEvents) + assert.Equal(t, tt.expectedChange, change, "Unexpected change in replicas") + }) + } +} + +// TestGetUnableComputeReplicaCountCondition verifies condition creation for compute failures +func TestGetUnableComputeReplicaCountCondition(t *testing.T) { + tests := []struct { + name string + object runtime.Object + reason string + err error + expectedEvent string + expectedMessage string + }{ + { + name: "FederatedHPA with simple error", + object: createTestFederatedHPA("test-hpa", "default"), + reason: "TestReason", + err: fmt.Errorf("test error"), + expectedEvent: "Warning TestReason test error", + expectedMessage: "the HPA was unable to compute the replica count: test error", + }, + { + name: "Different object type", + object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}}, + reason: "PodError", + err: fmt.Errorf("pod error"), + expectedEvent: "Warning PodError pod error", + expectedMessage: "the HPA was unable to compute the replica count: pod error", + }, + { + name: "Complex error message", + object: createTestFederatedHPA("complex-hpa", "default"), + reason: "ComplexError", + err: fmt.Errorf("error: %v", fmt.Errorf("nested error")), + expectedEvent: "Warning ComplexError error: nested error", + expectedMessage: "the HPA was unable to compute the replica count: error: nested error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + controller := &FHPAController{ + EventRecorder: fakeRecorder, + } + + condition := controller.getUnableComputeReplicaCountCondition(tt.object, tt.reason, tt.err) + + assert.Equal(t, autoscalingv2.ScalingActive, condition.Type, "Unexpected condition type") + assert.Equal(t, corev1.ConditionFalse, condition.Status, "Unexpected condition status") + assert.Equal(t, tt.reason, condition.Reason, "Unexpected condition reason") + assert.Equal(t, tt.expectedMessage, condition.Message, "Unexpected condition message") + + select { + case event := <-fakeRecorder.Events: + assert.Equal(t, tt.expectedEvent, event, "Unexpected event recorded") + case <-time.After(time.Second): + t.Error("Expected an event to be recorded, but none was") + } + }) + } +} + +// TestStoreScaleEvent checks proper storage of scaling events +func TestStoreScaleEvent(t *testing.T) { + tests := []struct { + name string + behavior *autoscalingv2.HorizontalPodAutoscalerBehavior + key string + prevReplicas int32 + newReplicas int32 + expectedUp int + expectedDown int + }{ + { + name: "Scale up event", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](int32(60)), + }, + }, + key: "test-hpa", + prevReplicas: 5, + newReplicas: 10, + expectedUp: 1, + expectedDown: 0, + }, + { + name: "Scale down event", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](int32(60)), + }, + }, + key: "test-hpa", + prevReplicas: 10, + newReplicas: 5, + expectedUp: 0, + expectedDown: 1, + }, + { + name: "Nil behavior", + behavior: nil, + key: "test-hpa", + prevReplicas: 5, + newReplicas: 5, + expectedUp: 0, + expectedDown: 0, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + scaleUpEvents: make(map[string][]timestampedScaleEvent), + scaleDownEvents: make(map[string][]timestampedScaleEvent), + } + + controller.storeScaleEvent(tt.behavior, tt.key, tt.prevReplicas, tt.newReplicas) + + assert.Len(t, controller.scaleUpEvents[tt.key], tt.expectedUp, "Unexpected number of scale up events") + assert.Len(t, controller.scaleDownEvents[tt.key], tt.expectedDown, "Unexpected number of scale down events") + }) + } +} + +// TestStabilizeRecommendationWithBehaviors verifies recommendation stabilization with behaviors +func TestStabilizeRecommendationWithBehaviors(t *testing.T) { + now := time.Now() + upWindow := int32(300) // 5 minutes + downWindow := int32(600) // 10 minutes + + tests := []struct { + name string + args NormalizationArg + initialRecommendations []timestampedRecommendation + expectedReplicas int32 + expectedReason string + expectedMessage string + }{ + { + name: "Scale up stabilized", + args: NormalizationArg{ + Key: "test-hpa-1", + DesiredReplicas: 10, + CurrentReplicas: 5, + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &upWindow, + }, + ScaleDownBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &downWindow, + }, + }, + initialRecommendations: []timestampedRecommendation{ + {recommendation: 8, timestamp: now.Add(-2 * time.Minute)}, + {recommendation: 7, timestamp: now.Add(-4 * time.Minute)}, + }, + expectedReplicas: 7, + expectedReason: "ScaleUpStabilized", + expectedMessage: "recent recommendations were lower than current one, applying the lowest recent recommendation", + }, + { + name: "Scale down stabilized", + args: NormalizationArg{ + Key: "test-hpa-2", + DesiredReplicas: 3, + CurrentReplicas: 8, + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &upWindow, + }, + ScaleDownBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &downWindow, + }, + }, + initialRecommendations: []timestampedRecommendation{ + {recommendation: 5, timestamp: now.Add(-5 * time.Minute)}, + {recommendation: 4, timestamp: now.Add(-8 * time.Minute)}, + }, + expectedReplicas: 5, + expectedReason: "ScaleDownStabilized", + expectedMessage: "recent recommendations were higher than current one, applying the highest recent recommendation", + }, + { + name: "No change needed", + args: NormalizationArg{ + Key: "test-hpa-3", + DesiredReplicas: 5, + CurrentReplicas: 5, + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &upWindow, + }, + ScaleDownBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &downWindow, + }, + }, + initialRecommendations: []timestampedRecommendation{ + {recommendation: 5, timestamp: now.Add(-1 * time.Minute)}, + }, + expectedReplicas: 5, + expectedReason: "ScaleUpStabilized", + expectedMessage: "recent recommendations were lower than current one, applying the lowest recent recommendation", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + } + + controller.recommendations[tt.args.Key] = tt.initialRecommendations + + gotReplicas, gotReason, gotMessage := controller.stabilizeRecommendationWithBehaviors(tt.args) + + assert.Equal(t, tt.expectedReplicas, gotReplicas, "Unexpected stabilized replicas") + assert.Equal(t, tt.expectedReason, gotReason, "Unexpected stabilization reason") + assert.Equal(t, tt.expectedMessage, 
gotMessage, "Unexpected stabilization message") + + storedRecommendations := controller.recommendations[tt.args.Key] + assert.True(t, containsRecommendation(storedRecommendations, tt.args.DesiredReplicas), "New recommendation not found in stored recommendations") + assert.Len(t, storedRecommendations, len(tt.initialRecommendations)+1, "Unexpected number of stored recommendations") + }) + } +} + +// TestConvertDesiredReplicasWithBehaviorRate verifies replica conversion with behavior rates +func TestConvertDesiredReplicasWithBehaviorRate(t *testing.T) { + tests := []struct { + name string + args NormalizationArg + scaleUpEvents []timestampedScaleEvent + scaleDownEvents []timestampedScaleEvent + expectedReplicas int32 + expectedReason string + expectedMessage string + }{ + { + name: "Scale up within limits", + args: NormalizationArg{ + Key: "test-hpa", + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 200, PeriodSeconds: 60}, + }, + }, + MinReplicas: 1, + MaxReplicas: 10, + CurrentReplicas: 5, + DesiredReplicas: 8, + }, + expectedReplicas: 8, + expectedReason: "DesiredWithinRange", + expectedMessage: "the desired count is within the acceptable range", + }, + { + name: "Scale down within limits", + args: NormalizationArg{ + Key: "test-hpa", + ScaleDownBehavior: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 60}, + }, + }, + MinReplicas: 1, + MaxReplicas: 10, + CurrentReplicas: 5, + DesiredReplicas: 3, + }, + expectedReplicas: 3, + expectedReason: "DesiredWithinRange", + expectedMessage: "the desired count is within the acceptable range", + }, + { + name: "Scale up beyond MaxReplicas", + args: NormalizationArg{ + Key: "test-hpa", + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 200, PeriodSeconds: 60}, + }, + }, + MinReplicas: 1, + MaxReplicas: 10, + CurrentReplicas: 8, + DesiredReplicas: 12, + }, + expectedReplicas: 10, + expectedReason: "TooManyReplicas", + expectedMessage: "the desired replica count is more than the maximum replica count", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + scaleUpEvents: make(map[string][]timestampedScaleEvent), + scaleDownEvents: make(map[string][]timestampedScaleEvent), + } + controller.scaleUpEvents[tt.args.Key] = tt.scaleUpEvents + controller.scaleDownEvents[tt.args.Key] = tt.scaleDownEvents + + replicas, reason, message := controller.convertDesiredReplicasWithBehaviorRate(tt.args) + + assert.Equal(t, tt.expectedReplicas, replicas, "Unexpected number of replicas") + assert.Equal(t, tt.expectedReason, reason, "Unexpected reason") + assert.Equal(t, tt.expectedMessage, message, "Unexpected message") + }) + } +} + +// TestConvertDesiredReplicasWithRules checks replica conversion using basic rules +func TestConvertDesiredReplicasWithRules(t *testing.T) { + tests := []struct { + name string + currentReplicas int32 + desiredReplicas int32 + hpaMinReplicas int32 + hpaMaxReplicas int32 + expectedReplicas int32 + expectedCondition string + expectedReason string + }{ + { + name: "Desired within range", + currentReplicas: 5, + desiredReplicas: 7, + hpaMinReplicas: 3, + hpaMaxReplicas: 10, + expectedReplicas: 7, + expectedCondition: "DesiredWithinRange", + expectedReason: "the desired count is within the 
acceptable range", + }, + { + name: "Desired below min", + currentReplicas: 5, + desiredReplicas: 2, + hpaMinReplicas: 3, + hpaMaxReplicas: 10, + expectedReplicas: 3, + expectedCondition: "TooFewReplicas", + expectedReason: "the desired replica count is less than the minimum replica count", + }, + { + name: "Desired above max", + currentReplicas: 5, + desiredReplicas: 15, + hpaMinReplicas: 3, + hpaMaxReplicas: 10, + expectedReplicas: 10, + expectedCondition: "TooManyReplicas", + expectedReason: "the desired replica count is more than the maximum replica count", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + replicas, condition, reason := convertDesiredReplicasWithRules(tt.currentReplicas, tt.desiredReplicas, tt.hpaMinReplicas, tt.hpaMaxReplicas) + assert.Equal(t, tt.expectedReplicas, replicas, "Unexpected number of replicas") + assert.Equal(t, tt.expectedCondition, condition, "Unexpected condition") + assert.Equal(t, tt.expectedReason, reason, "Unexpected reason") + }) + } +} + +// TestCalculateScaleUpLimitWithScalingRules verifies scale-up limit calculation with rules +func TestCalculateScaleUpLimit(t *testing.T) { + tests := []struct { + name string + currentReplicas int32 + expectedLimit int32 + }{ + { + name: "Small scale up", + currentReplicas: 1, + expectedLimit: 4, + }, + { + name: "Medium scale up", + currentReplicas: 10, + expectedLimit: 20, + }, + { + name: "Large scale up", + currentReplicas: 100, + expectedLimit: 200, + }, + { + name: "Zero replicas", + currentReplicas: 0, + expectedLimit: 4, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + limit := calculateScaleUpLimit(tt.currentReplicas) + assert.Equal(t, tt.expectedLimit, limit, "Unexpected scale up limit") + }) + } +} + +// TestMarkScaleEventsOutdated ensures proper marking of outdated scale events +func TestMarkScaleEventsOutdated(t *testing.T) { + now := time.Now() + tests := []struct { + name string + scaleEvents []timestampedScaleEvent + longestPolicyPeriod int32 + expectedOutdated []bool + }{ + { + name: "All events within period", + scaleEvents: []timestampedScaleEvent{ + {timestamp: now.Add(-30 * time.Second)}, + {timestamp: now.Add(-60 * time.Second)}, + }, + longestPolicyPeriod: 120, + expectedOutdated: []bool{false, false}, + }, + { + name: "Some events outdated", + scaleEvents: []timestampedScaleEvent{ + {timestamp: now.Add(-30 * time.Second)}, + {timestamp: now.Add(-90 * time.Second)}, + {timestamp: now.Add(-150 * time.Second)}, + }, + longestPolicyPeriod: 120, + expectedOutdated: []bool{false, false, true}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + markScaleEventsOutdated(tt.scaleEvents, tt.longestPolicyPeriod) + for i, event := range tt.scaleEvents { + assert.Equal(t, tt.expectedOutdated[i], event.outdated, "Unexpected outdated status for event %d", i) + } + }) + } +} + +// TestGetLongestPolicyPeriod checks retrieval of the longest policy period +func TestGetLongestPolicyPeriod(t *testing.T) { + tests := []struct { + name string + scalingRules *autoscalingv2.HPAScalingRules + expectedPeriod int32 + }{ + { + name: "Single policy", + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {PeriodSeconds: 60}, + }, + }, + expectedPeriod: 60, + }, + { + name: "Multiple policies", + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {PeriodSeconds: 60}, + {PeriodSeconds: 120}, + {PeriodSeconds: 30}, + }, + }, + expectedPeriod: 120, + 
}, + { + name: "No policies", + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{}, + }, + expectedPeriod: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + period := getLongestPolicyPeriod(tt.scalingRules) + assert.Equal(t, tt.expectedPeriod, period, "Unexpected longest policy period") + }) + } +} + +// TestCalculateScaleUpLimitWithScalingRules verifies scale-up limit calculation with rules +func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) { + baseTime := time.Now() + disabledPolicy := autoscalingv2.DisabledPolicySelect + minChangePolicy := autoscalingv2.MinChangePolicySelect + + tests := []struct { + name string + currentReplicas int32 + scaleUpEvents []timestampedScaleEvent + scaleDownEvents []timestampedScaleEvent + scalingRules *autoscalingv2.HPAScalingRules + expectedLimit int32 + }{ + { + name: "No previous events", + currentReplicas: 5, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60}, + }, + }, + expectedLimit: 9, + }, + { + name: "With previous scale up event", + currentReplicas: 5, + scaleUpEvents: []timestampedScaleEvent{ + {replicaChange: 2, timestamp: baseTime.Add(-30 * time.Second)}, + }, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60}, + }, + }, + expectedLimit: 7, + }, + { + name: "Disabled policy", + currentReplicas: 5, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + SelectPolicy: &disabledPolicy, + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60}, + }, + }, + expectedLimit: 5, + }, + { + name: "MinChange policy", + currentReplicas: 5, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + SelectPolicy: &minChangePolicy, + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60}, + {Type: autoscalingv2.PodsScalingPolicy, Value: 2, PeriodSeconds: 60}, + }, + }, + expectedLimit: 7, + }, + { + name: "Percent scaling policy", + currentReplicas: 10, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 50, PeriodSeconds: 60}, + }, + }, + expectedLimit: 15, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + limit := calculateScaleUpLimitWithScalingRules(tt.currentReplicas, tt.scaleUpEvents, tt.scaleDownEvents, tt.scalingRules) + assert.Equal(t, tt.expectedLimit, limit, "Unexpected scale up limit") + }) + } +} + +// TestCalculateScaleDownLimitWithBehaviors checks scale-down limit calculation with behaviors +func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) { + baseTime := time.Now() + disabledPolicy := autoscalingv2.DisabledPolicySelect + minChangePolicy := autoscalingv2.MinChangePolicySelect + + tests := []struct { + name string + currentReplicas int32 + scaleUpEvents []timestampedScaleEvent + scaleDownEvents 
[]timestampedScaleEvent + scalingRules *autoscalingv2.HPAScalingRules + expectedLimit int32 + }{ + { + name: "No previous events", + currentReplicas: 10, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 20, PeriodSeconds: 60}, + }, + }, + expectedLimit: 8, + }, + { + name: "With previous scale down event", + currentReplicas: 10, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{ + {replicaChange: 1, timestamp: baseTime.Add(-30 * time.Second)}, + }, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 20, PeriodSeconds: 60}, + }, + }, + expectedLimit: 8, + }, + { + name: "Multiple policies", + currentReplicas: 100, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 10, PeriodSeconds: 60}, + {Type: autoscalingv2.PodsScalingPolicy, Value: 5, PeriodSeconds: 60}, + }, + }, + expectedLimit: 90, + }, + { + name: "Disabled policy", + currentReplicas: 10, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + SelectPolicy: &disabledPolicy, + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 20, PeriodSeconds: 60}, + }, + }, + expectedLimit: 10, + }, + { + name: "MinChange policy", + currentReplicas: 100, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + SelectPolicy: &minChangePolicy, + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 10, PeriodSeconds: 60}, + {Type: autoscalingv2.PodsScalingPolicy, Value: 15, PeriodSeconds: 60}, + }, + }, + expectedLimit: 90, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + limit := calculateScaleDownLimitWithBehaviors(tt.currentReplicas, tt.scaleUpEvents, tt.scaleDownEvents, tt.scalingRules) + assert.Equal(t, tt.expectedLimit, limit, "Unexpected scale down limit") + }) + } +} + +// TestSetCurrentReplicasInStatus verifies setting of current replicas in HPA status +func TestSetCurrentReplicasInStatus(t *testing.T) { + controller := &FHPAController{} + hpa := &autoscalingv1alpha1.FederatedHPA{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + DesiredReplicas: 5, + CurrentMetrics: []autoscalingv2.MetricStatus{ + {Type: autoscalingv2.ResourceMetricSourceType}, + }, + }, + } + + controller.setCurrentReplicasInStatus(hpa, 3) + + assert.Equal(t, int32(3), hpa.Status.CurrentReplicas) + assert.Equal(t, int32(5), hpa.Status.DesiredReplicas) + assert.Len(t, hpa.Status.CurrentMetrics, 1) + assert.Nil(t, hpa.Status.LastScaleTime) +} + +// TestSetStatus ensures correct status setting for FederatedHPA +func TestSetStatus(t *testing.T) { + tests := []struct { + name string + currentReplicas int32 + desiredReplicas int32 + metricStatuses []autoscalingv2.MetricStatus + rescale bool + initialLastScale *metav1.Time + }{ + { + name: "Update without rescale", + currentReplicas: 3, + desiredReplicas: 5, + metricStatuses: []autoscalingv2.MetricStatus{ + {Type: 
autoscalingv2.ResourceMetricSourceType}, + }, + rescale: false, + initialLastScale: nil, + }, + { + name: "Update with rescale", + currentReplicas: 3, + desiredReplicas: 5, + metricStatuses: []autoscalingv2.MetricStatus{ + {Type: autoscalingv2.ResourceMetricSourceType}, + }, + rescale: true, + initialLastScale: &metav1.Time{Time: time.Now().Add(-1 * time.Hour)}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{} + hpa := &autoscalingv1alpha1.FederatedHPA{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + LastScaleTime: tt.initialLastScale, + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + {Type: autoscalingv2.ScalingActive}, + }, + }, + } + + controller.setStatus(hpa, tt.currentReplicas, tt.desiredReplicas, tt.metricStatuses, tt.rescale) + + assert.Equal(t, tt.currentReplicas, hpa.Status.CurrentReplicas) + assert.Equal(t, tt.desiredReplicas, hpa.Status.DesiredReplicas) + assert.Equal(t, tt.metricStatuses, hpa.Status.CurrentMetrics) + assert.Len(t, hpa.Status.Conditions, 1) + + if tt.rescale { + assert.NotNil(t, hpa.Status.LastScaleTime) + assert.True(t, hpa.Status.LastScaleTime.After(time.Now().Add(-1*time.Second))) + } else { + assert.Equal(t, tt.initialLastScale, hpa.Status.LastScaleTime) + } + }) + } +} + +// TestSetCondition verifies proper condition setting in FederatedHPA +func TestSetCondition(t *testing.T) { + tests := []struct { + name string + initialHPA *autoscalingv1alpha1.FederatedHPA + conditionType autoscalingv2.HorizontalPodAutoscalerConditionType + status corev1.ConditionStatus + reason string + message string + args []interface{} + expectedLength int + checkIndex int + }{ + { + name: "Add new condition", + initialHPA: &autoscalingv1alpha1.FederatedHPA{}, + conditionType: autoscalingv2.ScalingActive, + status: corev1.ConditionTrue, + reason: "TestReason", + message: "Test message", + expectedLength: 1, + checkIndex: 0, + }, + { + name: "Update existing condition", + initialHPA: &autoscalingv1alpha1.FederatedHPA{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionFalse, + }, + }, + }, + }, + conditionType: autoscalingv2.ScalingActive, + status: corev1.ConditionTrue, + reason: "UpdatedReason", + message: "Updated message", + expectedLength: 1, + checkIndex: 0, + }, + { + name: "Add condition with formatted message", + initialHPA: &autoscalingv1alpha1.FederatedHPA{}, + conditionType: autoscalingv2.AbleToScale, + status: corev1.ConditionTrue, + reason: "FormattedReason", + message: "Formatted message: %d", + args: []interface{}{42}, + expectedLength: 1, + checkIndex: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setCondition(tt.initialHPA, tt.conditionType, tt.status, tt.reason, tt.message, tt.args...) + + assert.Len(t, tt.initialHPA.Status.Conditions, tt.expectedLength, "Unexpected number of conditions") + + condition := tt.initialHPA.Status.Conditions[tt.checkIndex] + assert.Equal(t, tt.conditionType, condition.Type, "Unexpected condition type") + assert.Equal(t, tt.status, condition.Status, "Unexpected condition status") + assert.Equal(t, tt.reason, condition.Reason, "Unexpected condition reason") + + expectedMessage := tt.message + if len(tt.args) > 0 { + expectedMessage = fmt.Sprintf(tt.message, tt.args...) 
+ } + assert.Equal(t, expectedMessage, condition.Message, "Unexpected condition message") + assert.False(t, condition.LastTransitionTime.IsZero(), "LastTransitionTime should be set") + }) + } +} + +// TestSetConditionInList ensures proper condition setting in a list of conditions +func TestSetConditionInList(t *testing.T) { + tests := []struct { + name string + inputList []autoscalingv2.HorizontalPodAutoscalerCondition + conditionType autoscalingv2.HorizontalPodAutoscalerConditionType + status corev1.ConditionStatus + reason string + message string + args []interface{} + expectedLength int + checkIndex int + }{ + { + name: "Add new condition", + inputList: []autoscalingv2.HorizontalPodAutoscalerCondition{}, + conditionType: autoscalingv2.ScalingActive, + status: corev1.ConditionTrue, + reason: "TestReason", + message: "Test message", + expectedLength: 1, + checkIndex: 0, + }, + { + name: "Update existing condition", + inputList: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionFalse, + }, + }, + conditionType: autoscalingv2.ScalingActive, + status: corev1.ConditionTrue, + reason: "UpdatedReason", + message: "Updated message", + expectedLength: 1, + checkIndex: 0, + }, + { + name: "Add condition with formatted message", + inputList: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionTrue, + }, + }, + conditionType: autoscalingv2.AbleToScale, + status: corev1.ConditionTrue, + reason: "FormattedReason", + message: "Formatted message: %d", + args: []interface{}{42}, + expectedLength: 2, + checkIndex: 1, + }, + { + name: "Update condition without changing status", + inputList: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + }, + }, + conditionType: autoscalingv2.ScalingActive, + status: corev1.ConditionTrue, + reason: "NewReason", + message: "New message", + expectedLength: 1, + checkIndex: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := setConditionInList(tt.inputList, tt.conditionType, tt.status, tt.reason, tt.message, tt.args...) + + assert.Len(t, result, tt.expectedLength, "Unexpected length of result list") + + condition := result[tt.checkIndex] + assert.Equal(t, tt.conditionType, condition.Type, "Unexpected condition type") + assert.Equal(t, tt.status, condition.Status, "Unexpected condition status") + assert.Equal(t, tt.reason, condition.Reason, "Unexpected condition reason") + + expectedMessage := tt.message + if len(tt.args) > 0 { + expectedMessage = fmt.Sprintf(tt.message, tt.args...) 
+ } + assert.Equal(t, expectedMessage, condition.Message, "Unexpected condition message") + + if tt.name == "Update existing condition" { + assert.False(t, condition.LastTransitionTime.IsZero(), "LastTransitionTime should be set") + } + + if tt.name == "Update condition without changing status" { + assert.Equal(t, tt.inputList[0].LastTransitionTime, condition.LastTransitionTime, "LastTransitionTime should not change") + } + }) + } +} + +// Helper functions +func getCondition(conditions []autoscalingv2.HorizontalPodAutoscalerCondition, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType) *autoscalingv2.HorizontalPodAutoscalerCondition { + for _, condition := range conditions { + if condition.Type == conditionType { + return &condition + } + } + return nil +} + +func createTestFederatedHPA(name, namespace string) *autoscalingv1alpha1.FederatedHPA { + return &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } +} + +func createTestHPA(minReplicas, maxReplicas int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv1alpha1.FederatedHPA { + return &autoscalingv1alpha1.FederatedHPA{ + Spec: autoscalingv1alpha1.FederatedHPASpec{ + MinReplicas: &minReplicas, + MaxReplicas: maxReplicas, + Behavior: behavior, + }, + } +} + +func createTestScalingRules(stabilizationWindowSeconds *int32, selectPolicy *autoscalingv2.ScalingPolicySelect, policies []autoscalingv2.HPAScalingPolicy) *autoscalingv2.HPAScalingRules { + return &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: stabilizationWindowSeconds, + SelectPolicy: selectPolicy, + Policies: policies, + } +} + +func countOldRecommendations(recommendations []timestampedRecommendation, window time.Duration) int { + count := 0 + now := time.Now() + for _, rec := range recommendations { + if rec.timestamp.Before(now.Add(-window)) { + count++ + } + } + return count +} + +func containsRecommendation(slice []timestampedRecommendation, recommendation int32) bool { + for _, item := range slice { + if item.recommendation == recommendation { + return true + } + } + return false +} diff --git a/pkg/controllers/federatedhpa/metrics/client_test.go b/pkg/controllers/federatedhpa/metrics/client_test.go new file mode 100644 index 000000000000..e3824ad3a5c5 --- /dev/null +++ b/pkg/controllers/federatedhpa/metrics/client_test.go @@ -0,0 +1,452 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "context" + "errors" + "testing" + "time" + + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2" + externalapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1" + "k8s.io/metrics/pkg/apis/metrics/v1beta1" + resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" + customclient "k8s.io/metrics/pkg/client/custom_metrics" + externalclient "k8s.io/metrics/pkg/client/external_metrics" +) + +// Mock clients and interfaces +type mockResourceClient struct { + resourceclient.PodMetricsesGetter +} + +type mockCustomClient struct { + customclient.CustomMetricsClient +} + +type mockExternalClient struct { + externalclient.ExternalMetricsClient +} + +type mockExternalMetricsClient struct { + externalclient.ExternalMetricsClient + metrics *externalapi.ExternalMetricValueList + err error +} + +type mockExternalMetricsInterface struct { + externalclient.MetricsInterface + metrics *externalapi.ExternalMetricValueList + err error +} + +type mockCustomMetricsClient struct { + customclient.CustomMetricsClient + metrics *customapi.MetricValueList + err error +} + +type mockCustomMetricsInterface struct { + customclient.MetricsInterface + metrics *customapi.MetricValueList + err error +} + +type mockPodMetricsGetter struct { + metrics *v1beta1.PodMetricsList + err error +} + +type mockPodMetricsInterface struct { + resourceclient.PodMetricsInterface + metrics *v1beta1.PodMetricsList + err error +} + +func (m *mockExternalMetricsClient) NamespacedMetrics(_ string) externalclient.MetricsInterface { + return &mockExternalMetricsInterface{metrics: m.metrics, err: m.err} +} + +func (m *mockExternalMetricsInterface) List(_ string, _ labels.Selector) (*externalapi.ExternalMetricValueList, error) { + return m.metrics, m.err +} + +func (m *mockCustomMetricsClient) NamespacedMetrics(_ string) customclient.MetricsInterface { + return &mockCustomMetricsInterface{metrics: m.metrics, err: m.err} +} + +func (m *mockCustomMetricsInterface) GetForObjects(_ schema.GroupKind, _ labels.Selector, _ string, _ labels.Selector) (*customapi.MetricValueList, error) { + return m.metrics, m.err +} + +func (m *mockCustomMetricsInterface) GetForObject(_ schema.GroupKind, _ string, _ string, _ labels.Selector) (*customapi.MetricValue, error) { + if len(m.metrics.Items) > 0 { + return &m.metrics.Items[0], m.err + } + return nil, m.err +} + +func (m *mockPodMetricsGetter) PodMetricses(_ string) resourceclient.PodMetricsInterface { + return &mockPodMetricsInterface{metrics: m.metrics, err: m.err} +} + +func (m *mockPodMetricsInterface) List(_ context.Context, _ metav1.ListOptions) (*v1beta1.PodMetricsList, error) { + return m.metrics, m.err +} + +// Test functions + +// TestNewRESTMetricsClient verifies that NewRESTMetricsClient returns a non-nil client for the given clients. +func TestNewRESTMetricsClient(t *testing.T) { + resourceClient := &mockResourceClient{} + customClient := &mockCustomClient{} + externalClient := &mockExternalClient{} + + client := NewRESTMetricsClient(resourceClient, customClient, externalClient) + + if client == nil { + t.Error("Expected non-nil client, got nil") + } +} + +// TestGetResourceMetric tests the GetResourceMetric function with various scenarios. 
+func TestGetResourceMetric(t *testing.T) { + tests := []struct { + name string + mockMetrics *v1beta1.PodMetricsList + mockError error + container string + expectedError string + expectedResult PodMetricsInfo + }{ + { + name: "Successful retrieval", + mockMetrics: &v1beta1.PodMetricsList{ + Items: []v1beta1.PodMetrics{ + createPodMetrics("pod1", "container1", 100), + }, + }, + expectedResult: PodMetricsInfo{ + "pod1": {Value: 100}, + }, + }, + { + name: "API error", + mockError: errors.New("API error"), + expectedError: "unable to fetch metrics from resource metrics API: API error", + }, + { + name: "Empty metrics", + mockMetrics: &v1beta1.PodMetricsList{}, + expectedError: "no metrics returned from resource metrics API", + }, + { + name: "Container-specific metrics", + mockMetrics: &v1beta1.PodMetricsList{ + Items: []v1beta1.PodMetrics{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "default"}, + Containers: []v1beta1.ContainerMetrics{ + createPodMetrics("pod1", "container1", 100).Containers[0], + createPodMetrics("pod1", "container2", 200).Containers[0], + }, + }, + }, + }, + container: "container2", + expectedResult: PodMetricsInfo{ + "pod1": {Value: 200}, + }, + }, + { + name: "Container not found", + mockMetrics: &v1beta1.PodMetricsList{ + Items: []v1beta1.PodMetrics{ + createPodMetrics("pod1", "container1", 100), + }, + }, + container: "nonexistent", + expectedError: "failed to get container metrics: container nonexistent not present in metrics for pod default/pod1", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := setupMockResourceClient(tt.mockMetrics, tt.mockError) + result, _, err := client.GetResourceMetric(context.Background(), corev1.ResourceCPU, "default", labels.Everything(), tt.container) + + assertError(t, err, tt.expectedError) + assertPodMetricsInfoEqual(t, result, tt.expectedResult) + }) + } +} + +// TestGetExternalMetric tests the retrieval of external metrics. +func TestGetExternalMetric(t *testing.T) { + tests := []struct { + name string + mockMetrics *externalapi.ExternalMetricValueList + mockError error + expectedValues []int64 + expectedError string + }{ + { + name: "Successful retrieval", + mockMetrics: &externalapi.ExternalMetricValueList{ + Items: []externalapi.ExternalMetricValue{ + {Value: *resource.NewQuantity(100, resource.DecimalSI)}, + {Value: *resource.NewQuantity(200, resource.DecimalSI)}, + }, + }, + expectedValues: []int64{100000, 200000}, + }, + { + name: "API error", + mockError: errors.New("API error"), + expectedError: "unable to fetch metrics from external metrics API: API error", + }, + { + name: "Empty metrics", + mockMetrics: &externalapi.ExternalMetricValueList{}, + expectedError: "no metrics returned from external metrics API", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := setupMockExternalClient(tt.mockMetrics, tt.mockError) + values, _, err := client.GetExternalMetric("test-metric", "default", labels.Everything()) + + assertError(t, err, tt.expectedError) + assertInt64SliceEqual(t, values, tt.expectedValues) + }) + } +} + +// TestGetRawMetric tests the retrieval of raw custom metrics. 
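+// As in the external-metrics cases above, the expected values assume the client reports
+// each custom metric Quantity as its milli-value, so a quantity of 100 is expected back
+// as 100000 in the resulting PodMetricsInfo.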
+func TestGetRawMetric(t *testing.T) { + tests := []struct { + name string + mockMetrics *customapi.MetricValueList + mockError error + expectedResult PodMetricsInfo + expectedError string + }{ + { + name: "Successful retrieval", + mockMetrics: &customapi.MetricValueList{ + Items: []customapi.MetricValue{ + { + DescribedObject: corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + APIVersion: "v1", + }, + Metric: customapi.MetricIdentifier{ + Name: "test-metric", + }, + Timestamp: metav1.Time{Time: time.Now()}, + Value: *resource.NewQuantity(100, resource.DecimalSI), + }, + }, + }, + expectedResult: PodMetricsInfo{ + "pod1": {Value: 100000}, + }, + }, + { + name: "API error", + mockError: errors.New("API error"), + expectedError: "unable to fetch metrics from custom metrics API: API error", + }, + { + name: "Empty metrics", + mockMetrics: &customapi.MetricValueList{}, + expectedError: "no metrics returned from custom metrics API", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := setupMockCustomClient(tt.mockMetrics, tt.mockError) + result, _, err := client.GetRawMetric("test-metric", "default", labels.Everything(), labels.Everything()) + + assertError(t, err, tt.expectedError) + assertPodMetricsInfoEqual(t, result, tt.expectedResult) + }) + } +} + +// TestGetObjectMetric tests the retrieval of object-specific custom metrics. +func TestGetObjectMetric(t *testing.T) { + tests := []struct { + name string + mockMetrics *customapi.MetricValueList + mockError error + objectRef *autoscalingv2.CrossVersionObjectReference + expectedValue int64 + expectedError string + }{ + { + name: "Successful retrieval", + mockMetrics: &customapi.MetricValueList{ + Items: []customapi.MetricValue{ + { + DescribedObject: corev1.ObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + APIVersion: "apps/v1", + }, + Metric: customapi.MetricIdentifier{ + Name: "test-metric", + }, + Timestamp: metav1.Time{Time: time.Now()}, + Value: *resource.NewQuantity(100, resource.DecimalSI), + }, + }, + }, + objectRef: &autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + APIVersion: "apps/v1", + }, + expectedValue: 100000, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := setupMockCustomClient(tt.mockMetrics, tt.mockError) + value, _, err := client.GetObjectMetric("test-metric", "default", tt.objectRef, labels.Everything()) + + assertError(t, err, tt.expectedError) + assertInt64Equal(t, value, tt.expectedValue) + }) + } +} + +// Helper functions + +// createPodMetrics creates a PodMetrics object with specified name, container name, and CPU value +func createPodMetrics(name string, containerName string, cpuValue int64) v1beta1.PodMetrics { + return v1beta1.PodMetrics{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}, + Timestamp: metav1.Time{Time: time.Now()}, + Window: metav1.Duration{Duration: time.Minute}, + Containers: []v1beta1.ContainerMetrics{ + { + Name: containerName, + Usage: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(cpuValue, resource.DecimalSI), + }, + }, + }, + } +} + +// setupMockResourceClient creates a mock resource metrics client for testing +func setupMockResourceClient(mockMetrics *v1beta1.PodMetricsList, mockError error) *resourceMetricsClient { + mockClient := &mockResourceClient{} + mockClient.PodMetricsesGetter = &mockPodMetricsGetter{ + metrics: mockMetrics, + err: mockError, + } + return &resourceMetricsClient{client: mockClient} 
+} + +// setupMockExternalClient creates a mock external metrics client for testing +func setupMockExternalClient(mockMetrics *externalapi.ExternalMetricValueList, mockError error) *externalMetricsClient { + mockClient := &mockExternalMetricsClient{ + metrics: mockMetrics, + err: mockError, + } + return &externalMetricsClient{client: mockClient} +} + +// setupMockCustomClient creates a mock custom metrics client for testing +func setupMockCustomClient(mockMetrics *customapi.MetricValueList, mockError error) *customMetricsClient { + mockClient := &mockCustomMetricsClient{ + metrics: mockMetrics, + err: mockError, + } + return &customMetricsClient{client: mockClient} +} + +// assertError checks if the error matches the expected error string +func assertError(t *testing.T, got error, want string) { + if want == "" { + if got != nil { + t.Errorf("Unexpected error: %v", got) + } + } else if got == nil || got.Error() != want { + t.Errorf("Expected error '%s', got '%v'", want, got) + } +} + +// assertPodMetricsInfoEqual compares two PodMetricsInfo objects for equality +func assertPodMetricsInfoEqual(t *testing.T, got, want PodMetricsInfo) { + if !podMetricsInfoEqual(got, want) { + t.Errorf("Expected result %v, got %v", want, got) + } +} + +// assertInt64SliceEqual compares two int64 slices for equality +func assertInt64SliceEqual(t *testing.T, got, want []int64) { + if !int64SliceEqual(got, want) { + t.Errorf("Expected values %v, got %v", want, got) + } +} + +// assertInt64Equal compares two int64 values for equality +func assertInt64Equal(t *testing.T, got, want int64) { + if got != want { + t.Errorf("Expected value %d, got %d", want, got) + } +} + +// int64SliceEqual checks if two int64 slices are equal +func int64SliceEqual(a, b []int64) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +// podMetricsInfoEqual checks if two PodMetricsInfo objects are equal +func podMetricsInfoEqual(a, b PodMetricsInfo) bool { + if len(a) != len(b) { + return false + } + for k, v := range a { + if bv, ok := b[k]; !ok || v.Value != bv.Value { + return false + } + } + return true +} diff --git a/pkg/controllers/federatedhpa/replica_calculator_test.go b/pkg/controllers/federatedhpa/replica_calculator_test.go new file mode 100644 index 000000000000..2814939259df --- /dev/null +++ b/pkg/controllers/federatedhpa/replica_calculator_test.go @@ -0,0 +1,1478 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package federatedhpa + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + + metricsclient "github.com/karmada-io/karmada/pkg/controllers/federatedhpa/metrics" +) + +// MockQueryClient implements a mock for the metrics client +type MockQueryClient struct { + mock.Mock +} + +func (m *MockQueryClient) GetResourceMetric(ctx context.Context, resource corev1.ResourceName, namespace string, selector labels.Selector, container string) (metricsclient.PodMetricsInfo, time.Time, error) { + args := m.Called(ctx, resource, namespace, selector, container) + return args.Get(0).(metricsclient.PodMetricsInfo), args.Get(1).(time.Time), args.Error(2) +} + +func (m *MockQueryClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (metricsclient.PodMetricsInfo, time.Time, error) { + args := m.Called(metricName, namespace, selector, metricSelector) + return args.Get(0).(metricsclient.PodMetricsInfo), args.Get(1).(time.Time), args.Error(2) +} + +func (m *MockQueryClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscalingv2.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error) { + args := m.Called(metricName, namespace, objectRef, metricSelector) + return args.Get(0).(int64), args.Get(1).(time.Time), args.Error(2) +} + +func (m *MockQueryClient) GetExternalMetric(metricName string, namespace string, selector labels.Selector) ([]int64, time.Time, error) { + args := m.Called(metricName, namespace, selector) + return args.Get(0).([]int64), args.Get(1).(time.Time), args.Error(2) +} + +// TestNewReplicaCalculator verifies the creation of a new ReplicaCalculator +func TestNewReplicaCalculator(t *testing.T) { + const ( + defaultTolerance = 0.1 + defaultCPUInitPeriod = 5 * time.Minute + defaultDelayInitReadinessStatus = 30 * time.Second + ) + + tests := []struct { + name string + tolerance float64 + cpuInitPeriod time.Duration + delayInitReadinessStatus time.Duration + }{ + { + name: "Default values", + tolerance: defaultTolerance, + cpuInitPeriod: defaultCPUInitPeriod, + delayInitReadinessStatus: defaultDelayInitReadinessStatus, + }, + { + name: "Zero values", + tolerance: 0, + cpuInitPeriod: 0, + delayInitReadinessStatus: 0, + }, + { + name: "Custom values", + tolerance: 0.2, + cpuInitPeriod: 10 * time.Minute, + delayInitReadinessStatus: 1 * time.Minute, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := &MockQueryClient{} + calculator := NewReplicaCalculator(mockClient, + tt.tolerance, + tt.cpuInitPeriod, + tt.delayInitReadinessStatus) + + assert.NotNil(t, calculator, "Calculator should not be nil") + assert.Equal(t, mockClient, calculator.metricsClient, "Metrics client should match") + assert.Equal(t, tt.tolerance, calculator.tolerance, "Tolerance should match") + assert.Equal(t, tt.cpuInitPeriod, calculator.cpuInitializationPeriod, "CPU initialization period should match") + assert.Equal(t, tt.delayInitReadinessStatus, calculator.delayOfInitialReadinessStatus, "Delay of initial readiness status should match") + }) + } +} + +// TestGetResourceReplicas checks the calculation of resource-based replicas +func TestGetResourceReplicas(t 
*testing.T) { + const ( + defaultNamespace = "default" + defaultContainer = "" + defaultCalibration = 1.0 + defaultTolerance = 0.1 + ) + + mockTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + + testCases := []struct { + name string + currentReplicas int32 + targetUtilization int32 + pods []*corev1.Pod + metrics metricsclient.PodMetricsInfo + expectedReplicas int32 + expectedUtilization int32 + expectedRawUtilization int64 + calibration float64 + tolerance float64 + expectError bool + }{ + { + name: "Scale up", + currentReplicas: 2, + targetUtilization: 50, + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150}, + "pod2": {Value: 150}, + }, + expectedReplicas: 6, + expectedUtilization: 150, + expectedRawUtilization: 150, + calibration: defaultCalibration, + tolerance: defaultTolerance, + }, + { + name: "Scale down", + currentReplicas: 4, + targetUtilization: 50, + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createPod("pod3", 100, 200), + createPod("pod4", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 50}, + "pod2": {Value: 50}, + "pod3": {Value: 50}, + "pod4": {Value: 50}, + }, + expectedReplicas: 4, + expectedUtilization: 50, + expectedRawUtilization: 50, + calibration: defaultCalibration, + tolerance: defaultTolerance, + }, + { + name: "No change (within tolerance)", + currentReplicas: 2, + targetUtilization: 50, + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 52}, + "pod2": {Value: 48}, + }, + expectedReplicas: 2, + expectedUtilization: 50, + expectedRawUtilization: 50, + calibration: defaultCalibration, + tolerance: defaultTolerance, + }, + { + name: "Scale up with unready pods", + currentReplicas: 3, + targetUtilization: 50, + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createUnreadyPod("pod3", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150}, + "pod2": {Value: 150}, + }, + expectedReplicas: 6, + expectedUtilization: 150, + expectedRawUtilization: 150, + calibration: defaultCalibration, + tolerance: defaultTolerance, + }, + { + name: "Scale with calibration", + currentReplicas: 2, + targetUtilization: 50, + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150}, + "pod2": {Value: 150}, + }, + expectedReplicas: 12, + expectedUtilization: 150, + expectedRawUtilization: 150, + calibration: 0.5, + tolerance: defaultTolerance, + }, + { + name: "Error: No pods", + currentReplicas: 2, + targetUtilization: 50, + pods: []*corev1.Pod{}, + metrics: metricsclient.PodMetricsInfo{}, + expectError: true, + }, + { + name: "Error: No metrics for ready pods", + currentReplicas: 2, + targetUtilization: 50, + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{}, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockClient := &MockQueryClient{} + calculator := NewReplicaCalculator(mockClient, + tc.tolerance, + 5*time.Minute, + 30*time.Second) + + mockClient.On("GetResourceMetric", mock.Anything, corev1.ResourceCPU, defaultNamespace, labels.Everything(), defaultContainer). 
+ Return(tc.metrics, mockTime, nil).Once() + + replicas, utilization, rawUtilization, timestamp, err := calculator.GetResourceReplicas( + context.Background(), tc.currentReplicas, tc.targetUtilization, corev1.ResourceCPU, + defaultNamespace, labels.Everything(), defaultContainer, tc.pods, tc.calibration) + + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedReplicas, replicas) + assert.Equal(t, tc.expectedUtilization, utilization) + assert.Equal(t, tc.expectedRawUtilization, rawUtilization) + assert.Equal(t, mockTime, timestamp) + } + + mockClient.AssertExpectations(t) + }) + } +} + +// TestGetRawResourceReplicas verifies the calculation of raw resource-based replicas +func TestGetRawResourceReplicas(t *testing.T) { + const ( + defaultNamespace = "default" + defaultContainer = "" + cpuInitializationPeriod = 5 * time.Minute + delayOfInitialReadinessStatus = 30 * time.Second + ) + + mockTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + + testCases := []struct { + name string + currentReplicas int32 + targetUsage int64 + resource corev1.ResourceName + namespace string + selector labels.Selector + container string + podList []*corev1.Pod + metrics metricsclient.PodMetricsInfo + calibration float64 + expectedReplicas int32 + expectedUsage int64 + expectError bool + }{ + { + name: "Scale up based on raw metrics", + currentReplicas: 2, + targetUsage: 100, + resource: corev1.ResourceCPU, + namespace: defaultNamespace, + selector: labels.Everything(), + container: defaultContainer, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150}, + "pod2": {Value: 150}, + }, + calibration: 1.0, + expectedReplicas: 3, + expectedUsage: 150, + expectError: false, + }, + { + name: "Scale down based on raw metrics", + currentReplicas: 4, + targetUsage: 100, + resource: corev1.ResourceCPU, + namespace: defaultNamespace, + selector: labels.Everything(), + container: defaultContainer, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createPod("pod3", 100, 200), + createPod("pod4", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 50}, + "pod2": {Value: 50}, + "pod3": {Value: 50}, + "pod4": {Value: 50}, + }, + calibration: 1.0, + expectedReplicas: 2, + expectedUsage: 50, + expectError: false, + }, + { + name: "No change (at target usage)", + currentReplicas: 2, + targetUsage: 100, + resource: corev1.ResourceCPU, + namespace: defaultNamespace, + selector: labels.Everything(), + container: defaultContainer, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 100}, + "pod2": {Value: 100}, + }, + calibration: 1.0, + expectedReplicas: 2, + expectedUsage: 100, + expectError: false, + }, + { + name: "Scale with calibration", + currentReplicas: 2, + targetUsage: 100, + resource: corev1.ResourceCPU, + namespace: defaultNamespace, + selector: labels.Everything(), + container: defaultContainer, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150}, + "pod2": {Value: 150}, + }, + calibration: 0.8, + expectedReplicas: 4, + expectedUsage: 150, + expectError: false, + }, + { + name: "Error: No pods", + currentReplicas: 2, + targetUsage: 100, + resource: corev1.ResourceCPU, + namespace: defaultNamespace, + 
selector: labels.Everything(), + container: defaultContainer, + podList: []*corev1.Pod{}, + metrics: metricsclient.PodMetricsInfo{}, + calibration: 1.0, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockClient := &MockQueryClient{} + calculator := NewReplicaCalculator( + mockClient, + 0.1, + cpuInitializationPeriod, + delayOfInitialReadinessStatus, + ) + + mockClient.On("GetResourceMetric", + mock.Anything, + tc.resource, + tc.namespace, + tc.selector, + tc.container, + ).Return(tc.metrics, mockTime, nil).Once() + + replicas, usage, timestamp, err := calculator.GetRawResourceReplicas( + context.Background(), + tc.currentReplicas, + tc.targetUsage, + tc.resource, + tc.namespace, + tc.selector, + tc.container, + tc.podList, + tc.calibration, + ) + + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedReplicas, replicas) + assert.Equal(t, tc.expectedUsage, usage) + assert.Equal(t, mockTime, timestamp) + } + + mockClient.AssertExpectations(t) + }) + } +} + +// TestGetMetricReplicas checks the calculation of metric-based replicas +func TestGetMetricReplicas(t *testing.T) { + const ( + defaultNamespace = "default" + cpuInitializationPeriod = 5 * time.Minute + delayOfInitialReadinessStatus = 30 * time.Second + ) + + mockTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + + testCases := []struct { + name string + currentReplicas int32 + targetUsage int64 + metricName string + namespace string + selector labels.Selector + metricSelector labels.Selector + podList []*corev1.Pod + metrics metricsclient.PodMetricsInfo + calibration float64 + expectedReplicas int32 + expectedUsage int64 + expectError bool + }{ + { + name: "Scale up based on custom metrics", + currentReplicas: 2, + targetUsage: 10, + metricName: "custom_metric", + namespace: defaultNamespace, + selector: labels.Everything(), + metricSelector: labels.Everything(), + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 15}, + "pod2": {Value: 15}, + }, + calibration: 1.0, + expectedReplicas: 3, + expectedUsage: 15, + expectError: false, + }, + { + name: "Scale down based on custom metrics", + currentReplicas: 4, + targetUsage: 20, + metricName: "custom_metric", + namespace: defaultNamespace, + selector: labels.Everything(), + metricSelector: labels.Everything(), + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createPod("pod3", 100, 200), + createPod("pod4", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 10}, + "pod2": {Value: 10}, + "pod3": {Value: 10}, + "pod4": {Value: 10}, + }, + calibration: 1.0, + expectedReplicas: 2, + expectedUsage: 10, + expectError: false, + }, + { + name: "No change (at target usage)", + currentReplicas: 2, + targetUsage: 15, + metricName: "custom_metric", + namespace: defaultNamespace, + selector: labels.Everything(), + metricSelector: labels.Everything(), + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 15}, + "pod2": {Value: 15}, + }, + calibration: 1.0, + expectedReplicas: 2, + expectedUsage: 15, + expectError: false, + }, + { + name: "Scale with calibration", + currentReplicas: 2, + targetUsage: 10, + metricName: "custom_metric", + namespace: defaultNamespace, + selector: labels.Everything(), + metricSelector: 
labels.Everything(), + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 15}, + "pod2": {Value: 15}, + }, + calibration: 0.8, + expectedReplicas: 4, + expectedUsage: 15, + expectError: false, + }, + { + name: "Error: No metrics", + currentReplicas: 2, + targetUsage: 10, + metricName: "custom_metric", + namespace: defaultNamespace, + selector: labels.Everything(), + metricSelector: labels.Everything(), + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{}, + calibration: 1.0, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockClient := &MockQueryClient{} + calculator := NewReplicaCalculator( + mockClient, + 0.1, + cpuInitializationPeriod, + delayOfInitialReadinessStatus, + ) + + mockClient.On("GetRawMetric", + tc.metricName, + tc.namespace, + tc.selector, + tc.metricSelector, + ).Return(tc.metrics, mockTime, nil).Once() + + replicas, usage, timestamp, err := calculator.GetMetricReplicas( + tc.currentReplicas, + tc.targetUsage, + tc.metricName, + tc.namespace, + tc.selector, + tc.metricSelector, + tc.podList, + tc.calibration, + ) + + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedReplicas, replicas) + assert.Equal(t, tc.expectedUsage, usage) + assert.Equal(t, mockTime, timestamp) + } + + mockClient.AssertExpectations(t) + }) + } +} + +// TestCalcPlainMetricReplicas verifies the calculation of plain metric-based replicas +func TestCalcPlainMetricReplicas(t *testing.T) { + const ( + cpuInitializationPeriod = 5 * time.Minute + delayOfInitialReadinessStatus = 30 * time.Second + defaultTolerance = 0.1 + ) + + testCases := []struct { + name string + metrics metricsclient.PodMetricsInfo + currentReplicas int32 + targetUsage int64 + resource corev1.ResourceName + podList []*corev1.Pod + calibration float64 + expectedReplicas int32 + expectedUsage int64 + expectError bool + }{ + { + name: "Scale up", + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 15}, + "pod2": {Value: 15}, + }, + currentReplicas: 2, + targetUsage: 10, + resource: corev1.ResourceCPU, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + calibration: 1.0, + expectedReplicas: 3, + expectedUsage: 15, + expectError: false, + }, + { + name: "Scale down", + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 5}, + "pod2": {Value: 5}, + "pod3": {Value: 5}, + "pod4": {Value: 5}, + }, + currentReplicas: 4, + targetUsage: 10, + resource: corev1.ResourceCPU, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createPod("pod3", 100, 200), + createPod("pod4", 100, 200), + }, + calibration: 1.0, + expectedReplicas: 2, + expectedUsage: 5, + expectError: false, + }, + { + name: "No change (within tolerance)", + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 10}, + "pod2": {Value: 10}, + }, + currentReplicas: 2, + targetUsage: 10, + resource: corev1.ResourceCPU, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + calibration: 1.0, + expectedReplicas: 2, + expectedUsage: 10, + expectError: false, + }, + { + name: "Scale up with unready pods", + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 15}, + "pod2": {Value: 15}, + }, + currentReplicas: 3, + targetUsage: 10, + resource: corev1.ResourceCPU, + 
podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createUnreadyPod("pod3", 100, 200), + }, + calibration: 1.0, + expectedReplicas: 3, + expectedUsage: 15, + expectError: false, + }, + { + name: "Scale down with missing pods", + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 5}, + "pod2": {Value: 5}, + }, + currentReplicas: 3, + targetUsage: 10, + resource: corev1.ResourceCPU, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createPod("pod3", 100, 200), + }, + calibration: 1.0, + expectedReplicas: 2, + expectedUsage: 5, + expectError: false, + }, + { + name: "Scale with calibration", + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 15}, + "pod2": {Value: 15}, + }, + currentReplicas: 2, + targetUsage: 10, + resource: corev1.ResourceCPU, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + calibration: 0.8, + expectedReplicas: 4, + expectedUsage: 15, + expectError: false, + }, + { + name: "Error: No pods", + metrics: metricsclient.PodMetricsInfo{}, + currentReplicas: 2, + targetUsage: 10, + resource: corev1.ResourceCPU, + podList: []*corev1.Pod{}, + calibration: 1.0, + expectError: true, + }, + { + name: "Error: No metrics for ready pods", + metrics: metricsclient.PodMetricsInfo{}, + currentReplicas: 2, + targetUsage: 10, + resource: corev1.ResourceCPU, + podList: []*corev1.Pod{ + createUnreadyPod("pod1", 100, 200), + createUnreadyPod("pod2", 100, 200), + }, + calibration: 1.0, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + calculator := NewReplicaCalculator( + nil, // metrics client not needed for this test + defaultTolerance, + cpuInitializationPeriod, + delayOfInitialReadinessStatus, + ) + + replicas, usage, err := calculator.calcPlainMetricReplicas( + tc.metrics, + tc.currentReplicas, + tc.targetUsage, + tc.resource, + tc.podList, + tc.calibration, + ) + + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedReplicas, replicas) + assert.Equal(t, tc.expectedUsage, usage) + } + }) + } +} + +// Helper function to create an unready pod +func createUnreadyPod(name string, request, limit int64) *corev1.Pod { + pod := createPod(name, request, limit) + pod.Status.Conditions = []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + }, + } + return pod +} + +func TestGetObjectMetricReplicas(t *testing.T) { + mockClient := &MockQueryClient{} + calculator := NewReplicaCalculator(mockClient, 0.1, 5*time.Minute, 30*time.Second) + + testCases := []struct { + name string + currentReplicas int32 + targetUsage int64 + metricName string + namespace string + objectRef *autoscalingv2.CrossVersionObjectReference + metricSelector labels.Selector + podList []*corev1.Pod + objectMetric int64 + calibration float64 + expectedReplicas int32 + expectedError bool + }{ + { + name: "Scale up based on object metrics", + currentReplicas: 2, + targetUsage: 10, + metricName: "queue_length", + namespace: "default", + objectRef: &autoscalingv2.CrossVersionObjectReference{Kind: "Service", Name: "my-svc"}, + metricSelector: labels.Everything(), + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + objectMetric: 30, + calibration: 1.0, + expectedReplicas: 6, + expectedError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockClient.On("GetObjectMetric", 
tc.metricName, tc.namespace, tc.objectRef, tc.metricSelector).Return(tc.objectMetric, time.Now(), nil).Once() + + replicas, _, _, err := calculator.GetObjectMetricReplicas(tc.currentReplicas, tc.targetUsage, tc.metricName, tc.namespace, tc.objectRef, tc.metricSelector, tc.podList, tc.calibration) + + if tc.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedReplicas, replicas) + } + }) + } +} + +// TestGetObjectPerPodMetricReplicas verifies the calculation of per-pod object metric-based replicas +func TestGetObjectPerPodMetricReplicas(t *testing.T) { + const ( + defaultNamespace = "default" + cpuInitializationPeriod = 5 * time.Minute + delayOfInitialReadinessStatus = 30 * time.Second + defaultTolerance = 0.1 + ) + + defaultObjectRef := &autoscalingv2.CrossVersionObjectReference{Kind: "Service", Name: "my-svc"} + + testCases := []struct { + name string + statusReplicas int32 + targetAverageUsage int64 + metricName string + namespace string + objectRef *autoscalingv2.CrossVersionObjectReference + metricSelector labels.Selector + objectMetric int64 + calibration float64 + tolerance float64 + expectedReplicas int32 + expectedUsage int64 + expectError bool + }{ + { + name: "Scale up based on per-pod object metrics", + statusReplicas: 2, + targetAverageUsage: 10, + metricName: "requests_per_pod", + namespace: defaultNamespace, + objectRef: defaultObjectRef, + metricSelector: labels.Everything(), + objectMetric: 30, + calibration: 1.0, + tolerance: defaultTolerance, + expectedReplicas: 3, + expectedUsage: 15, + expectError: false, + }, + { + name: "Scale down based on per-pod object metrics", + statusReplicas: 4, + targetAverageUsage: 20, + metricName: "requests_per_pod", + namespace: defaultNamespace, + objectRef: defaultObjectRef, + metricSelector: labels.Everything(), + objectMetric: 60, + calibration: 1.0, + tolerance: defaultTolerance, + expectedReplicas: 3, + expectedUsage: 15, + expectError: false, + }, + { + name: "No change due to tolerance", + statusReplicas: 3, + targetAverageUsage: 10, + metricName: "requests_per_pod", + namespace: defaultNamespace, + objectRef: defaultObjectRef, + metricSelector: labels.Everything(), + objectMetric: 32, // Just within tolerance (10% of 30) + calibration: 1.0, + tolerance: defaultTolerance, + expectedReplicas: 3, + expectedUsage: 11, + expectError: false, + }, + { + name: "Scale with calibration", + statusReplicas: 2, + targetAverageUsage: 10, + metricName: "requests_per_pod", + namespace: defaultNamespace, + objectRef: defaultObjectRef, + metricSelector: labels.Everything(), + objectMetric: 30, + calibration: 0.5, + tolerance: defaultTolerance, + expectedReplicas: 12, + expectedUsage: 15, + expectError: false, + }, + { + name: "Error getting metric", + statusReplicas: 2, + targetAverageUsage: 10, + metricName: "requests_per_pod", + namespace: defaultNamespace, + objectRef: defaultObjectRef, + metricSelector: labels.Everything(), + objectMetric: 0, + calibration: 1.0, + tolerance: defaultTolerance, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockClient := &MockQueryClient{} + calculator := NewReplicaCalculator( + mockClient, + tc.tolerance, + cpuInitializationPeriod, + delayOfInitialReadinessStatus, + ) + + if tc.expectError { + mockClient.On("GetObjectMetric", tc.metricName, tc.namespace, tc.objectRef, tc.metricSelector). 
+ Return(int64(0), time.Time{}, fmt.Errorf("metric not available")).Once() + } else { + mockClient.On("GetObjectMetric", tc.metricName, tc.namespace, tc.objectRef, tc.metricSelector). + Return(tc.objectMetric, time.Now(), nil).Once() + } + + replicas, usage, timestamp, err := calculator.GetObjectPerPodMetricReplicas( + tc.statusReplicas, + tc.targetAverageUsage, + tc.metricName, + tc.namespace, + tc.objectRef, + tc.metricSelector, + tc.calibration, + ) + + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedReplicas, replicas, "Unexpected replica count") + assert.Equal(t, tc.expectedUsage, usage, "Unexpected usage value") + assert.False(t, timestamp.IsZero(), "Timestamp should not be zero") + } + + mockClient.AssertExpectations(t) + }) + } +} + +// TestGetUsageRatioReplicaCount checks the calculation of usage ratio-based replica count +func TestGetUsageRatioReplicaCount(t *testing.T) { + const ( + cpuInitializationPeriod = 5 * time.Minute + delayOfInitialReadinessStatus = 30 * time.Second + defaultTolerance = 0.1 + ) + + testCases := []struct { + name string + currentReplicas int32 + usageRatio float64 + podList []*corev1.Pod + calibration float64 + tolerance float64 + expectedReplicas int32 + expectError bool + }{ + { + name: "Scale up", + currentReplicas: 2, + usageRatio: 1.5, + podList: []*corev1.Pod{createPod("pod1", 100, 200), createPod("pod2", 100, 200)}, + calibration: 1.0, + tolerance: defaultTolerance, + expectedReplicas: 3, + expectError: false, + }, + { + name: "Scale down", + currentReplicas: 4, + usageRatio: 0.5, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createPod("pod3", 100, 200), + createPod("pod4", 100, 200), + }, + calibration: 1.0, + tolerance: defaultTolerance, + expectedReplicas: 2, + expectError: false, + }, + { + name: "No change due to tolerance", + currentReplicas: 3, + usageRatio: 1.05, + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createPod("pod3", 100, 200), + }, + calibration: 1.0, + tolerance: defaultTolerance, + expectedReplicas: 3, + expectError: false, + }, + { + name: "Scale to zero", + currentReplicas: 0, + usageRatio: 0.0, + podList: []*corev1.Pod{}, + calibration: 1.0, + tolerance: defaultTolerance, + expectedReplicas: 0, + expectError: false, + }, + { + name: "Scale from zero", + currentReplicas: 0, + usageRatio: 1.5, + podList: []*corev1.Pod{}, + calibration: 1.0, + tolerance: defaultTolerance, + expectedReplicas: 2, + expectError: false, + }, + { + name: "Scale with calibration", + currentReplicas: 2, + usageRatio: 1.5, + podList: []*corev1.Pod{createPod("pod1", 100, 200), createPod("pod2", 100, 200)}, + calibration: 0.5, + tolerance: defaultTolerance, + expectedReplicas: 6, + expectError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockClient := &MockQueryClient{} + calculator := NewReplicaCalculator( + mockClient, + tc.tolerance, + cpuInitializationPeriod, + delayOfInitialReadinessStatus, + ) + + replicas, timestamp, err := calculator.getUsageRatioReplicaCount( + tc.currentReplicas, + tc.usageRatio, + tc.podList, + tc.calibration, + ) + + assert.NoError(t, err, "Unexpected error: %v", err) + assert.Equal(t, tc.expectedReplicas, replicas, "Unexpected replica count") + assert.True(t, timestamp.IsZero(), "Expected zero timestamp, but got: %v", timestamp) + }) + } +} + +// TestGetReadyPodsCount verifies the counting of ready pods +func TestGetReadyPodsCount(t 
*testing.T) { + testCases := []struct { + name string + podList []*corev1.Pod + expectedCount int64 + expectError bool + }{ + { + name: "All pods ready", + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + createPod("pod3", 100, 200), + }, + expectedCount: 3, + expectError: false, + }, + { + name: "Mixed ready and unready pods", + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createUnreadyPod("pod2", 100, 200), + createPod("pod3", 100, 200), + createUnreadyPod("pod4", 100, 200), + }, + expectedCount: 2, + expectError: false, + }, + { + name: "All pods unready", + podList: []*corev1.Pod{ + createUnreadyPod("pod1", 100, 200), + createUnreadyPod("pod2", 100, 200), + }, + expectedCount: 0, + expectError: false, + }, + { + name: "Empty pod list", + podList: []*corev1.Pod{}, + expectedCount: 0, + expectError: true, + }, + { + name: "Pods with different phases", + podList: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPodWithPhase("pod2", 100, 200, corev1.PodPending), + createPodWithPhase("pod3", 100, 200, corev1.PodSucceeded), + createPodWithPhase("pod4", 100, 200, corev1.PodFailed), + }, + expectedCount: 1, + expectError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + calculator := &ReplicaCalculator{} // Don't need to initialize other fields for this test + + count, err := calculator.getReadyPodsCount(tc.podList) + + if tc.expectError { + assert.Error(t, err, "Expected an error, but got none") + } else { + assert.NoError(t, err, "Unexpected error: %v", err) + assert.Equal(t, tc.expectedCount, count, "Unexpected ready pod count") + } + }) + } +} + +// TestGroupPods checks the grouping of pods based on their status and metrics +func TestGroupPods(t *testing.T) { + now := time.Now() + + testCases := []struct { + name string + pods []*corev1.Pod + metrics metricsclient.PodMetricsInfo + resource corev1.ResourceName + cpuInitializationPeriod time.Duration + delayOfInitialReadinessStatus time.Duration + expectedReadyCount int + expectedUnreadyPods []string + expectedMissingPods []string + expectedIgnoredPods []string + }{ + { + name: "All pods ready and with metrics", + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150, Window: time.Minute, Timestamp: now}, + "pod2": {Value: 150, Window: time.Minute, Timestamp: now}, + }, + resource: corev1.ResourceCPU, + cpuInitializationPeriod: 5 * time.Minute, + delayOfInitialReadinessStatus: 30 * time.Second, + expectedReadyCount: 2, + }, + { + name: "One pod unready (Pending)", + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPodWithPhase("pod2", 100, 200, corev1.PodPending), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150, Window: time.Minute, Timestamp: now}, + "pod2": {Value: 150, Window: time.Minute, Timestamp: now}, + }, + resource: corev1.ResourceCPU, + cpuInitializationPeriod: 5 * time.Minute, + delayOfInitialReadinessStatus: 30 * time.Second, + expectedReadyCount: 1, + expectedUnreadyPods: []string{"pod2"}, + }, + { + name: "One pod missing metrics", + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150, Window: time.Minute, Timestamp: now}, + }, + resource: corev1.ResourceCPU, + cpuInitializationPeriod: 5 * time.Minute, + delayOfInitialReadinessStatus: 30 * time.Second, + expectedReadyCount: 1, + expectedMissingPods: 
[]string{"pod2"}, + }, + { + name: "One pod ignored (Failed)", + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPodWithPhase("pod2", 100, 200, corev1.PodFailed), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150, Window: time.Minute, Timestamp: now}, + "pod2": {Value: 150, Window: time.Minute, Timestamp: now}, + }, + resource: corev1.ResourceCPU, + cpuInitializationPeriod: 5 * time.Minute, + delayOfInitialReadinessStatus: 30 * time.Second, + expectedReadyCount: 1, + expectedIgnoredPods: []string{"pod2"}, + }, + { + name: "Pod within CPU initialization period", + pods: []*corev1.Pod{ + createPodWithStartTime("pod1", 100, 200, now.Add(-2*time.Minute)), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150, Window: time.Minute, Timestamp: now}, + }, + resource: corev1.ResourceCPU, + cpuInitializationPeriod: 5 * time.Minute, + delayOfInitialReadinessStatus: 30 * time.Second, + expectedReadyCount: 1, + expectedUnreadyPods: []string{}, + }, + { + name: "Non-CPU resource", + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createUnreadyPod("pod2", 100, 200), + }, + metrics: metricsclient.PodMetricsInfo{ + "pod1": {Value: 150, Window: time.Minute, Timestamp: now}, + "pod2": {Value: 150, Window: time.Minute, Timestamp: now}, + }, + resource: corev1.ResourceMemory, + cpuInitializationPeriod: 5 * time.Minute, + delayOfInitialReadinessStatus: 30 * time.Second, + expectedReadyCount: 2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + readyCount, unreadyPods, missingPods, ignoredPods := groupPods(tc.pods, tc.metrics, tc.resource, tc.cpuInitializationPeriod, tc.delayOfInitialReadinessStatus) + + assert.Equal(t, tc.expectedReadyCount, readyCount, "Ready pod count mismatch") + assertSetContains(t, unreadyPods, tc.expectedUnreadyPods, "Unready pods mismatch") + assertSetContains(t, missingPods, tc.expectedMissingPods, "Missing pods mismatch") + assertSetContains(t, ignoredPods, tc.expectedIgnoredPods, "Ignored pods mismatch") + }) + } +} + +// TestCalculatePodRequests verifies the calculation of pod resource requests +func TestCalculatePodRequests(t *testing.T) { + testCases := []struct { + name string + pods []*corev1.Pod + container string + resource corev1.ResourceName + expectedResult map[string]int64 + expectedError bool + }{ + { + name: "Calculate CPU requests for all containers", + pods: []*corev1.Pod{ + createPod("pod1", 100, 200), + createPod("pod2", 200, 300), + }, + container: "", + resource: corev1.ResourceCPU, + expectedResult: map[string]int64{"pod1": 100, "pod2": 200}, + expectedError: false, + }, + { + name: "Calculate memory requests for specific container", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1"}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "container1", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + }, + { + Name: "container2", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + container: "container2", + resource: corev1.ResourceMemory, + expectedResult: map[string]int64{"pod1": 209715200000}, + expectedError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := calculatePodRequests(tc.pods, tc.container, tc.resource) + + if tc.expectedError { + assert.Error(t, err) + } else { + 
assert.NoError(t, err) + assert.Equal(t, tc.expectedResult, result) + } + }) + } +} + +// TestRemoveMetricsForPods checks the removal of metrics for specified pods +func TestRemoveMetricsForPods(t *testing.T) { + metrics := metricsclient.PodMetricsInfo{ + "pod1": {Value: 100}, + "pod2": {Value: 200}, + "pod3": {Value: 300}, + } + + podsToRemove := sets.New("pod1", "pod3") + + removeMetricsForPods(metrics, podsToRemove) + + assert.Equal(t, 1, len(metrics)) + assert.Contains(t, metrics, "pod2") + assert.NotContains(t, metrics, "pod1") + assert.NotContains(t, metrics, "pod3") +} + +// Helper Functions + +// Helper function to create a pod +func createPod(name string, request, limit int64) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "test": "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(request, resource.DecimalSI), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(limit, resource.DecimalSI), + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + StartTime: &metav1.Time{Time: time.Now().Add(-1 * time.Hour)}, + }, + } +} + +// Helper function to create a pod with a specific phase +func createPodWithPhase(name string, request, limit int64, phase corev1.PodPhase) *corev1.Pod { + pod := createPod(name, request, limit) + pod.Status.Phase = phase + return pod +} + +// Helper function to assert that a set contains expected elements +func assertSetContains(t *testing.T, set sets.Set[string], expected []string, message string) { + assert.Equal(t, len(expected), set.Len(), message) + for _, item := range expected { + assert.True(t, set.Has(item), fmt.Sprintf("%s: %s not found", message, item)) + } +} + +// Helper function to create a pod with a specific start time +func createPodWithStartTime(name string, request, limit int64, startTime time.Time) *corev1.Pod { + pod := createPod(name, request, limit) + pod.Status.StartTime = &metav1.Time{Time: startTime} + return pod +}