diff --git a/Tiltfile b/Tiltfile
index 26e681617..462b86f9e 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -66,6 +66,17 @@ k8s_resource(
   ]
 )
 
+k8s_resource(
+  workload = 'kargo-garbage-collector',
+  new_name = 'garbage-collector',
+  labels = ['kargo'],
+  objects = [
+    'kargo-garbage-collector:clusterrole',
+    'kargo-garbage-collector:clusterrolebinding',
+    'kargo-garbage-collector:serviceaccount'
+  ]
+)
+
 k8s_resource(
   workload = 'kargo-webhooks-server',
   new_name = 'webhooks-server',
diff --git a/charts/kargo/templates/_helpers.tpl b/charts/kargo/templates/_helpers.tpl
index 45905b865..e77fac200 100644
--- a/charts/kargo/templates/_helpers.tpl
+++ b/charts/kargo/templates/_helpers.tpl
@@ -28,6 +28,10 @@ If release name contains chart name it will be used as a full name.
 {{ include "kargo.fullname" . | printf "%s-controller" }}
 {{- end -}}
 
+{{- define "kargo.garbageCollector.fullname" -}}
+{{ include "kargo.fullname" . | printf "%s-garbage-collector" }}
+{{- end -}}
+
 {{- define "kargo.webhooksServer.fullname" -}}
 {{ include "kargo.fullname" . | printf "%s-webhooks-server" }}
 {{- end -}}
@@ -76,6 +80,10 @@ app.kubernetes.io/component: api
 app.kubernetes.io/component: controller
 {{- end -}}
 
+{{- define "kargo.garbageCollector.labels" -}}
+app.kubernetes.io/component: garbage-collector
+{{- end -}}
+
 {{- define "kargo.webhooksServer.labels" -}}
 app.kubernetes.io/component: webhooks-server
 {{- end -}}
diff --git a/charts/kargo/templates/garbage-collector/cluster-role-bindings.yaml b/charts/kargo/templates/garbage-collector/cluster-role-bindings.yaml
new file mode 100644
index 000000000..c283909f0
--- /dev/null
+++ b/charts/kargo/templates/garbage-collector/cluster-role-bindings.yaml
@@ -0,0 +1,17 @@
+{{- if and .Values.garbageCollector.enabled .Values.rbac.installClusterRoleBindings }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "kargo.garbageCollector.fullname" . }}
+  labels:
+    {{- include "kargo.labels" . | nindent 4 }}
+    {{- include "kargo.garbageCollector.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "kargo.garbageCollector.fullname" . }}
+subjects:
+- kind: ServiceAccount
+  namespace: {{ .Release.Namespace }}
+  name: {{ include "kargo.garbageCollector.fullname" . }}
+{{- end }}
diff --git a/charts/kargo/templates/garbage-collector/cluster-roles.yaml b/charts/kargo/templates/garbage-collector/cluster-roles.yaml
new file mode 100644
index 000000000..0705ff480
--- /dev/null
+++ b/charts/kargo/templates/garbage-collector/cluster-roles.yaml
@@ -0,0 +1,27 @@
+{{- if and .Values.garbageCollector.enabled .Values.rbac.installClusterRoles }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ include "kargo.garbageCollector.fullname" . }}
+  labels:
+    {{- include "kargo.labels" . | nindent 4 }}
+    {{- include "kargo.garbageCollector.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - kargo.akuity.io
+  resources:
+  - promotions
+  verbs:
+  - delete
+  - get
+  - list
+  - watch
+{{- end }}
diff --git a/charts/kargo/templates/garbage-collector/cron-job.yaml b/charts/kargo/templates/garbage-collector/cron-job.yaml
new file mode 100644
index 000000000..6dbc608c5
--- /dev/null
+++ b/charts/kargo/templates/garbage-collector/cron-job.yaml
@@ -0,0 +1,40 @@
+{{- if .Values.garbageCollector.enabled }}
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: {{ include "kargo.garbageCollector.fullname" . }}
+  labels:
+    {{- include "kargo.labels" . | nindent 4 }}
+    {{- include "kargo.garbageCollector.labels" . | nindent 4 }}
+spec:
+  schedule: {{ quote .Values.garbageCollector.schedule }}
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          serviceAccountName: {{ include "kargo.garbageCollector.fullname" . }}
+          containers:
+          - name: garbage-collector
+            image: {{ include "kargo.image" . }}
+            imagePullPolicy: {{ .Values.image.pullPolicy }}
+            command: ["/usr/local/bin/kargo", "garbage-collector"]
+            env:
+            - name: NUM_WORKERS
+              value: {{ quote .Values.garbageCollector.workers }}
+            - name: MAX_RETAINED_PROMOTIONS
+              value: {{ quote .Values.garbageCollector.maxRetainedPromotions }}
+            - name: LOG_LEVEL
+              value: {{ .Values.garbageCollector.logLevel }}
+            resources:
+              {{- toYaml .Values.garbageCollector.resources | nindent 14 }}
+          restartPolicy: Never
+          concurrencyPolicy: Forbid
+          {{- with .Values.garbageCollector.nodeSelector }}
+          nodeSelector:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with .Values.garbageCollector.tolerations }}
+          tolerations:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+{{- end }}
diff --git a/charts/kargo/templates/garbage-collector/service-account.yaml b/charts/kargo/templates/garbage-collector/service-account.yaml
new file mode 100644
index 000000000..03d0a942b
--- /dev/null
+++ b/charts/kargo/templates/garbage-collector/service-account.yaml
@@ -0,0 +1,9 @@
+{{- if .Values.garbageCollector.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "kargo.garbageCollector.fullname" . }}
+  labels:
+    {{- include "kargo.labels" . | nindent 4 }}
+    {{- include "kargo.garbageCollector.labels" . | nindent 4 }}
+{{- end }}
diff --git a/charts/kargo/values.yaml b/charts/kargo/values.yaml
index 31d93b151..fa013e53e 100755
--- a/charts/kargo/values.yaml
+++ b/charts/kargo/values.yaml
@@ -156,3 +156,37 @@ webhooksServer:
   nodeSelector: {}
 
   tolerations: []
+
+garbageCollector:
+
+  ## Whether the garbage collector is enabled.
+  enabled: true
+
+  ## When to run the garbage collector.
+  schedule: "0 * * * *"
+  ## The number of concurrent workers to run. Tuning this too low will result in
+  ## slow garbage collection. Tuning this too high will result in too many API
+  ## calls and may result in throttling.
+  workers: 3
+  ## The maximum number of Promotions in terminal phases PER PROJECT that may be
+  ## spared by the garbage collector.
+  maxRetainedPromotions: 20
+
+  logLevel: INFO
+
+  resources: {}
+  # We usually recommend not to specify default resources and to leave this as
+  # a conscious choice for the user. This also increases chances charts run on
+  # environments with little resources, such as KinD. If you do want to
+  # specify resources, uncomment the following lines, adjust them as
+  # necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+  nodeSelector: {}
+
+  tolerations: []
diff --git a/cmd/controlplane/garbage_collector.go b/cmd/controlplane/garbage_collector.go
new file mode 100644
index 000000000..2cc7084f9
--- /dev/null
+++ b/cmd/controlplane/garbage_collector.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	api "github.com/akuity/kargo/api/v1alpha1"
+	"github.com/akuity/kargo/internal/garbage"
+	"github.com/akuity/kargo/internal/os"
+	versionpkg "github.com/akuity/kargo/internal/version"
+)
+
+func newGarbageCollectorCommand() *cobra.Command {
+	return &cobra.Command{
+		Use:               "garbage-collector",
+		DisableAutoGenTag: true,
+		SilenceErrors:     true,
+		SilenceUsage:      true,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+
+			version := versionpkg.GetVersion()
+			log.WithFields(log.Fields{
+				"version": version.Version,
+				"commit":  version.GitCommit,
+			}).Info("Starting Kargo Garbage Collector")
+
+			cfg := garbage.CollectorConfigFromEnv()
+
+			var kubeClient client.Client
+			{
+				restCfg, err := getRestConfig(ctx, os.GetEnv("KUBECONFIG", ""))
+				if err != nil {
+					return errors.Wrap(err, "error loading REST config")
+				}
+				scheme := runtime.NewScheme()
+				if err = corev1.AddToScheme(scheme); err != nil {
+					return errors.Wrap(err, "error adding Kubernetes core API to scheme")
+				}
+				if err = api.AddToScheme(scheme); err != nil {
+					return errors.Wrap(err, "error adding Kargo API to scheme")
+				}
+				if kubeClient, err = client.New(
+					restCfg,
+					client.Options{
+						Scheme: scheme,
+					},
+				); err != nil {
+					return errors.Wrap(err, "error initializing Kubernetes client")
+				}
+			}
+
+			return garbage.NewCollector(kubeClient, cfg).Run(ctx)
+		},
+	}
+}
diff --git a/cmd/controlplane/root.go b/cmd/controlplane/root.go
index 450d3b283..0b14e6cfb 100644
--- a/cmd/controlplane/root.go
+++ b/cmd/controlplane/root.go
@@ -21,6 +21,7 @@ var (
 func Execute(ctx context.Context) error {
 	rootCmd.AddCommand(newAPICommand())
 	rootCmd.AddCommand(newControllerCommand())
+	rootCmd.AddCommand(newGarbageCollectorCommand())
 	rootCmd.AddCommand(newVersionCommand())
 	rootCmd.AddCommand(newWebhooksServerCommand())
 	return rootCmd.ExecuteContext(ctx)
diff --git a/internal/garbage/collector.go b/internal/garbage/collector.go
new file mode 100644
index 000000000..48f18633c
--- /dev/null
+++ b/internal/garbage/collector.go
@@ -0,0 +1,267 @@
+package garbage
+
+import (
+	"context"
+	"math"
+	"sort"
+	"sync"
+
+	"github.com/kelseyhightower/envconfig"
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	api "github.com/akuity/kargo/api/v1alpha1"
+	logging "github.com/akuity/kargo/internal/logging"
+)
+
+// CollectorConfig is configuration for the garbage collector.
+type CollectorConfig struct {
+	// NumWorkers specifies the number of concurrent workers working on garbage
+	// collection. Tuning this too low will result in slow garbage collection.
+	// Tuning this too high will result in too many API calls and may result in
+	// throttling.
+	NumWorkers int `envconfig:"NUM_WORKERS" default:"3"`
+	// MaxRetainedPromotions specifies the maximum number of Promotions in
+	// terminal phases per Project that may be spared by the garbage collector.
+	MaxRetainedPromotions int `envconfig:"MAX_RETAINED_PROMOTIONS" default:"20"`
+}
+
+// CollectorConfigFromEnv returns a CollectorConfig populated from environment
+// variables.
+func CollectorConfigFromEnv() CollectorConfig {
+	cfg := CollectorConfig{}
+	envconfig.MustProcess("", &cfg)
+	return cfg
+}
+
+// Collector is an interface for the garbage collector.
+type Collector interface {
+	// Run runs the garbage collector until all eligible Promotion resources have
+	// been deleted -- or until an unrecoverable error occurs.
+	Run(context.Context) error
+}
+
+// collector is an implementation of the Collector interface.
+type collector struct {
+	cfg CollectorConfig
+
+	// The following behaviors are overridable for testing purposes:
+	cleanProjectsFn func(
+		ctx context.Context,
+		projectCh <-chan string,
+		errCh chan<- struct{},
+	)
+
+	cleanProjectFn func(
+		ctx context.Context,
+		project string,
+	) error
+
+	listProjectsFn func(
+		context.Context,
+		client.ObjectList,
+		...client.ListOption,
+	) error
+
+	listPromotionsFn func(
+		context.Context,
+		client.ObjectList,
+		...client.ListOption,
+	) error
+
+	deletePromotionFn func(
+		context.Context,
+		client.Object,
+		...client.DeleteOption,
+	) error
+}
+
+// NewCollector initializes and returns an implementation of the Collector
+// interface.
+func NewCollector(client client.Client, cfg CollectorConfig) Collector {
+	c := &collector{
+		cfg: cfg,
+	}
+	c.cleanProjectsFn = c.cleanProjects
+	c.cleanProjectFn = c.cleanProject
+	c.listProjectsFn = client.List
+	c.listPromotionsFn = client.List
+	c.deletePromotionFn = client.Delete
+	return c
+}
+
+func (c *collector) Run(ctx context.Context) error {
+	projects := corev1.NamespaceList{}
+	if err := c.listProjectsFn(
+		ctx,
+		&projects,
+		&client.ListOptions{
+			LabelSelector: labels.Set(
+				map[string]string{
+					api.LabelProjectKey: "true",
+				},
+			).AsSelector(),
+		},
+	); err != nil {
+		return errors.Wrap(
+			err,
+			"error listing projects; no garbage collection performed",
+		)
+	}
+
+	projectCh := make(chan string)
+	errCh := make(chan struct{})
+
+	// Fan out -- start workers
+	numWorkers :=
+		int(math.Min(float64(c.cfg.NumWorkers), float64(len(projects.Items))))
+	workersWG := sync.WaitGroup{}
+	workersWG.Add(numWorkers)
+	for i := 0; i < numWorkers; i++ {
+		go func() {
+			defer workersWG.Done()
+			c.cleanProjectsFn(ctx, projectCh, errCh)
+		}()
+	}
+
+	// This is a very simple mechanism for workers to communicate that they have
+	// encountered an error. We don't do anything other than count them, and when
+	// the process completes, exit non-zero if the count is greater than zero.
+	var errCount int
+	errsWG := sync.WaitGroup{}
+	errsWG.Add(1)
+	go func() {
+		defer errsWG.Done()
+		for {
+			select {
+			case _, ok := <-errCh:
+				if !ok {
+					return // Channel was closed
+				}
+				errCount++
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	// Distribute work across workers
+	for _, project := range projects.Items {
+		select {
+		case projectCh <- project.Name:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+
+	// Workers idly waiting for a project will return when the channel is closed
+	close(projectCh)
+	// Wait for remaining workers to finish
+	workersWG.Wait()
+	// Close error channel to signal that no more errors will be received
+	close(errCh)
+	// Wait for error counter to finish
+	errsWG.Wait()
+
+	if errCount > 0 {
+		return errors.New(
+			"one or more errors were encountered during garbage collection; " +
+				"see logs for details",
+		)
+	}
+
+	return nil
+}
+
+// cleanProjects is a worker function that receives Project names over a channel
+// until that channel is closed. It will execute garbage collection for each
+// Project name received.
+func (c *collector) cleanProjects(
+	ctx context.Context,
+	projectCh <-chan string,
+	errCh chan<- struct{},
+) {
+	for {
+		select {
+		case project, ok := <-projectCh:
+			if !ok {
+				return // Channel was closed
+			}
+			if err := c.cleanProjectFn(ctx, project); err != nil {
+				select {
+				case errCh <- struct{}{}:
+				case <-ctx.Done():
+					return
+				}
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// cleanProject executes garbage collection for a single Project.
+func (c *collector) cleanProject(ctx context.Context, project string) error {
+	logger := logging.LoggerFromContext(ctx).WithField("project", project)
+
+	promos := api.PromotionList{}
+	if err := c.listPromotionsFn(
+		ctx,
+		&promos,
+		client.InNamespace(project),
+	); err != nil {
+		return errors.Wrapf(err, "error listing Promotions for Project %q", project)
+	}
+
+	if len(promos.Items) <= c.cfg.MaxRetainedPromotions {
+		return nil // Done
+	}
+
+	// Sort Promotions by creation time
+	sort.Sort(byCreation(promos.Items))
+
+	// Delete oldest Promotions (in terminal phases only) that are in excess of
+	// MaxRetainedPromotions
+	var deleteErrCount int
+	for i := c.cfg.MaxRetainedPromotions; i < len(promos.Items); i++ {
+		promo := promos.Items[i]
+		switch promo.Status.Phase {
+		case api.PromotionPhaseComplete, api.PromotionPhaseFailed:
+			promoLogger := logger.WithField("promotion", promo.Name)
+			if err := c.deletePromotionFn(ctx, &promo); err != nil {
+				promoLogger.Errorf("error deleting Promotion: %s", err)
+				deleteErrCount++
+			} else {
+				promoLogger.Debug("deleted Promotion")
+			}
+		}
+	}
+
+	if deleteErrCount > 0 {
+		return errors.Errorf(
+			"error deleting one or more Promotions from Project %q",
+			project,
+		)
+	}
+
+	return nil
+}
+
+// byCreation implements sort.Interface for []api.Promotion.
+type byCreation []api.Promotion
+
+func (b byCreation) Len() int {
+	return len(b)
+}
+
+func (b byCreation) Swap(i, j int) {
+	b[i], b[j] = b[j], b[i]
+}
+
+func (b byCreation) Less(i, j int) bool {
+	return b[i].ObjectMeta.CreationTimestamp.Time.After(
+		b[j].ObjectMeta.CreationTimestamp.Time,
+	)
+}
diff --git a/internal/garbage/collector_test.go b/internal/garbage/collector_test.go
new file mode 100644
index 000000000..507b46fc9
--- /dev/null
+++ b/internal/garbage/collector_test.go
@@ -0,0 +1,395 @@
+package garbage
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	api "github.com/akuity/kargo/api/v1alpha1"
+	"github.com/akuity/kargo/internal/logging"
+)
+
+func TestNewCollector(t *testing.T) {
+	client := fake.NewClientBuilder().Build()
+	testCfg := CollectorConfigFromEnv()
+	c, ok := NewCollector(client, testCfg).(*collector)
+	require.True(t, ok)
+	require.Equal(t, testCfg, c.cfg)
+	require.NotNil(t, c.cleanProjectsFn)
+	require.NotNil(t, c.cleanProjectFn)
+	require.NotNil(t, c.listProjectsFn)
+	require.NotNil(t, c.listPromotionsFn)
+	require.NotNil(t, c.deletePromotionFn)
+}
+
+func TestRun(t *testing.T) {
+	testCases := []struct {
+		name           string
+		listProjectsFn func(
+			context.Context,
+			client.ObjectList,
+			...client.ListOption,
+		) error
+		cleanProjectsFn func(
+			ctx context.Context,
+			projectCh <-chan string,
+			errCh chan<- struct{},
+		)
+		assertions func(error)
+	}{
+		{
+			name: "error listing Projects",
+			listProjectsFn: func(
+				context.Context,
+				client.ObjectList,
+				...client.ListOption,
+			) error {
+				return errors.New("something went wrong")
+			},
+			assertions: func(err error) {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), "something went wrong")
+				require.Contains(
+					t,
+					err.Error(),
+					"error listing projects; no garbage collection performed",
+				)
+			},
+		},
+
+		{
+			// The objective of this test case is to ensure that when a worker sends
+			// a signal indicating that it handled an error, that Run() handles
+			// that correctly.
+			name: "cleanProjectsFn sends an error",
+			listProjectsFn: func(
+				_ context.Context,
+				objList client.ObjectList,
+				_ ...client.ListOption,
+			) error {
+				projects, ok := objList.(*corev1.NamespaceList)
+				require.True(t, ok)
+				projects.Items = []corev1.Namespace{{}}
+				return nil
+			},
+			cleanProjectsFn: func(
+				ctx context.Context,
+				projectCh <-chan string,
+				errCh chan<- struct{},
+			) {
+				// All we want to do is receive one Project name and send one error
+				select {
+				case <-projectCh:
+				case <-ctx.Done():
+					require.FailNow(t, "timed out waiting for a Project name")
+				}
+				select {
+				case errCh <- struct{}{}:
+				case <-ctx.Done():
+					require.FailNow(t, "timed out signaling an error")
+				}
+			},
+			assertions: func(err error) {
+				require.Error(t, err)
+				require.Equal(
+					t,
+					"one or more errors were encountered during garbage collection; "+
+						"see logs for details",
+					err.Error(),
+				)
+			},
+		},
+
+		{
+			// The objective of this test case is to ensure that when a worker sends
+			// no signals indicating that it handled any errors, that Run() handles
+			// that case correctly.
+			name: "success",
+			listProjectsFn: func(
+				_ context.Context,
+				objList client.ObjectList,
+				_ ...client.ListOption,
+			) error {
+				projects, ok := objList.(*corev1.NamespaceList)
+				require.True(t, ok)
+				projects.Items = []corev1.Namespace{{}}
+				return nil
+			},
+			cleanProjectsFn: func(
+				ctx context.Context,
+				projectCh <-chan string,
+				errCh chan<- struct{},
+			) {
+				// All we want to do is receive one Project name and return
+				select {
+				case <-projectCh:
+				case <-ctx.Done():
+					require.FailNow(t, "timed out waiting for a Project name")
+				}
+			},
+			assertions: func(err error) {
+				require.NoError(t, err)
+			},
+		},
+	}
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+			defer cancel()
+			c := &collector{
+				cfg: CollectorConfig{
+					NumWorkers: 1,
+				},
+				listProjectsFn:  testCase.listProjectsFn,
+				cleanProjectsFn: testCase.cleanProjectsFn,
+			}
+			err := c.Run(ctx)
+			testCase.assertions(err)
+		})
+	}
+}
+
+func TestCleanProjects(t *testing.T) {
+	testCases := []struct {
+		name           string
+		cleanProjectFn func(ctx context.Context, project string) error
+		errHandlerFn   func(ctx context.Context, errCh <-chan struct{})
+	}{
+		{
+			// The objective of this test case is to ensure that errCh is signaled
+			// when an error occurs.
+			name: "error cleaning individual Project",
+			cleanProjectFn: func(context.Context, string) error {
+				return errors.New("something went wrong")
+			},
+			errHandlerFn: func(ctx context.Context, errCh <-chan struct{}) {
+				select {
+				case _, ok := <-errCh:
+					if !ok {
+						require.FailNow(
+							t,
+							"error channel was closed without receiving any signals",
+						)
+					}
+				case <-ctx.Done():
+					require.FailNow(
+						t,
+						"timed out without receiving an error signal",
+					)
+				}
+			},
+		},
+
+		{
+			// The objective of this test case is to ensure that errCh is NOT signaled
+			// when everything goes smoothly.
+			name: "success",
+			cleanProjectFn: func(context.Context, string) error {
+				return nil
+			},
+			errHandlerFn: func(ctx context.Context, errCh <-chan struct{}) {
+				select {
+				case _, ok := <-errCh:
+					if ok {
+						require.FailNow(t, "an unexpected error signal was received")
+					}
+				case <-ctx.Done():
+				}
+			},
+		},
+	}
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+			defer cancel()
+
+			c := &collector{
+				cleanProjectFn: testCase.cleanProjectFn,
+			}
+
+			projectCh := make(chan string)
+			errCh := make(chan struct{})
+
+			go c.cleanProjects(ctx, projectCh, errCh)
+
+			select {
+			case projectCh <- "fake-project":
+			case <-ctx.Done():
+				require.FailNow(t, "timed out sending a Project name")
+			}
+
+			testCase.errHandlerFn(ctx, errCh)
+		})
+	}
+}
+
+func TestCleanProject(t *testing.T) {
+	ctx := context.Background()
+	logger := logging.LoggerFromContext(ctx)
+	logger.Logger.Level = log.PanicLevel
+	ctx = logging.ContextWithLogger(ctx, logger)
+
+	testCases := []struct {
+		name             string
+		listPromotionsFn func(
+			context.Context,
+			client.ObjectList,
+			...client.ListOption,
+		) error
+		deletePromotionFn func(
+			context.Context,
+			client.Object,
+			...client.DeleteOption,
+		) error
+		assertions func(error)
+	}{
+		{
+			name: "error listing Promotions",
+			listPromotionsFn: func(
+				context.Context,
+				client.ObjectList,
+				...client.ListOption,
+			) error {
+				return errors.New("something went wrong")
+			},
+			assertions: func(err error) {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), "error listing Promotions for Project")
+				require.Contains(t, err.Error(), "something went wrong")
+			},
+		},
+
+		{
+			name: "fewer Promotions than max found",
+			listPromotionsFn: func(
+				_ context.Context,
+				objList client.ObjectList,
+				_ ...client.ListOption,
+			) error {
+				promos, ok := objList.(*api.PromotionList)
+				require.True(t, ok)
+				promos.Items = []api.Promotion{}
+				return nil
+			},
+			assertions: func(err error) {
+				require.NoError(t, err)
+			},
+		},
+
+		{
+			name: "error deleting Promotion",
+			listPromotionsFn: func(
+				_ context.Context,
+				objList client.ObjectList,
+				_ ...client.ListOption,
+			) error {
+				promos, ok := objList.(*api.PromotionList)
+				require.True(t, ok)
+				promos.Items = []api.Promotion{}
+				for i := 0; i < 100; i++ {
+					promos.Items = append(
+						promos.Items,
+						api.Promotion{
+							Status: api.PromotionStatus{
+								Phase: api.PromotionPhaseComplete,
+							},
+						},
+					)
+				}
+				return nil
+			},
+			deletePromotionFn: func(
+				context.Context,
+				client.Object,
+				...client.DeleteOption,
+			) error {
+				return errors.New("something went wrong")
+			},
+			assertions: func(err error) {
+				require.Error(t, err)
+				require.Contains(
+					t,
+					err.Error(),
+					"error deleting one or more Promotions from Project",
+				)
+			},
+		},
+	}
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			c := &collector{
+				cfg: CollectorConfig{
+					MaxRetainedPromotions: 20,
+				},
+				listPromotionsFn:  testCase.listPromotionsFn,
+				deletePromotionFn: testCase.deletePromotionFn,
+			}
+			testCase.assertions(c.cleanProject(ctx, "fake-project"))
+		})
+	}
+
+	t.Run("success", func(t *testing.T) {
+		const numPromos = 100
+		const testProject = "fake-project"
+
+		scheme := runtime.NewScheme()
+		err := api.AddToScheme(scheme)
+		require.NoError(t, err)
+
+		initialPromos := []client.Object{}
+		creationTime := time.Now()
+		for i := 0; i < numPromos; i++ {
+			// We make each Promotion look newer then the last to ensure the sort
+			// isn't a no-op and actually gets covered by this test
+			creationTime = creationTime.Add(time.Hour)
+			initialPromos = append(
+				initialPromos,
+				&api.Promotion{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:              fmt.Sprintf("promotion-%d", i),
+						Namespace:         testProject,
+						CreationTimestamp: metav1.NewTime(creationTime),
+					},
+					Status: api.PromotionStatus{
+						Phase: api.PromotionPhaseComplete,
+					},
+				},
+			)
+		}
+
+		kubeClient := fake.NewClientBuilder().
+			WithScheme(scheme).
+			WithObjects(initialPromos...).
+			Build()
+
+		c := &collector{
+			cfg: CollectorConfig{
+				MaxRetainedPromotions: 20,
+			},
+			listPromotionsFn:  kubeClient.List,
+			deletePromotionFn: kubeClient.Delete,
+		}
+
+		err = c.cleanProject(ctx, testProject)
+		require.NoError(t, err)
+
+		promos := api.PromotionList{}
+		err = kubeClient.List(
+			ctx,
+			&promos,
+			client.InNamespace(testProject),
+		)
+		require.NoError(t, err)
+		require.Len(t, promos.Items, c.cfg.MaxRetainedPromotions)
+	})
+}