Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[DRAFT] Create deployment SLOs during the deployment state and delete them once the canary finishes. Checks for pods in the state CrashLoopBackOff or ImagePullBackOff #423

Draft
wants to merge 5 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions controllers/incarnation.go
Original file line number Diff line number Diff line change
Expand Up @@ -426,6 +426,25 @@ func (i *Incarnation) deleteCanaryRules(ctx context.Context) error {
})
}

// syncDeploymentRules applies the plan that creates/updates the
// deployment-phase PrometheusRules for this incarnation's app and tag,
// carrying the configured SLO labels and Sloth service level objectives.
func (i *Incarnation) syncDeploymentRules(ctx context.Context) error {
	syncPlan := &rmplan.SyncDeploymentRules{
		App:                         i.appName(),
		Namespace:                   i.targetNamespace(),
		Tag:                         i.tag,
		Labels:                      i.defaultLabels(),
		ServiceLevelObjectiveLabels: i.target().ServiceLevelObjectiveLabels,
		ServiceLevelObjectives:      i.target().SlothServiceLevelObjectives,
	}
	return i.controller.applyPlan(ctx, "Sync Deployment Rules", syncPlan)
}

// deleteDeploymentRules applies the plan that removes the deployment-phase
// PrometheusRules previously created for this incarnation's app and tag.
func (i *Incarnation) deleteDeploymentRules(ctx context.Context) error {
	deletePlan := &rmplan.DeleteDeploymentRules{
		App:       i.appName(),
		Namespace: i.targetNamespace(),
		Tag:       i.tag,
	}
	return i.controller.applyPlan(ctx, "Delete Deployment Rules", deletePlan)
}

func (i *Incarnation) syncTaggedServiceLevels(ctx context.Context) error {
if i.picchuConfig.ServiceLevelsNamespace != "" {
// Account for a fleet other than Delivery (old way of configuring SLOs) and Production (the only other place we ideally want SLOs to go)
Expand Down
28 changes: 28 additions & 0 deletions controllers/mock_deployment.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

50 changes: 50 additions & 0 deletions controllers/plan/deleteDeploymentRules.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
package plan

import (
"context"

"github.com/go-logr/logr"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
picchuv1alpha1 "go.medium.engineering/picchu/api/v1alpha1"
"go.medium.engineering/picchu/plan"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// DeleteDeploymentRules is a plan step that deletes the deployment-phase
// PrometheusRule resources previously created for a given app and tag.
type DeleteDeploymentRules struct {
	App       string // app label value used to select rules for deletion
	Namespace string // namespace to list/delete PrometheusRules in
	Tag       string // release tag label value used to select rules for deletion
}

// Apply lists every PrometheusRule in p.Namespace labeled with this plan's
// app, tag, and the deployment rule type, and deletes each one. Rules that
// are already gone (NotFound) are skipped silently, making the plan
// idempotent; any other error aborts and is returned.
func (p *DeleteDeploymentRules) Apply(ctx context.Context, cli client.Client, cluster *picchuv1alpha1.Cluster, log logr.Logger) error {
	prlist := &monitoringv1.PrometheusRuleList{}

	// Select only the deployment rules owned by this app/tag.
	opts := &client.ListOptions{
		Namespace: p.Namespace,
		LabelSelector: labels.SelectorFromSet(map[string]string{
			picchuv1alpha1.LabelApp:      p.App,
			picchuv1alpha1.LabelTag:      p.Tag,
			picchuv1alpha1.LabelRuleType: RuleTypeDeployment,
		}),
	}

	if err := cli.List(ctx, prlist, opts); err != nil {
		// This is a list failure, not a delete failure — log it accurately.
		log.Error(err, "Failed to list DeploymentRules for deletion")
		return err
	}

	for _, prometheusRule := range prlist.Items {
		err := cli.Delete(ctx, prometheusRule)
		if err != nil {
			// Already deleted by someone else — fine, keep going.
			if errors.IsNotFound(err) {
				continue
			}
			plan.LogSync(log, "deleted", err, prometheusRule)
			return err
		}
		plan.LogSync(log, "deleted", err, prometheusRule)
	}

	return nil
}
96 changes: 0 additions & 96 deletions controllers/plan/syncCanaryRules.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ import (
"context"
"fmt"
"strconv"
"strings"

"github.com/go-logr/logr"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
Expand Down Expand Up @@ -78,16 +77,6 @@ func (p *SyncCanaryRules) prometheusRules(log logr.Logger) (*monitoringv1.Promet
}
}

config := SLOConfig{
App: p.App,
Tag: p.Tag,
Labels: p.ServiceLevelObjectiveLabels,
}
helperRules := config.helperRules(log)
for _, rg := range helperRules {
rule.Spec.Groups = append(rule.Spec.Groups, *rg)
}

prs = append(prs, rule)
prl.Items = prs
return prl, nil
Expand All @@ -114,53 +103,6 @@ func (p *SyncCanaryRules) prometheusRule() *monitoringv1.PrometheusRule {
}
}

// helperRules builds the auxiliary canary alert rule groups for new
// deployment pods: one group alerting on CrashLoopBackOff and one on
// ImagePullBackOff. Each alert fires after its expression has been true for
// one minute, and carries the standard canary rule labels merged with any
// user-configured alert labels.
func (s *SLOConfig) helperRules(log logr.Logger) []*monitoringv1.RuleGroup {
	ruleGroups := []*monitoringv1.RuleGroup{}

	// Start from the standard canary labels, then overlay configured alert
	// labels (configured labels win on key collision).
	crashLoopLabels := s.canaryRuleLabels()
	for k, v := range s.Labels.AlertLabels {
		crashLoopLabels[k] = v
	}

	crashLoopRuleGroup := &monitoringv1.RuleGroup{
		Name: s.crashLoopAlertName(),
		Rules: []monitoringv1.Rule{
			{
				Alert:       s.crashLoopAlertName(),
				For:         monitoringv1.Duration("1m"),
				Expr:        intstr.FromString(s.crashLoopQuery(log)),
				Labels:      crashLoopLabels,
				Annotations: s.crashLoopRuleAnnotations(log),
			},
		},
	}

	ruleGroups = append(ruleGroups, crashLoopRuleGroup)

	// Same label merge for the ImagePullBackOff alert; kept as a separate map
	// so the two rule groups do not share label storage.
	imagePullBackOffLabels := s.canaryRuleLabels()
	for k, v := range s.Labels.AlertLabels {
		imagePullBackOffLabels[k] = v
	}

	imagePullBackOffRuleGroup := &monitoringv1.RuleGroup{
		Name: s.imagePullBackOffAlertName(),
		Rules: []monitoringv1.Rule{
			{
				Alert:       s.imagePullBackOffAlertName(),
				For:         monitoringv1.Duration("1m"),
				Expr:        intstr.FromString(s.imagePullBackOffQuery(log)),
				Labels:      imagePullBackOffLabels,
				Annotations: s.imagePullBackOffAnnotations(log),
			},
		},
	}

	ruleGroups = append(ruleGroups, imagePullBackOffRuleGroup)

	return ruleGroups
}

func (s *SLOConfig) canaryRules(log logr.Logger) []*monitoringv1.RuleGroup {
ruleGroups := []*monitoringv1.RuleGroup{}

Expand Down Expand Up @@ -209,20 +151,6 @@ func (s *SLOConfig) canaryRuleAnnotations(log logr.Logger) map[string]string {
}
}

// crashLoopRuleAnnotations returns the summary and message annotations
// attached to the CrashLoopBackOff canary alert.
func (s *SLOConfig) crashLoopRuleAnnotations(log logr.Logger) map[string]string {
	annotations := make(map[string]string, 2)
	annotations[CanarySummaryAnnotation] = fmt.Sprintf("%s - Canary is failing CrashLoopBackOff SLO - there is at least one pod in state `CrashLoopBackOff`", s.App)
	annotations[CanaryMessageAnnotation] = "There is at least one pod in state `CrashLoopBackOff`"
	return annotations
}

// imagePullBackOffAnnotations returns the summary and message annotations
// attached to the ImagePullBackOff canary alert.
func (s *SLOConfig) imagePullBackOffAnnotations(log logr.Logger) map[string]string {
	annotations := make(map[string]string, 2)
	annotations[CanarySummaryAnnotation] = fmt.Sprintf("%s - Canary is failing ImagePullBackOff SLO - there is at least one pod in state `ImagePullBackOff`", s.App)
	annotations[CanaryMessageAnnotation] = "There is at least one pod in state `ImagePullBackOff`"
	return annotations
}

// canaryRuleName returns the name of the canary PrometheusRule resource for
// this app/tag, in the form "<app>-canary-<tag>".
func (p *SyncCanaryRules) canaryRuleName() string {
	return p.App + "-canary-" + p.Tag
}
Expand All @@ -231,17 +159,6 @@ func (s *SLOConfig) canaryAlertName() string {
return fmt.Sprintf("%s_canary", s.Name)
}

// crashLoopAlertName returns the CrashLoopBackOff alert name for this app.
// The name is derived per app (not per SLO), with dashes replaced by
// underscores to form a valid Prometheus alert identifier.
func (s *SLOConfig) crashLoopAlertName() string {
	name := strings.ReplaceAll(s.App, "-", "_")
	return fmt.Sprintf("%s_canary_crashloop", name)
}

// imagePullBackOffAlertName returns the ImagePullBackOff alert name for this
// app, with dashes replaced by underscores to form a valid Prometheus alert
// identifier.
func (s *SLOConfig) imagePullBackOffAlertName() string {
	name := strings.ReplaceAll(s.App, "-", "_")
	return fmt.Sprintf("%s_canary_imagepullbackoff", name)
}

func (s *SLOConfig) canaryQuery(log logr.Logger) string {
return fmt.Sprintf("%s{%s=\"%s\"} / %s{%s=\"%s\"} - %v > ignoring(%s) sum(%s) / sum(%s)",
s.errorQuery(), s.SLO.ServiceLevelIndicator.TagKey, s.Tag,
Expand All @@ -250,19 +167,6 @@ func (s *SLOConfig) canaryQuery(log logr.Logger) string {
)
}

// crashLoopQuery builds the PromQL expression that is non-empty when at least
// one container named after the app, in a pod whose name starts with the tag
// (e.g. pod=~"main-20240109-181554-cba9e8cbbf-.*"), is waiting with reason
// CrashLoopBackOff.
func (s *SLOConfig) crashLoopQuery(log logr.Logger) string {
	const format = `sum by (reason) (kube_pod_container_status_waiting_reason{reason="CrashLoopBackOff", container="%s", pod=~"%s-.*"}) > 0`
	return fmt.Sprintf(format, s.App, s.Tag)
}

// imagePullBackOffQuery builds the PromQL expression that is non-empty when
// at least one container named after the app, in a pod whose name starts with
// the tag, is waiting with reason ImagePullBackOff.
func (s *SLOConfig) imagePullBackOffQuery(log logr.Logger) string {
	const format = `sum by (reason) (kube_pod_container_status_waiting_reason{reason="ImagePullBackOff", container="%s", pod=~"%s-.*"}) > 0`
	return fmt.Sprintf(format, s.App, s.Tag)
}

func (s *SLOConfig) formatAllowancePercent(log logr.Logger) string {
allowancePercent := s.SLO.ServiceLevelIndicator.Canary.AllowancePercent
if s.SLO.ServiceLevelIndicator.Canary.AllowancePercentString != "" {
Expand Down
44 changes: 0 additions & 44 deletions controllers/plan/syncCanaryRules_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,50 +147,6 @@ var (
},
},
},
{
Name: "test_app_canary_crashloop",
Rules: []monitoringv1.Rule{
{
Alert: "test_app_canary_crashloop",
Expr: intstr.FromString("sum by (reason) (kube_pod_container_status_waiting_reason{reason=\"CrashLoopBackOff\", container=\"test-app\", pod=~\"tag-.*\"}) > 0"),
For: "1m",
Annotations: map[string]string{
CanaryMessageAnnotation: "There is at least one pod in state `CrashLoopBackOff`",
CanarySummaryAnnotation: "test-app - Canary is failing CrashLoopBackOff SLO - there is at least one pod in state `CrashLoopBackOff`",
},
Labels: map[string]string{
CanaryAppLabel: "test-app",
CanaryTagLabel: "tag",
CanaryLabel: "true",
CanarySLOLabel: "true",
"severity": "test",
"channel": "#eng-releases",
},
},
},
},
{
Name: "test_app_canary_imagepullbackoff",
Rules: []monitoringv1.Rule{
{
Alert: "test_app_canary_imagepullbackoff",
Expr: intstr.FromString("sum by (reason) (kube_pod_container_status_waiting_reason{reason=\"ImagePullBackOff\", container=\"test-app\", pod=~\"tag-.*\"}) > 0"),
For: "1m",
Annotations: map[string]string{
CanaryMessageAnnotation: "There is at least one pod in state `ImagePullBackOff`",
CanarySummaryAnnotation: "test-app - Canary is failing ImagePullBackOff SLO - there is at least one pod in state `ImagePullBackOff`",
},
Labels: map[string]string{
CanaryAppLabel: "test-app",
CanaryTagLabel: "tag",
CanaryLabel: "true",
CanarySLOLabel: "true",
"severity": "test",
"channel": "#eng-releases",
},
},
},
},
},
},
},
Expand Down
Loading