Review feedback
Signed-off-by: Carolyn Van Slyck <[email protected]>
carolynvs committed Mar 11, 2022
1 parent 3a4addb commit ca09693
Showing 7 changed files with 70 additions and 48 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,4 +1,4 @@
# syntax=docker/dockerfile-upstream:1.4.0-rc2
# syntax=docker/dockerfile:1.4

# Build the manager binary
FROM golang:1.17 as builder
2 changes: 1 addition & 1 deletion api/v1/agent_types.go
@@ -1,6 +1,6 @@
package v1

// AgentPhase are valid status of a Porter agent job
// AgentPhase are valid statuses of a Porter agent job
// that is managing a change to a Porter resource.
type AgentPhase string

27 changes: 25 additions & 2 deletions api/v1/const.go
@@ -58,12 +58,35 @@ const (
LabelResourceGeneration = Prefix + "resourceGeneration"

// LabelRetry is a label applied to the resources created by the
// Porter Operator, representing the retry attempt identifier. It is used to
// help the operator determine if a resource has
// Porter Operator, representing the retry attempt identifier.
LabelRetry = Prefix + "retry"

// FinalizerName is the name of the finalizer applied to Porter Operator
// resources that should be reconciled by the operator before allowing it to
// be deleted.
FinalizerName = Prefix + "finalizer"

// VolumePorterSharedName is the name of the volume shared between the porter
// agent and the invocation image.
VolumePorterSharedName = "porter-shared"

// VolumePorterSharedPath is the mount path of the volume shared between the
// porter agent and the invocation image.
VolumePorterSharedPath = "/porter-shared"

// VolumePorterConfigName is the name of the volume that contains Porter's config
// file.
VolumePorterConfigName = "porter-config"

// VolumePorterConfigPath is the mount path of the volume containing Porter's
// config file.
VolumePorterConfigPath = "/porter-config"

// VolumePorterWorkDirName is the name of the volume that is used as the Porter's
// working directory.
VolumePorterWorkDirName = "porter-workdir"

// VolumePorterWorkDirPath is the mount path of the volume that is used as the
// Porter's working directory.
VolumePorterWorkDirPath = "/porter-workdir"
)
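For orientation, since removed and added lines render back to back in this view, these new constants replace volume names and mount paths that were previously hardcoded in the controller. Below is a minimal sketch (not part of the commit; the porterv1 import path is assumed) of how a name/path pair is meant to be consumed when wiring a volume and its mount:

package example

import (
	corev1 "k8s.io/api/core/v1"

	// Assumed import path for the operator's api/v1 package.
	porterv1 "get.porter.sh/operator/api/v1"
)

// sharedVolume pairs VolumePorterSharedName with VolumePorterSharedPath so the
// volume definition and its mount cannot drift apart.
func sharedVolume(claimName string) (corev1.Volume, corev1.VolumeMount) {
	volume := corev1.Volume{
		Name: porterv1.VolumePorterSharedName,
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: claimName,
			},
		},
	}
	mount := corev1.VolumeMount{
		Name:      porterv1.VolumePorterSharedName,
		MountPath: porterv1.VolumePorterSharedPath,
	}
	return volume, mount
}

The agentaction_controller.go changes below perform exactly this substitution for the shared, config, and workdir volumes.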
64 changes: 32 additions & 32 deletions controllers/agentaction_controller.go
@@ -153,27 +153,27 @@ func (r *AgentActionReconciler) applyJobToStatus(log logr.Logger, action *porter
action.Status.Job = nil
action.Status.Conditions = nil
log.V(Log5Trace).Info("Cleared status because there is no current job")
} else {
action.Status.Job = &corev1.LocalObjectReference{Name: job.Name}
setCondition(log, action, porterv1.ConditionScheduled, "JobCreated")
action.Status.Phase = porterv1.PhasePending

if job.Status.Active+job.Status.Failed+job.Status.Succeeded > 0 {
action.Status.Phase = porterv1.PhaseRunning
setCondition(log, action, porterv1.ConditionStarted, "JobStarted")
}

for _, condition := range job.Status.Conditions {
switch condition.Type {
case batchv1.JobComplete:
action.Status.Phase = porterv1.PhaseSucceeded
setCondition(log, action, porterv1.ConditionComplete, "JobCompleted")
break
case batchv1.JobFailed:
action.Status.Phase = porterv1.PhaseFailed
setCondition(log, action, porterv1.ConditionFailed, "JobFailed")
break
}
return
}
action.Status.Job = &corev1.LocalObjectReference{Name: job.Name}
setCondition(log, action, porterv1.ConditionScheduled, "JobCreated")
action.Status.Phase = porterv1.PhasePending

if job.Status.Active+job.Status.Failed+job.Status.Succeeded > 0 {
action.Status.Phase = porterv1.PhaseRunning
setCondition(log, action, porterv1.ConditionStarted, "JobStarted")
}

for _, condition := range job.Status.Conditions {
switch condition.Type {
case batchv1.JobComplete:
action.Status.Phase = porterv1.PhaseSucceeded
setCondition(log, action, porterv1.ConditionComplete, "JobCompleted")
break
case batchv1.JobFailed:
action.Status.Phase = porterv1.PhaseFailed
setCondition(log, action, porterv1.ConditionFailed, "JobFailed")
break
}
}
}
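Because the removed and added lines of this hunk also appear back to back, the shape of the change is easy to miss: the else branch is flattened into an early return so the status handling that follows is one indent level shallower. A minimal sketch of that pattern only, with a hypothetical body rather than the real controller logic:

package example

import batchv1 "k8s.io/api/batch/v1"

// applyJobToStatusSketch illustrates the guard-clause refactor: handle the
// "no job" case and return early, then keep the main path un-indented.
func applyJobToStatusSketch(job *batchv1.Job) {
	if job == nil {
		// clear any previously recorded status, then stop
		return
	}

	// record the job reference, phase, and conditions here, one indent
	// level shallower than the old else block required
	_ = job.Name
}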
@@ -398,7 +398,7 @@ func (r *AgentActionReconciler) createAgentJob(ctx context.Context, log logr.Log
Env: env,
EnvFrom: envFrom,
VolumeMounts: volumeMounts,
WorkingDir: "/porter-workdir",
WorkingDir: porterv1.VolumePorterWorkDirPath,
},
},
Volumes: volumes,
@@ -573,7 +573,7 @@ func (r *AgentActionReconciler) getAgentEnv(action *porterv1.AgentAction, agentC
},
{
Name: "JOB_VOLUME_PATH",
Value: "/porter-shared",
Value: porterv1.VolumePorterSharedPath,
},
{
Name: "CLEANUP_JOBS",
@@ -615,15 +615,15 @@ func (r *AgentActionReconciler) getAgentEnv(action *porterv1.AgentAction, agentC
func (r *AgentActionReconciler) getAgentVolumes(action *porterv1.AgentAction, pvc *corev1.PersistentVolumeClaim, configSecret *corev1.Secret, workdirSecret *corev1.Secret) ([]corev1.Volume, []corev1.VolumeMount) {
volumes := []corev1.Volume{
{
Name: "porter-shared",
Name: porterv1.VolumePorterSharedName,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
},
{
Name: "porter-config",
Name: porterv1.VolumePorterConfigName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: configSecret.Name,
@@ -632,7 +632,7 @@ func (r *AgentActionReconciler) getAgentVolumes(action *porterv1.AgentAction, pv
},
},
{
Name: "porter-workdir",
Name: porterv1.VolumePorterWorkDirName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: workdirSecret.Name,
@@ -647,16 +647,16 @@

volumeMounts := []corev1.VolumeMount{
{
Name: "porter-shared",
MountPath: "/porter-shared",
Name: porterv1.VolumePorterSharedName,
MountPath: porterv1.VolumePorterSharedPath,
},
{
Name: "porter-config",
MountPath: "/porter-config",
Name: porterv1.VolumePorterConfigName,
MountPath: porterv1.VolumePorterConfigPath,
},
{
Name: "porter-workdir",
MountPath: "/porter-workdir",
Name: porterv1.VolumePorterWorkDirName,
MountPath: porterv1.VolumePorterWorkDirPath,
},
}
for _, mount := range action.Spec.VolumeMounts {
14 changes: 7 additions & 7 deletions controllers/agentaction_controller_test.go
@@ -437,9 +437,9 @@ func TestAgentActionReconciler_createAgentJob(t *testing.T) {
assertContains(t, podTemplate.Labels, "testLabel", "abc123", "incorrect label")
assert.Len(t, podTemplate.Spec.Volumes, 3, "incorrect pod volumes")
assert.Len(t, podTemplate.Spec.Volumes, 3)
assert.Equal(t, "porter-shared", podTemplate.Spec.Volumes[0].Name, "expected the porter-shared volume")
assert.Equal(t, "porter-config", podTemplate.Spec.Volumes[1].Name, "expected the porter-config volume")
assert.Equal(t, "porter-workdir", podTemplate.Spec.Volumes[2].Name, "expected the porter-workdir volume")
assert.Equal(t, porterv1.VolumePorterSharedName, podTemplate.Spec.Volumes[0].Name, "expected the porter-shared volume")
assert.Equal(t, porterv1.VolumePorterConfigName, podTemplate.Spec.Volumes[1].Name, "expected the porter-config volume")
assert.Equal(t, porterv1.VolumePorterWorkDirName, podTemplate.Spec.Volumes[2].Name, "expected the porter-workdir volume")
assert.Equal(t, "porteraccount", podTemplate.Spec.ServiceAccountName, "incorrect service account for the pod")
assert.Equal(t, pointer.Int64Ptr(65532), podTemplate.Spec.SecurityContext.RunAsUser, "incorrect RunAsUser")
assert.Equal(t, pointer.Int64Ptr(0), podTemplate.Spec.SecurityContext.RunAsGroup, "incorrect RunAsGroup")
@@ -455,16 +455,16 @@ func TestAgentActionReconciler_createAgentJob(t *testing.T) {
assertEnvVar(t, agentContainer.Env, "KUBE_NAMESPACE", "test")
assertEnvVar(t, agentContainer.Env, "IN_CLUSTER", "true")
assertEnvVar(t, agentContainer.Env, "JOB_VOLUME_NAME", pvc.Name)
assertEnvVar(t, agentContainer.Env, "JOB_VOLUME_PATH", "/porter-shared")
assertEnvVar(t, agentContainer.Env, "JOB_VOLUME_PATH", porterv1.VolumePorterSharedPath)
assertEnvVar(t, agentContainer.Env, "CLEANUP_JOBS", "false") // this will be configurable in the future
assertEnvVar(t, agentContainer.Env, "SERVICE_ACCOUNT", "installeraccount")
assertEnvVar(t, agentContainer.Env, "LABELS", "porter.sh/jobType=bundle-installer porter.sh/managed=true porter.sh/resourceGeneration=1 porter.sh/resourceKind=AgentAction porter.sh/resourceName=porter-hello porter.sh/retry= testLabel=abc123")
assertEnvVar(t, agentContainer.Env, "AFFINITY_MATCH_LABELS", "porter.sh/resourceKind=AgentAction porter.sh/resourceName=porter-hello porter.sh/resourceGeneration=1 porter.sh/retry=")
assertEnvFrom(t, agentContainer.EnvFrom, "porter-env", pointer.BoolPtr(true))
assert.Len(t, agentContainer.VolumeMounts, 3)
assertVolumeMount(t, agentContainer.VolumeMounts, "porter-config", "/porter-config")
assertVolumeMount(t, agentContainer.VolumeMounts, "porter-shared", "/porter-shared")
assertVolumeMount(t, agentContainer.VolumeMounts, "porter-workdir", "/porter-workdir")
assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterConfigName, porterv1.VolumePorterConfigPath)
assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterSharedName, porterv1.VolumePorterSharedPath)
assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterWorkDirName, porterv1.VolumePorterWorkDirPath)
}

func assertSharedAgentLabels(t *testing.T, labels map[string]string) {
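The assertions above use repo test helpers such as assertVolumeMount. A hypothetical implementation consistent with how it is called here, which may differ from the real helper in the repository:

package controllers

import (
	"testing"

	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
)

// assertVolumeMount checks that a mount with the given name exists and is
// mounted at the expected path.
func assertVolumeMount(t *testing.T, mounts []corev1.VolumeMount, name string, path string) {
	t.Helper()
	for _, mount := range mounts {
		if mount.Name == name {
			assert.Equal(t, path, mount.MountPath, "incorrect mount path for volume %s", name)
			return
		}
	}
	assert.Failf(t, "missing volume mount", "expected a volume mount named %s", name)
}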
7 changes: 3 additions & 4 deletions controllers/installation_controller.go
@@ -107,10 +107,9 @@ func (r *InstallationReconciler) Reconcile(ctx context.Context, req ctrl.Request
log.V(Log4Debug).Info("Reconciliation complete: A porter agent has been dispatched to uninstall the installation.")
return ctrl.Result{}, err
} else if isDeleted(inst) {
// This is installation without a finalizer that was deleted
// We remove the finalizer af
//ter we successfully uninstall (or someone is manually cleaning things up)
// Just let it go
// This is installation without a finalizer that was deleted We remove the
// finalizer after we successfully uninstall (or someone is manually cleaning
// things up) Just let it go
log.V(Log4Debug).Info("Reconciliation complete: Installation CRD is ready for deletion.")
return ctrl.Result{}, nil
}
2 changes: 1 addition & 1 deletion installer/Dockerfile.tmpl
@@ -1,4 +1,4 @@
# syntax=docker.io/docker/dockerfile-upstream:1.4.0-rc2
# syntax=docker.io/docker/dockerfile:1.4
FROM debian:stretch-slim

# PORTER_INIT
