diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b3eb384c..a4812b75 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,7 +22,7 @@ jobs: - name: Setup Porter uses: getporter/gh-action@v0.1.3 with: - porter_version: v1.0.0-alpha.8 + porter_version: v1.0.0-alpha.12 - name: Set up Mage run: go run mage.go EnsureMage - name: Test @@ -48,4 +48,4 @@ jobs: if: ${{ github.event_name != 'pull_request' }} run: mage -v Publish env: - ENV: production + PORTER_ENV: production
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 89827000..f1871ce7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,6 +7,7 @@ * [Makefile explained](#makefile-explained) * [Run a test installation](#run-a-test-installation) * [Modify the porter agent](#modify-the-porter-agent) + * [Publish to another registry](#publish-to-another-registry) --- # New Contributor Guide @@ -63,7 +64,7 @@ These are targets that you won't usually run directly, other targets use them as * **DeleteTestCluster** deletes the KIND cluster named porter. * **Clean** deletes all data from the test cluster. * **CleanManual** removes all -* **CleanTests** removes any namespaces created by the test suite (where porter-test=true). +* **CleanTests** removes any namespaces created by the test suite (with label porter.sh/testdata=true). ## Modify the porter agent @@ -147,3 +148,14 @@ kubectl get pods -n test --watch # Now you can see the result in porter! porter logs hello -n operator ``` + +## Publish to Another Registry + +When working on the operator, it can be helpful to publish the operator's bundle to another registry instead of the default localhost:5000. +This is useful, for example, when you are testing the operator on a real cluster instead of KinD. + +``` +export PORTER_ENV=custom # this can be anything except production or test +export PORTER_OPERATOR_REGISTRY=ghcr.io/getporter/test # Set this to a registry for which you have push access +mage publish +```
diff --git a/Dockerfile b/Dockerfile index 208c13ae..5e482ae3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -# syntax=docker/dockerfile:1.2 +# syntax=docker/dockerfile-upstream:1.4.0-rc2 # Build the manager binary FROM golang:1.17 as builder @@ -14,8 +14,8 @@ RUN --mount=type=cache,target=/go/pkg/mod \ # Copy the go source COPY main.go main.go -COPY api/ api/ -COPY controllers/ controllers/ +COPY --link api/ api/ +COPY --link controllers/ controllers/ # Build RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \ @@ -23,9 +23,9 @@ RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot +FROM gcr.io/distroless/static WORKDIR /app -COPY --from=builder /workspace/manager . -USER 65532:65532 +COPY --from=builder --chown=65532:0 /workspace/manager .
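+# Note: 65532 is the UID that distroless publishes as "nonroot"; chown-ing the
+# binary to 65532:0 and running with the USER directive below keeps the
+# container non-root without relying on the :nonroot tag dropped above.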
+USER 65532 ENTRYPOINT ["/app/manager"]
diff --git a/PROJECT index 8302250c..149b9b6e 100644 --- a/PROJECT +++ b/PROJECT @@ -12,6 +12,9 @@ resources: - crdVersion: v1 kind: PorterConfig version: v1 +- crdVersion: v1 + kind: AgentAction + version: v1 version: 3-alpha plugins: manifests.sdk.operatorframework.io/v2: {}
diff --git a/api/v1/agent_types.go new file mode 100644 index 00000000..a4e18374 --- /dev/null +++ b/api/v1/agent_types.go @@ -0,0 +1,40 @@ +package v1 + +// AgentPhase is the status of a Porter agent job +// that is managing a change to a Porter resource. +type AgentPhase string + +const ( + // PhaseUnknown means that we don't know what porter is doing yet. + PhaseUnknown AgentPhase = "Unknown" + + // PhasePending means that Porter's execution is pending. + PhasePending AgentPhase = "Pending" + + // PhaseRunning indicates that Porter is running. + PhaseRunning AgentPhase = "Running" + + // PhaseSucceeded means that calling Porter succeeded. + PhaseSucceeded AgentPhase = "Succeeded" + + // PhaseFailed means that calling Porter failed. + PhaseFailed AgentPhase = "Failed" +) + +// AgentConditionType is a valid condition of a Porter agent job +// that is managing a change to a Porter resource. +type AgentConditionType string + +const ( + // ConditionScheduled means that the Porter agent has been scheduled. + ConditionScheduled AgentConditionType = "Scheduled" + + // ConditionStarted means that the Porter agent has started. + ConditionStarted AgentConditionType = "Started" + + // ConditionComplete means the Porter agent has completed successfully. + ConditionComplete AgentConditionType = "Completed" + + // ConditionFailed means the Porter agent failed. + ConditionFailed AgentConditionType = "Failed" +)
diff --git a/api/v1/agentaction_types.go new file mode 100644 index 00000000..ff196319 --- /dev/null +++ b/api/v1/agentaction_types.go @@ -0,0 +1,100 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AgentActionSpec defines the desired state of AgentAction +type AgentActionSpec struct { + // AgentConfig is the name of an AgentConfig to use instead of the AgentConfig defined at the namespace or system level. + // +optional + AgentConfig *corev1.LocalObjectReference `json:"agentConfig,omitempty"` + + // PorterConfig is the name of a PorterConfig to use instead of the PorterConfig defined at the namespace or system level. + PorterConfig *corev1.LocalObjectReference `json:"porterConfig,omitempty"` + + // Command to run inside the Porter Agent job. Defaults to running the agent. + Command []string `json:"command,omitempty"` + + // Args to pass to the Porter Agent job. This should be the porter command that you want to run. + Args []string `json:"args,omitempty"` + + // Files that should be present in the working directory where the command is run. + Files map[string][]byte `json:"files,omitempty"` + + // Env variables to set on the Porter Agent job. + Env []corev1.EnvVar `json:"env,omitempty"` + + // EnvFrom allows setting environment variables on the Porter Agent job, using secrets or config maps as the source. + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` + + // VolumeMounts that should be defined on the Porter Agent job. + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` + + // Volumes that should be defined on the Porter Agent job.
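+ // Each volume listed here is referenced by name from a corresponding entry
+ // in VolumeMounts above; for example, a Secret volume holding credentials
+ // can be mounted wherever the porter command expects to read them.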
+ Volumes []corev1.Volume `json:"volumes,omitempty"` +} + +// AgentActionStatus defines the observed state of AgentAction +type AgentActionStatus struct { + // The last generation observed by the controller. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // The currently active job that is running the Porter Agent. + Job *corev1.LocalObjectReference `json:"job,omitempty"` + + // The current status of the agent. + // Possible values are: Unknown, Pending, Running, Succeeded, and Failed. + // +kubebuilder:validation:Type=string + Phase AgentPhase `json:"phase,omitempty"` + + // Conditions store a list of states that have been reached. + // Each condition refers to the status of the Job + // Possible conditions are: Scheduled, Started, Completed, and Failed + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AgentAction is the Schema for the agentactions API +type AgentAction struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AgentActionSpec `json:"spec,omitempty"` + Status AgentActionStatus `json:"status,omitempty"` +} + +func (a *AgentAction) GetConditions() *[]metav1.Condition { + return &a.Status.Conditions +} + +// GetRetryLabelValue returns a value that is safe to use +// as a label value and represents the retry annotation used +// to trigger reconciliation. +func (a *AgentAction) GetRetryLabelValue() string { + return getRetryLabelValue(a.Annotations) +} + +// SetRetryAnnotation flags the resource to retry its last operation. +func (a *AgentAction) SetRetryAnnotation(retry string) { + if a.Annotations == nil { + a.Annotations = make(map[string]string, 1) + } + a.Annotations[AnnotationRetry] = retry +} + +// +kubebuilder:object:root=true + +// AgentActionList contains a list of AgentAction +type AgentActionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AgentAction `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AgentAction{}, &AgentActionList{}) +} diff --git a/api/v1/agentaction_types_test.go b/api/v1/agentaction_types_test.go new file mode 100644 index 00000000..e496308c --- /dev/null +++ b/api/v1/agentaction_types_test.go @@ -0,0 +1,13 @@ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAgentAction_SetRetryAnnotation(t *testing.T) { + action := AgentAction{} + action.SetRetryAnnotation("retry-1") + assert.Equal(t, "retry-1", action.Annotations[AnnotationRetry]) +} diff --git a/api/v1/agentconfig_types.go b/api/v1/agentconfig_types.go index b0e45a47..e0699980 100644 --- a/api/v1/agentconfig_types.go +++ b/api/v1/agentconfig_types.go @@ -54,7 +54,7 @@ func (c AgentConfigSpec) GetPorterImage() string { // We don't use a mutable tag like latest, or canary because it's a bad practice that we don't want to encourage. // As we test out the operator with new versions of Porter, keep this value up-to-date so that the default // version is guaranteed to work. 
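+ // For example (mirroring the tests below): an empty AgentConfigSpec
+ // resolves to ghcr.io/getporter/porter-agent:v1.0.0-alpha.12, while setting
+ // only PorterRepository swaps the repository and keeps this default tag.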
- version = "v1.0.0-alpha.8" + version = "v1.0.0-alpha.12" } repo := c.PorterRepository if repo == "" { diff --git a/api/v1/agentconfig_types_test.go b/api/v1/agentconfig_types_test.go index 9c604ff1..a6e5b993 100644 --- a/api/v1/agentconfig_types_test.go +++ b/api/v1/agentconfig_types_test.go @@ -12,7 +12,7 @@ import ( func TestAgentConfigSpec_GetPorterImage(t *testing.T) { t.Run("default", func(t *testing.T) { c := AgentConfigSpec{} - assert.Equal(t, "ghcr.io/getporter/porter-agent:v1.0.0-alpha.8", c.GetPorterImage()) + assert.Equal(t, "ghcr.io/getporter/porter-agent:v1.0.0-alpha.12", c.GetPorterImage()) }) t.Run("porter version set", func(t *testing.T) { @@ -23,7 +23,7 @@ func TestAgentConfigSpec_GetPorterImage(t *testing.T) { t.Run("porter repository set", func(t *testing.T) { // Test if someone has mirrored porter's agent to another registry c := AgentConfigSpec{PorterRepository: "localhost:5000/myporter"} - assert.Equal(t, "localhost:5000/myporter:v1.0.0-alpha.8", c.GetPorterImage()) + assert.Equal(t, "localhost:5000/myporter:v1.0.0-alpha.12", c.GetPorterImage()) }) t.Run("porter repository and version set", func(t *testing.T) { diff --git a/api/v1/const.go b/api/v1/const.go new file mode 100644 index 00000000..9401d68c --- /dev/null +++ b/api/v1/const.go @@ -0,0 +1,16 @@ +package v1 + +const ( + LabelJobType = Prefix + "jobType" + JobTypeAgent = "porter-agent" + JobTypeInstaller = "bundle-installer" + LabelSecretType = Prefix + "secretType" + SecretTypeConfig = "porter-config" + SecretTypeWorkdir = "workdir" + LabelManaged = Prefix + "managed" + LabelResourceKind = Prefix + "resourceKind" + LabelResourceName = Prefix + "resourceName" + LabelResourceGeneration = Prefix + "resourceGeneration" + LabelRetry = Prefix + "retry" + FinalizerName = Prefix + "finalizer" +) diff --git a/api/v1/installation_types.go b/api/v1/installation_types.go index be940ed4..b34bdd8b 100644 --- a/api/v1/installation_types.go +++ b/api/v1/installation_types.go @@ -1,13 +1,11 @@ package v1 import ( - "crypto/md5" - "encoding/hex" "encoding/json" "github.com/pkg/errors" "gopkg.in/yaml.v3" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -28,10 +26,10 @@ var _ yaml.Marshaler = InstallationSpec{} type InstallationSpec struct { // AgentConfig is the name of an AgentConfig to use instead of the AgentConfig defined at the namespace or system level. // +optional - AgentConfig v1.LocalObjectReference `json:"agentConfig,omitempty" yaml:"-"` + AgentConfig *corev1.LocalObjectReference `json:"agentConfig,omitempty" yaml:"-"` // PorterConfig is the name of a PorterConfig to use instead of the PorterConfig defined at the namespace or system level. - PorterConfig v1.LocalObjectReference `json:"porterConfig,omitempty" yaml:"-"` + PorterConfig *corev1.LocalObjectReference `json:"porterConfig,omitempty" yaml:"-"` // // These are fields from the Porter installation resource. @@ -111,68 +109,9 @@ func (in InstallationSpec) MarshalYAML() (interface{}, error) { // InstallationStatus defines the observed state of Installation type InstallationStatus struct { - // The last generation observed by the controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty"` - - // The currently active job that is running Porter. - ActiveJob *v1.LocalObjectReference `json:"activeJob,omitempty"` - - // The current status of the installation - // Possible values are: Unknown, Pending, Running, Succeeded, and Failed. 
- // +kubebuilder:validation:Type=string - Phase InstallationPhase `json:"phase,omitempty"` - - // Conditions store a list of states that have been reached. - // Each condition refers to the status of the ActiveJob - // Possible conditions are: Scheduled, Started, Completed, and Failed - Conditions []metav1.Condition `json:"conditions,omitempty"` + PorterResourceStatus `json:",inline"` } -// Reset the installation status before Porter is run. -// This wipes out the status from any previous runs. -func (s *InstallationStatus) Initialize() { - s.Conditions = []metav1.Condition{} - s.Phase = PhaseUnknown - s.ActiveJob = nil -} - -// These are valid statuses for an Installation. -type InstallationPhase string - -const ( - // PhaseUnknown means that we don't know what porter is doing yet. - PhaseUnknown InstallationPhase = "Unknown" - - // PhasePending means that Porter's execution is pending. - PhasePending InstallationPhase = "Pending" - - // PhasePending indicates that Porter is running. - PhaseRunning InstallationPhase = "Running" - - // PhaseSucceeded means that calling Porter succeeded. - PhaseSucceeded InstallationPhase = "Succeeded" - - // PhaseFailed means that calling Porter failed. - PhaseFailed InstallationPhase = "Failed" -) - -// These are valid conditions of an Installation. -type InstallationConditionType string - -const ( - // ConditionScheduled means that the Porter run has been scheduled. - ConditionScheduled InstallationConditionType = "Scheduled" - - // ConditionStarted means that the Porter run has started. - ConditionStarted InstallationConditionType = "Started" - - // ConditionComplete means the Porter run has completed successfully. - ConditionComplete InstallationConditionType = "Completed" - - // ConditionFailed means the Porter run failed. - ConditionFailed InstallationConditionType = "Failed" -) - // +kubebuilder:object:root=true // +kubebuilder:subresource:status @@ -185,19 +124,27 @@ type Installation struct { Status InstallationStatus `json:"status,omitempty"` } +func (i *Installation) GetStatus() PorterResourceStatus { + return i.Status.PorterResourceStatus +} + +func (i *Installation) SetStatus(value PorterResourceStatus) { + i.Status.PorterResourceStatus = value +} + // GetRetryLabelValue returns a value that is safe to use // as a label value and represents the retry annotation used -// to trigger reconciliation. Annotations don't have limits on -// the value, but labels are restricted to alphanumeric and .-_ -// I am just hashing the annotation value here to avoid problems -// using it directly as a label value. -func (i Installation) GetRetryLabelValue() string { - retry := i.Annotations[AnnotationRetry] - if retry == "" { - return "" +// to trigger reconciliation. +func (i *Installation) GetRetryLabelValue() string { + return getRetryLabelValue(i.Annotations) +} + +// SetRetryAnnotation flags the resource to retry its last operation. 
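+// Writing a new value here (for example a timestamp) changes the retry label
+// derived from the annotation, which is how a completed resource is flagged
+// for the controller to process again.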
+func (i *Installation) SetRetryAnnotation(retry string) { + if i.Annotations == nil { + i.Annotations = make(map[string]string, 1) } - sum := md5.Sum([]byte(retry)) - return hex.EncodeToString(sum[:]) + i.Annotations[AnnotationRetry] = retry } // +kubebuilder:object:root=true
diff --git a/api/v1/installation_types_test.go index 68c82172..70557cee 100644 --- a/api/v1/installation_types_test.go +++ b/api/v1/installation_types_test.go @@ -19,8 +19,8 @@ func TestInstallationSpec_ToPorterDocument(t *testing.T) { Name: "mybuns", Namespace: "dev", Bundle: OCIReferenceParts{ - Repository: "getporter/porter-hello", - Version: "0.1.0", + Repository: "ghcr.io/getporter/porter-hello", + Version: "0.1.1", }, Parameters: runtime.RawExtension{ Raw: []byte(`{"name":"Porter Operator"}`), @@ -35,11 +35,13 @@ func TestInstallationStatus_Initialize(t *testing.T) { s := &InstallationStatus{ - ObservedGeneration: 2, - ActiveJob: &corev1.LocalObjectReference{Name: "somejob"}, - Phase: PhaseSucceeded, - Conditions: []metav1.Condition{ - {Type: string(ConditionComplete), Status: metav1.ConditionTrue}, + PorterResourceStatus: PorterResourceStatus{ + ObservedGeneration: 2, + Action: &corev1.LocalObjectReference{Name: "something"}, + Phase: PhaseSucceeded, + Conditions: []metav1.Condition{ + {Type: string(ConditionComplete), Status: metav1.ConditionTrue}, + }, }, } @@ -47,6 +49,12 @@ assert.Equal(t, int64(2), s.ObservedGeneration, "ObservedGeneration should not be reset") assert.Empty(t, s.Conditions, "Conditions should be empty") - assert.Nil(t, s.ActiveJob, "ActiveJob should be nil") + assert.Nil(t, s.Action, "Action should be nil") assert.Equal(t, PhaseUnknown, s.Phase, "Phase should reset to Unknown") } + +func TestInstallation_SetRetryAnnotation(t *testing.T) { + inst := Installation{} + inst.SetRetryAnnotation("retry-1") + assert.Equal(t, "retry-1", inst.Annotations[AnnotationRetry]) +}
diff --git a/api/v1/porter_resource.go new file mode 100644 index 00000000..e741dded --- /dev/null +++ b/api/v1/porter_resource.go @@ -0,0 +1,50 @@ +package v1 + +import ( + "crypto/md5" + "encoding/hex" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type PorterResourceStatus struct { + // The last generation observed by the controller. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // The most recent action executed for the resource. + Action *corev1.LocalObjectReference `json:"action,omitempty"` + + // The current status of the agent. + // Possible values are: Unknown, Pending, Running, Succeeded, and Failed. + // +kubebuilder:validation:Type=string + Phase AgentPhase `json:"phase,omitempty"` + + // Conditions store a list of states that have been reached. + // Each condition refers to the status of the Action + // Possible conditions are: Scheduled, Started, Completed, and Failed + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// Initialize resets the resource status before Porter is run. +// This wipes out the status from any previous runs. +func (s *PorterResourceStatus) Initialize() { + s.Conditions = []metav1.Condition{} + s.Phase = PhaseUnknown + s.Action = nil +} + +// getRetryLabelValue returns a value that is safe to use +// as a label value and represents the retry annotation used +// to trigger reconciliation.
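+// (Concretely, as exercised in the test below: the annotation value "123"
+// hashes to the label value 202cb962ac59075b964b07152d234b70.)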
Annotations don't have limits on +// the value, but labels are restricted to alphanumeric and .-_ +// I am just hashing the annotation value here to avoid problems +// using it directly as a label value. +func getRetryLabelValue(annotations map[string]string) string { + retry := annotations[AnnotationRetry] + if retry == "" { + return "" + } + sum := md5.Sum([]byte(retry)) + return hex.EncodeToString(sum[:]) +} diff --git a/api/v1/porter_resource_test.go b/api/v1/porter_resource_test.go new file mode 100644 index 00000000..ad75689a --- /dev/null +++ b/api/v1/porter_resource_test.go @@ -0,0 +1,19 @@ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetRetryLabelValue(t *testing.T) { + annotations := map[string]string{ + AnnotationRetry: "123", + } + + assert.Equal(t, "202cb962ac59075b964b07152d234b70", getRetryLabelValue(annotations), "retry label value should be populated when the annotation is set") + + delete(annotations, AnnotationRetry) + + assert.Empty(t, getRetryLabelValue(annotations), "retry label value should be empty when no annotation is set") +} diff --git a/api/v1/testdata/installation.yaml b/api/v1/testdata/installation.yaml index 505c30bf..a3e2e23c 100644 --- a/api/v1/testdata/installation.yaml +++ b/api/v1/testdata/installation.yaml @@ -2,7 +2,7 @@ schemaVersion: 1.0.0 name: mybuns namespace: dev bundle: - repository: getporter/porter-hello - version: 0.1.0 + repository: ghcr.io/getporter/porter-hello + version: 0.1.1 parameters: name: Porter Operator diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 5c473900..b645ede5 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -11,6 +11,170 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentAction) DeepCopyInto(out *AgentAction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentAction. +func (in *AgentAction) DeepCopy() *AgentAction { + if in == nil { + return nil + } + out := new(AgentAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AgentAction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentActionList) DeepCopyInto(out *AgentActionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AgentAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentActionList. +func (in *AgentActionList) DeepCopy() *AgentActionList { + if in == nil { + return nil + } + out := new(AgentActionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AgentActionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentActionSpec) DeepCopyInto(out *AgentActionSpec) { + *out = *in + if in.AgentConfig != nil { + in, out := &in.AgentConfig, &out.AgentConfig + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.PorterConfig != nil { + in, out := &in.PorterConfig, &out.PorterConfig + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentActionSpec. +func (in *AgentActionSpec) DeepCopy() *AgentActionSpec { + if in == nil { + return nil + } + out := new(AgentActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentActionStatus) DeepCopyInto(out *AgentActionStatus) { + *out = *in + if in.Job != nil { + in, out := &in.Job, &out.Job + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentActionStatus. +func (in *AgentActionStatus) DeepCopy() *AgentActionStatus { + if in == nil { + return nil + } + out := new(AgentActionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AgentConfig) DeepCopyInto(out *AgentConfig) { *out = *in @@ -146,8 +310,16 @@ func (in *InstallationList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InstallationSpec) DeepCopyInto(out *InstallationSpec) { *out = *in - out.AgentConfig = in.AgentConfig - out.PorterConfig = in.PorterConfig + if in.AgentConfig != nil { + in, out := &in.AgentConfig, &out.AgentConfig + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.PorterConfig != nil { + in, out := &in.PorterConfig, &out.PorterConfig + *out = new(corev1.LocalObjectReference) + **out = **in + } out.Bundle = in.Bundle if in.Labels != nil { in, out := &in.Labels, &out.Labels @@ -182,18 +354,7 @@ func (in *InstallationSpec) DeepCopy() *InstallationSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstallationStatus) DeepCopyInto(out *InstallationStatus) { *out = *in - if in.ActiveJob != nil { - in, out := &in.ActiveJob, &out.ActiveJob - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.PorterResourceStatus.DeepCopyInto(&out.PorterResourceStatus) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallationStatus. @@ -369,6 +530,33 @@ func (in *PorterConfigSpec) DeepCopy() *PorterConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PorterResourceStatus) DeepCopyInto(out *PorterResourceStatus) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PorterResourceStatus. +func (in *PorterResourceStatus) DeepCopy() *PorterResourceStatus { + if in == nil { + return nil + } + out := new(PorterResourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretsConfig) DeepCopyInto(out *SecretsConfig) { *out = *in diff --git a/config/crd/bases/porter.sh_agentactions.yaml b/config/crd/bases/porter.sh_agentactions.yaml new file mode 100644 index 00000000..09f7c321 --- /dev/null +++ b/config/crd/bases/porter.sh_agentactions.yaml @@ -0,0 +1,1785 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: agentactions.porter.sh +spec: + group: porter.sh + names: + kind: AgentAction + listKind: AgentActionList + plural: agentactions + singular: agentaction + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: AgentAction is the Schema for the agentactions API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AgentActionSpec defines the desired state of AgentAction + properties: + agentConfig: + description: AgentConfig is the name of an AgentConfig to use instead + of the AgentConfig defined at the namespace or system level. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + args: + description: Args to pass to the Porter Agent job. This should be + the porter command that you want to run. + items: + type: string + type: array + command: + description: Command to run inside the Porter Agent job. Defaults + to running the agent. + items: + type: string + type: array + env: + description: Env variables to set on the Porter Agent job. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom allows setting environment variables on the Porter + Agent job, using secrets or config maps as the source. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + files: + additionalProperties: + format: byte + type: string + description: Files that should be present in the working directory + where the command is run. + type: object + porterConfig: + description: PorterConfig is the name of a PorterConfig to use instead + of the PorterConfig defined at the namespace or system level. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeMounts: + description: VolumeMounts that should be defined on the Porter Agent + job. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When + not set, MountPropagationNone is used. This field is beta + in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the + container's volume should be mounted. Behaves similarly to + SubPath but environment variable references $(VAR_NAME) are + expanded using the container's environment. Defaults to "" + (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes that should be defined on the Porter Agent job. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. 
Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). 
+ type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. 
Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "Ephemeral represents a volume that is handled + by a cluster storage driver (Alpha feature). The volume's + lifecycle is tied to the pod that defines it - it will be + created before the pod starts, and deleted when the pod is + removed. \n Use this if: a) the volume is only needed while + the pod runs, b) features of normal volumes like restoring + from snapshot or capacity tracking are needed, c) the storage + driver is specified through a storage class, and d) the storage + driver supports dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the connection + between this volume type and PersistentVolumeClaim). \n Use + PersistentVolumeClaim or one of the vendor-specific APIs for + volumes that persist for longer than the lifecycle of an individual + pod. \n Use CSI for light-weight local ephemeral volumes if + the CSI driver is meant to be used that way - see the documentation + of the driver for more information. \n A pod can use both + types of ephemeral volumes and persistent volumes at the same + time." + properties: + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. 
The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) * An + existing custom resource that implements data + population (Alpha) In order to use custom resource + types that implement data population, the AnyVolumeDataSource + feature gate must be enabled. If the provisioner + or an external controller can support the specified + data source, it will create a new volume based + on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. 
This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits used to set permissions on created + files by default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal values + for mode bits. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set. 
+ format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. 
The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. 
The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. 
More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. 
If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: AgentActionStatus defines the observed state of AgentAction + properties: + conditions: + description: 'Conditions store a list of states that have been reached. + Each condition refers to the status of the Job Possible conditions + are: Scheduled, Started, Completed, and Failed' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. 
--- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + job: + description: The currently active job that is running the Porter Agent. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + observedGeneration: + description: The last generation observed by the controller. + format: int64 + type: integer + phase: + description: 'The current status of the agent. Possible values are: + Unknown, Pending, Running, Succeeded, and Failed.' 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/porter.sh_agentconfigs.yaml b/config/crd/bases/porter.sh_agentconfigs.yaml index d671e60a..0ad7789e 100644 --- a/config/crd/bases/porter.sh_agentconfigs.yaml +++ b/config/crd/bases/porter.sh_agentconfigs.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: agentconfigs.porter.sh spec: @@ -35,8 +34,8 @@ spec: type: object spec: description: "AgentConfigSpec defines the configuration for the Porter - agent. \n SERIALIZATION NOTE: \tThe json serialization is for persisting - this to Kubernetes. The mapstructure tags is used internally for AgentConfigSpec.MergeConfig." + agent. \n SERIALIZATION NOTE: The json serialization is for persisting + this to Kubernetes. The mapstructure tags is used internally for AgentConfigSpec.MergeConfig." properties: installationServiceAccount: description: InstallationServiceAccount specifies a service account diff --git a/config/crd/bases/porter.sh_installations.yaml b/config/crd/bases/porter.sh_installations.yaml index 16a2a3e2..a4e37924 100644 --- a/config/crd/bases/porter.sh_installations.yaml +++ b/config/crd/bases/porter.sh_installations.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: installations.porter.sh spec: @@ -120,8 +119,8 @@ spec: status: description: InstallationStatus defines the observed state of Installation properties: - activeJob: - description: The currently active job that is running Porter. + action: + description: The most recent action executed for the resource properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -136,13 +135,12 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + type FooStatus struct{ // Represents the observations of a foo's + current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -205,8 +203,8 @@ spec: format: int64 type: integer phase: - description: 'The current status of the installation Possible values - are: Unknown, Pending, Running, Succeeded, and Failed.' + description: 'The current status of the agent. Possible values are: + Unknown, Pending, Running, Succeeded, and Failed.' type: string type: object type: object diff --git a/config/crd/bases/porter.sh_porterconfigs.yaml b/config/crd/bases/porter.sh_porterconfigs.yaml index 4d423cea..f7f11eb7 100644 --- a/config/crd/bases/porter.sh_porterconfigs.yaml +++ b/config/crd/bases/porter.sh_porterconfigs.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null name: porterconfigs.porter.sh spec: @@ -35,8 +34,8 @@ spec: type: object spec: description: "PorterConfigSpec defines the desired state of PorterConfig - \n SERIALIZATION NOTE: Use json to persist this resource to Kubernetes. - \ Use yaml to convert to Porter's representation of the resource. The + \n SERIALIZATION NOTE: Use json to persist this resource to Kubernetes. + Use yaml to convert to Porter's representation of the resource. The mapstructure tags are used internally for PorterConfigSpec.MergeConfig." properties: buildDriver: diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 8fd0f4d4..0264186f 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,6 +5,7 @@ resources: - bases/porter.sh_agentconfigs.yaml - bases/porter.sh_installations.yaml - bases/porter.sh_porterconfigs.yaml + - bases/porter.sh_agentactions.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -12,12 +13,16 @@ patchesStrategicMerge: # patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_installations.yaml #- patches/webhook_in_porterconfigs.yaml +#- patches/webhook_in_credentialsets.yaml +#- patches/webhook_in_agentactions.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_installations.yaml #- patches/cainjection_in_porterconfigs.yaml +#- patches/cainjection_in_credentialsets.yaml +#- patches/cainjection_in_agentactions.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
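For reviewers orienting themselves on the new status schema: the AgentAction (and Installation) status above exposes standard `metav1.Condition` entries plus a phase. A minimal client-side sketch, not part of this diff, showing how a caller could wait for a terminal condition. It assumes the porterv1 types added in this PR and a controller-runtime client; the helper name `waitForAction` and the 2-second poll interval are illustrative only.

```
package main

import (
	"context"
	"fmt"
	"time"

	porterv1 "get.porter.sh/operator/api/v1"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForAction polls an AgentAction until the controller marks it
// Completed or Failed, using the conditions defined by this CRD.
func waitForAction(ctx context.Context, c client.Client, key types.NamespacedName) error {
	for {
		action := &porterv1.AgentAction{}
		if err := c.Get(ctx, key, action); err != nil {
			return err
		}
		if apimeta.IsStatusConditionTrue(action.Status.Conditions, string(porterv1.ConditionComplete)) {
			return nil
		}
		if apimeta.IsStatusConditionTrue(action.Status.Conditions, string(porterv1.ConditionFailed)) {
			return fmt.Errorf("agent action %s failed", key.Name)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second): // illustrative poll interval
		}
	}
}
```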
diff --git a/config/crd/patches/cainjection_in_agentactions.yaml b/config/crd/patches/cainjection_in_agentactions.yaml new file mode 100644 index 00000000..d25bbb49 --- /dev/null +++ b/config/crd/patches/cainjection_in_agentactions.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: agentactions.porter.sh diff --git a/config/crd/patches/webhook_in_agentactions.yaml b/config/crd/patches/webhook_in_agentactions.yaml new file mode 100644 index 00000000..a9de088e --- /dev/null +++ b/config/crd/patches/webhook_in_agentactions.yaml @@ -0,0 +1,14 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: agentactions.porter.sh +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/rbac/agentaction_editor_role.yaml b/config/rbac/agentaction_editor_role.yaml new file mode 100644 index 00000000..d87582c2 --- /dev/null +++ b/config/rbac/agentaction_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit agentactions. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: agentaction-editor-role +rules: +- apiGroups: + - porter.sh + resources: + - agentactions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - porter.sh + resources: + - agentactions/status + verbs: + - get diff --git a/config/rbac/agentaction_viewer_role.yaml b/config/rbac/agentaction_viewer_role.yaml new file mode 100644 index 00000000..49d98008 --- /dev/null +++ b/config/rbac/agentaction_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view agentactions. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: agentaction-viewer-role +rules: +- apiGroups: + - porter.sh + resources: + - agentactions + verbs: + - get + - list + - watch +- apiGroups: + - porter.sh + resources: + - agentactions/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index e67604b0..e68fc7eb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,4 +1,3 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -42,6 +41,32 @@ rules: - patch - update - watch +- apiGroups: + - porter.sh + resources: + - agentactions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - porter.sh + resources: + - agentactions/finalizers + verbs: + - update +- apiGroups: + - porter.sh + resources: + - agentactions/status + verbs: + - get + - patch + - update - apiGroups: - porter.sh resources: diff --git a/config/samples/_v1_agentaction.yaml b/config/samples/_v1_agentaction.yaml new file mode 100644 index 00000000..8d257c46 --- /dev/null +++ b/config/samples/_v1_agentaction.yaml @@ -0,0 +1,9 @@ +apiVersion: porter.sh/v1 +kind: AgentAction +metadata: + name: agentaction-sample +spec: + args: ["installation", "apply", "installation.yaml"] + files: + # base64 encoded file contents + installation.yaml: c2NoZW1hVmVyc2lvbjogMS4wLjAKbmFtZXNwYWNlOiBvcGVyYXRvcgpuYW1lOiBoZWxsbwpidW5kbGU6CiAgcmVwb3NpdG9yeTogZ2hjci5pby9nZXRwb3J0ZXIvdGVzdC9wb3J0ZXItaGVsbG8KICB2ZXJzaW9uOiAwLjIuMApwYXJhbWV0ZXJzOgogIG5hbWU6IGxsYW1hcyAK diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 3b2e9779..cd84f2e3 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -3,4 +3,5 @@ resources: - _v1_installation.yaml - _v1_porterconfig.yaml - _v1_agentconfig.yaml +- _v1_agentaction.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/porter-hello.yaml b/config/samples/porter-hello.yaml index e4b4e65c..057d2765 100644 --- a/config/samples/porter-hello.yaml +++ b/config/samples/porter-hello.yaml @@ -9,5 +9,7 @@ spec: namespace: operator name: hello bundle: - repository: getporter/porter-hello - version: 0.1.1 + repository: ghcr.io/getporter/test/porter-hello + version: 0.2.0 + parameters: + name: llamas diff --git a/controllers/agentaction_controller.go b/controllers/agentaction_controller.go new file mode 100644 index 00000000..e72985ac --- /dev/null +++ b/controllers/agentaction_controller.go @@ -0,0 +1,704 @@ +package controllers + +import ( + "context" + "fmt" + "reflect" + "sort" + "strings" + + porterv1 "get.porter.sh/operator/api/v1" + "github.com/go-logr/logr" + "github.com/pkg/errors" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// +kubebuilder:rbac:groups=porter.sh,resources=agentconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=porter.sh,resources=porterconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=porter.sh,resources=agentactions,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=porter.sh,resources=agentactions/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=porter.sh,resources=agentactions/finalizers,verbs=update
+// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
+
+// AgentActionReconciler reconciles AgentAction resources by dispatching Porter agent jobs.
+type AgentActionReconciler struct {
+	client.Client
+	Log    logr.Logger
+	Scheme *runtime.Scheme
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *AgentActionReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&porterv1.AgentAction{}, builder.WithPredicates(resourceChanged{})).
+		Owns(&batchv1.Job{}).
+		Complete(r)
+}
+
+// Reconcile is called when the spec of an AgentAction is changed
+// or a job associated with an agent is updated.
+// It either schedules a job to handle a spec change, or updates the AgentAction status in response to the job's state.
+func (r *AgentActionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := r.Log.WithValues("agentaction", req.Name, "namespace", req.Namespace)
+
+	// Retrieve the action
+	action := &porterv1.AgentAction{}
+	err := r.Get(ctx, req.NamespacedName, action)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{Requeue: false}, err
+	}
+
+	log = log.WithValues("resourceVersion", action.ResourceVersion, "generation", action.Generation, "observedGeneration", action.Status.ObservedGeneration)
+
+	if action.Generation != action.Status.ObservedGeneration {
+		log.V(Log5Trace).Info("Reconciling agent action because its spec changed")
+	} else {
+		log.V(Log5Trace).Info("Reconciling agent action")
+	}
+
+	// Check if we have scheduled a job for this change yet
+	job, handled, err := r.isHandled(ctx, log, action)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Sync the agent action status from the job
+	if err = r.syncStatus(ctx, log, action, job); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Check if we have already handled any spec changes
+	if handled {
+		// Nothing for us to do at this point
+		log.V(Log4Debug).Info("Reconciliation complete: A porter agent has already been dispatched.")
+		return ctrl.Result{}, nil
+	}
+
+	// Run a porter agent
+	err = r.runPorter(ctx, log, action)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	log.V(Log4Debug).Info("Reconciliation complete: A porter agent has been dispatched.")
+	return ctrl.Result{}, nil
+}
+
+// isHandled determines whether this generation of the AgentAction has been processed by Porter.
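+//
+// The job is looked up by the shared agent labels (resource kind, name, generation,
+// and retry value) rather than by name, because agent jobs are created with GenerateName.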
+func (r *AgentActionReconciler) isHandled(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) (*batchv1.Job, bool, error) {
+	// Retrieve the Job running the porter action
+	// Only query by generation, not revision, since rev can be bumped when the status is updated, or a label changed
+	jobLabels := r.getAgentJobLabels(action)
+
+	results := batchv1.JobList{}
+	err := r.List(ctx, &results, client.InNamespace(action.Namespace), client.MatchingLabels(jobLabels))
+	if err != nil {
+		return nil, false, errors.Wrap(err, "could not query for active porter jobs")
+	}
+
+	if len(results.Items) == 0 {
+		log.V(Log4Debug).Info("No existing job was found")
+		return nil, false, nil
+	}
+
+	job := results.Items[0]
+	log.V(Log4Debug).Info("Found existing job", "job", job.Name)
+	return &job, true, nil
+}
+
+// Check the status of the porter-agent job and use that to update the AgentAction status
+func (r *AgentActionReconciler) syncStatus(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, job *batchv1.Job) error {
+	origStatus := action.Status
+
+	r.applyJobToStatus(log, action, job)
+
+	if !reflect.DeepEqual(origStatus, action.Status) {
+		return r.saveStatus(ctx, log, action)
+	}
+
+	return nil
+}
+
+// Only update the status with a PATCH, don't clobber the entire resource
+func (r *AgentActionReconciler) saveStatus(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) error {
+	log.V(Log5Trace).Info("Patching agent action status")
+	return PatchObjectWithRetry(ctx, log, r.Client, r.Client.Status().Patch, action, func() client.Object {
+		return &porterv1.AgentAction{}
+	})
+}
+
+// applyJobToStatus takes a job and uses it to recalculate the status for the agent action.
+// It mutates the action in place; the caller (syncStatus) detects and persists any changes.
+func (r *AgentActionReconciler) applyJobToStatus(log logr.Logger, action *porterv1.AgentAction, job *batchv1.Job) {
+	// Recalculate all conditions based on what we currently observe
+	action.Status.ObservedGeneration = action.Generation
+	action.Status.Phase = porterv1.PhaseUnknown
+
+	if job == nil {
+		action.Status.Job = nil
+		action.Status.Conditions = nil
+		log.V(Log5Trace).Info("Cleared status because there is no current job")
+	} else {
+		action.Status.Job = &corev1.LocalObjectReference{Name: job.Name}
+		setCondition(log, action, porterv1.ConditionScheduled, "JobCreated")
+		action.Status.Phase = porterv1.PhasePending
+
+		if job.Status.Active+job.Status.Failed+job.Status.Succeeded > 0 {
+			action.Status.Phase = porterv1.PhaseRunning
+			setCondition(log, action, porterv1.ConditionStarted, "JobStarted")
+		}
+
+		for _, condition := range job.Status.Conditions {
+			switch condition.Type {
+			case batchv1.JobComplete:
+				action.Status.Phase = porterv1.PhaseSucceeded
+				setCondition(log, action, porterv1.ConditionComplete, "JobCompleted")
+			case batchv1.JobFailed:
+				action.Status.Phase = porterv1.PhaseFailed
+				setCondition(log, action, porterv1.ConditionFailed, "JobFailed")
+			}
+		}
+	}
+}
+
+// runPorter creates a job that runs the specified porter command
+func (r *AgentActionReconciler) runPorter(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) error {
+	log.V(Log5Trace).Info("Porter agent requested", "namespace", action.Namespace, "action", action.Name)
+
+	agentCfg, err := r.resolveAgentConfig(ctx, log, action)
+	if err != nil {
+		return err
+	}
+
+	porterCfg, err := r.resolvePorterConfig(ctx, log, action)
+	if err != nil {
+		return err
+	}
+
+	pvc, err := r.createAgentVolume(ctx, log, action, agentCfg)
+	if err != nil {
return err
+	}
+
+	configSecret, err := r.createConfigSecret(ctx, log, action, porterCfg)
+	if err != nil {
+		return err
+	}
+
+	workdirSecret, err := r.createWorkdirSecret(ctx, log, action)
+	if err != nil {
+		return err
+	}
+
+	_, err = r.createAgentJob(ctx, log, action, agentCfg, pvc, configSecret, workdirSecret)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// getSharedAgentLabels returns the labels used to match agent resources, merging custom labels defined on the action.
+func (r *AgentActionReconciler) getSharedAgentLabels(action *porterv1.AgentAction) map[string]string {
+	labels := map[string]string{
+		porterv1.LabelManaged:            "true",
+		porterv1.LabelResourceKind:       action.TypeMeta.Kind,
+		porterv1.LabelResourceName:       action.Name,
+		porterv1.LabelResourceGeneration: fmt.Sprintf("%d", action.Generation),
+		porterv1.LabelRetry:              action.GetRetryLabelValue(),
+	}
+	for k, v := range action.Labels {
+		// if the action has labels that conflict with existing labels, ignore them
+		if _, ok := labels[k]; ok {
+			continue
+		}
+		labels[k] = v
+	}
+	return labels
+}
+
+func (r *AgentActionReconciler) createAgentVolume(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpec) (*corev1.PersistentVolumeClaim, error) {
+	labels := r.getSharedAgentLabels(action)
+
+	var results corev1.PersistentVolumeClaimList
+	if err := r.List(ctx, &results, client.MatchingLabels(labels)); err != nil {
+		return nil, errors.Wrap(err, "error checking for an existing agent volume (pvc)")
+	}
+	if len(results.Items) > 0 {
+		return &results.Items[0], nil
+	}
+
+	// Create a volume to share data between porter and the invocation image
+	pvc := &corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: action.Name + "-",
+			Namespace:    action.Namespace,
+			Labels:       labels,
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+			Resources: corev1.ResourceRequirements{
+				Requests: map[corev1.ResourceName]resource.Quantity{
+					corev1.ResourceStorage: agentCfg.GetVolumeSize(),
+				},
+			},
+		},
+	}
+
+	if err := r.Create(ctx, pvc); err != nil {
+		return nil, errors.Wrap(err, "error creating the agent volume (pvc)")
+	}
+
+	log.V(Log4Debug).Info("Created PersistentVolumeClaim for the Porter agent", "name", pvc.Name)
+	return pvc, nil
+}
+
+// createConfigSecret creates a secret holding the files for the porter configuration directory
+func (r *AgentActionReconciler) createConfigSecret(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, porterCfg porterv1.PorterConfigSpec) (*corev1.Secret, error) {
+	labels := r.getSharedAgentLabels(action)
+	labels[porterv1.LabelSecretType] = porterv1.SecretTypeConfig
+
+	var results corev1.SecretList
+	if err := r.List(ctx, &results, client.MatchingLabels(labels)); err != nil {
+		return nil, errors.Wrap(err, "error checking for an existing config secret")
+	}
+
+	if len(results.Items) > 0 {
+		return &results.Items[0], nil
+	}
+
+	// Create a secret with all the files that should be copied into the porter config directory
+	// * porter config file (~/.porter/config.yaml)
+	porterCfgB, err := porterCfg.ToPorterDocument()
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshaling the porter config file")
+	}
+
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: action.Name + "-",
+			Namespace:    action.Namespace,
+			Labels:       labels,
+		},
+		Type:      corev1.SecretTypeOpaque,
+		Immutable: pointer.BoolPtr(true),
+		Data: map[string][]byte{
+			"config.yaml": porterCfgB,
+		},
+	}
+
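+	// The config secret is immutable: Kubernetes rejects edits to its data after
+	// creation, so a changed Porter config yields a new secret (via GenerateName)
+	// that is matched back to this action through the shared labels.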
+	if err = r.Create(ctx, secret); err != nil {
+		return nil, errors.Wrap(err, "error creating the porter config secret")
+	}
+
+	log.V(Log4Debug).Info("Created secret for the porter config", "name", secret.Name)
+	return secret, nil
+}
+
+// createWorkdirSecret creates a secret with the files that should be copied into the agent's working directory
+func (r *AgentActionReconciler) createWorkdirSecret(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) (*corev1.Secret, error) {
+	labels := r.getSharedAgentLabels(action)
+	labels[porterv1.LabelSecretType] = porterv1.SecretTypeWorkdir
+
+	var results corev1.SecretList
+	if err := r.List(ctx, &results, client.MatchingLabels(labels)); err != nil {
+		return nil, errors.Wrap(err, "error checking for an existing workdir secret")
+	}
+
+	if len(results.Items) > 0 {
+		return &results.Items[0], nil
+	}
+
+	// Create a secret with all the files that should be copied into the agent's working directory
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: action.Name + "-",
+			Namespace:    action.Namespace,
+			Labels:       labels,
+		},
+		Type:      corev1.SecretTypeOpaque,
+		Immutable: pointer.BoolPtr(true),
+		Data:      action.Spec.Files,
+	}
+
+	if err := r.Create(ctx, secret); err != nil {
+		return nil, errors.Wrap(err, "error creating the porter workdir secret")
+	}
+
+	log.V(Log4Debug).Info("Created secret for the porter workdir", "name", secret.Name)
+	return secret, nil
+}
+
+// getAgentJobLabels returns the labels that identify the agent job for an action.
+func (r *AgentActionReconciler) getAgentJobLabels(action *porterv1.AgentAction) map[string]string {
+	labels := r.getSharedAgentLabels(action)
+	labels[porterv1.LabelJobType] = porterv1.JobTypeAgent
+	return labels
+}
+
+func (r *AgentActionReconciler) createAgentJob(ctx context.Context, log logr.Logger,
+	action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpec,
+	pvc *corev1.PersistentVolumeClaim, configSecret *corev1.Secret, workdirSecret *corev1.Secret) (batchv1.Job, error) {
+
+	// not checking for an existing job because that happens earlier during reconcile
+
+	labels := r.getAgentJobLabels(action)
+	env, envFrom := r.getAgentEnv(action, agentCfg, pvc)
+	volumes, volumeMounts := r.getAgentVolumes(action, pvc, configSecret, workdirSecret)
+
+	porterJob := batchv1.Job{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: action.Name + "-",
+			Namespace:    action.Namespace,
+			Labels:       labels,
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion:         action.APIVersion,
+					Kind:               action.Kind,
+					Name:               action.Name,
+					UID:                action.UID,
+					Controller:         pointer.BoolPtr(true),
+					BlockOwnerDeletion: pointer.BoolPtr(true),
+				},
+			},
+		},
+		Spec: batchv1.JobSpec{
+			Completions:  pointer.Int32Ptr(1),
+			BackoffLimit: pointer.Int32Ptr(0),
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					GenerateName: action.Name + "-",
+					Namespace:    action.Namespace,
+					Labels:       labels,
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name:            "porter-agent",
+							Image:           agentCfg.GetPorterImage(),
+							ImagePullPolicy: agentCfg.GetPullPolicy(),
+							Command:         action.Spec.Command,
+							Args:            action.Spec.Args,
+							Env:             env,
+							EnvFrom:         envFrom,
+							VolumeMounts:    volumeMounts,
+							WorkingDir:      "/porter-workdir",
+						},
+					},
+					Volumes:            volumes,
+					RestartPolicy:      "Never", // TODO: Make the retry policy configurable on the Installation
+					ServiceAccountName: agentCfg.ServiceAccount,
+					ImagePullSecrets:   nil, // TODO: Make pulling from a private registry possible
+					SecurityContext: &corev1.PodSecurityContext{
+						// Run as the well-known nonroot user that Porter uses for the invocation image and the agent
+						RunAsUser:
pointer.Int64Ptr(65532), + // Porter builds the bundles with the root group having the same permissions as the owner + // So make sure that we are running as the root group + RunAsGroup: pointer.Int64Ptr(0), + FSGroup: pointer.Int64Ptr(0), + }, + }, + }, + }, + } + + if err := r.Create(ctx, &porterJob); err != nil { + return batchv1.Job{}, errors.Wrap(err, "error creating Porter agent job") + } + + log.V(Log4Debug).Info("Created Job for the Porter agent", "name", porterJob.Name) + return porterJob, nil +} + +func (r *AgentActionReconciler) resolveAgentConfig(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) (porterv1.AgentConfigSpec, error) { + log.V(Log5Trace).Info("Resolving porter agent configuration") + + logConfig := func(level string, config *porterv1.AgentConfig) { + if config == nil || config.Name == "" { + return + } + + log.V(Log4Debug).Info("Found porter agent configuration", + "level", level, + "namespace", config.Namespace, + "name", config.Name) + } + + // Read agent configuration defined at the system level + systemCfg := &porterv1.AgentConfig{} + err := r.Get(ctx, types.NamespacedName{Name: "default", Namespace: operatorNamespace}, systemCfg) + if err != nil && !apierrors.IsNotFound(err) { + return porterv1.AgentConfigSpec{}, errors.Wrap(err, "cannot retrieve system level porter agent configuration") + } + logConfig("system", systemCfg) + + // Read agent configuration defined at the namespace level + nsCfg := &porterv1.AgentConfig{} + err = r.Get(ctx, types.NamespacedName{Name: "default", Namespace: action.Namespace}, nsCfg) + if err != nil && !apierrors.IsNotFound(err) { + return porterv1.AgentConfigSpec{}, errors.Wrap(err, "cannot retrieve namespace level porter agent configuration") + } + logConfig("namespace", nsCfg) + + // Read agent configuration override + instCfg := &porterv1.AgentConfig{} + if action.Spec.AgentConfig != nil { + err = r.Get(ctx, types.NamespacedName{Name: action.Spec.AgentConfig.Name, Namespace: action.Namespace}, instCfg) + if err != nil && !apierrors.IsNotFound(err) { + return porterv1.AgentConfigSpec{}, errors.Wrapf(err, "cannot retrieve agent configuration %s specified by the agent action", action.Spec.AgentConfig.Name) + } + logConfig("instance", instCfg) + } + + // Apply overrides + base := &systemCfg.Spec + cfg, err := base.MergeConfig(nsCfg.Spec, instCfg.Spec) + if err != nil { + return porterv1.AgentConfigSpec{}, err + } + + log.V(Log4Debug).Info("resolved porter agent configuration", + "porterImage", cfg.GetPorterImage(), + "pullPolicy", cfg.GetPullPolicy(), + "serviceAccount", cfg.ServiceAccount, + "volumeSize", cfg.GetVolumeSize(), + "installationServiceAccount", cfg.InstallationServiceAccount, + ) + return cfg, nil +} + +func (r *AgentActionReconciler) resolvePorterConfig(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) (porterv1.PorterConfigSpec, error) { + log.V(Log5Trace).Info("Resolving porter configuration file") + + logConfig := func(level string, config *porterv1.PorterConfig) { + if config == nil || config.Name == "" { + return + } + log.V(Log4Debug).Info("Found porter config", + "level", level, + "namespace", config.Namespace, + "name", config.Name) + } + + // Provide a safe default config in case nothing is defined anywhere + defaultCfg := porterv1.PorterConfigSpec{ + DefaultStorage: pointer.StringPtr("in-cluster-mongodb"), + DefaultSecretsPlugin: pointer.StringPtr("kubernetes.secrets"), + Storage: []porterv1.StorageConfig{ + {PluginConfig: porterv1.PluginConfig{ + Name: 
"in-cluster-mongodb", + PluginSubKey: "mongodb", + Config: runtime.RawExtension{Raw: []byte(`{"url":"mongodb://mongodb.porter-operator-system.svc.cluster.local"}`)}, + }}, + }, + } + + // Read agent configuration defined at the system level + systemCfg := &porterv1.PorterConfig{} + err := r.Get(ctx, types.NamespacedName{Name: "default", Namespace: operatorNamespace}, systemCfg) + if err != nil && !apierrors.IsNotFound(err) { + return porterv1.PorterConfigSpec{}, errors.Wrap(err, "cannot retrieve system level porter agent configuration") + } + logConfig("system", systemCfg) + + // Read agent configuration defined at the namespace level + nsCfg := &porterv1.PorterConfig{} + err = r.Get(ctx, types.NamespacedName{Name: "default", Namespace: action.Namespace}, nsCfg) + if err != nil && !apierrors.IsNotFound(err) { + return porterv1.PorterConfigSpec{}, errors.Wrap(err, "cannot retrieve namespace level porter agent configuration") + } + logConfig("namespace", nsCfg) + + // Read agent configuration defines on the installation + instCfg := &porterv1.PorterConfig{} + if action.Spec.PorterConfig != nil { + err = r.Get(ctx, types.NamespacedName{Name: action.Spec.PorterConfig.Name, Namespace: action.Namespace}, instCfg) + if err != nil && !apierrors.IsNotFound(err) { + return porterv1.PorterConfigSpec{}, errors.Wrapf(err, "cannot retrieve agent configuration %s specified by the agent action", action.Spec.AgentConfig.Name) + } + logConfig("instance", instCfg) + } + + // Resolve final configuration + // We don't log the final config because we haven't yet added the feature to enable not having sensitive data in porter's config files + base := &defaultCfg + cfg, err := base.MergeConfig(systemCfg.Spec, nsCfg.Spec, instCfg.Spec) + if err != nil { + return porterv1.PorterConfigSpec{}, err + } + + return cfg, nil +} + +func (r *AgentActionReconciler) getAgentEnv(action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpec, pvc *corev1.PersistentVolumeClaim) ([]corev1.EnvVar, []corev1.EnvFromSource) { + sharedLabels := r.getSharedAgentLabels(action) + + env := []corev1.EnvVar{ + { + Name: "PORTER_RUNTIME_DRIVER", + Value: "kubernetes", + }, + // Configuration for the Kubernetes Driver + { + Name: "KUBE_NAMESPACE", + Value: action.Namespace, + }, + { + Name: "IN_CLUSTER", + Value: "true", + }, + { + Name: "LABELS", + Value: r.getFormattedInstallerLabels(sharedLabels), + }, + { + Name: "JOB_VOLUME_NAME", + Value: pvc.Name, + }, + { + Name: "JOB_VOLUME_PATH", + Value: "/porter-shared", + }, + { + Name: "CLEANUP_JOBS", + Value: "false", + }, + { + Name: "SERVICE_ACCOUNT", + Value: agentCfg.InstallationServiceAccount, + }, + { + Name: "AFFINITY_MATCH_LABELS", + Value: r.getFormattedAffinityLabels(action), + }, + } + + for _, e := range action.Spec.Env { + env = append(env, e) + } + + envFrom := []corev1.EnvFromSource{ + // Environment variables for the plugins + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "porter-env", + }, + Optional: pointer.BoolPtr(true), + }, + }, + } + + for _, e := range action.Spec.EnvFrom { + envFrom = append(envFrom, e) + } + + return env, envFrom +} + +func (r *AgentActionReconciler) getAgentVolumes(action *porterv1.AgentAction, pvc *corev1.PersistentVolumeClaim, configSecret *corev1.Secret, workdirSecret *corev1.Secret) ([]corev1.Volume, []corev1.VolumeMount) { + volumes := []corev1.Volume{ + { + Name: "porter-shared", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + 
ClaimName: pvc.Name, + }, + }, + }, + { + Name: "porter-config", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: configSecret.Name, + Optional: pointer.BoolPtr(false), + }, + }, + }, + { + Name: "porter-workdir", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: workdirSecret.Name, + Optional: pointer.BoolPtr(false), + }, + }, + }, + } + for _, volume := range action.Spec.Volumes { + volumes = append(volumes, volume) + } + + volumeMounts := []corev1.VolumeMount{ + { + Name: "porter-shared", + MountPath: "/porter-shared", + }, + { + Name: "porter-config", + MountPath: "/porter-config", + }, + { + Name: "porter-workdir", + MountPath: "/porter-workdir", + }, + } + for _, mount := range action.Spec.VolumeMounts { + volumeMounts = append(volumeMounts, mount) + } + + return volumes, volumeMounts +} + +func (r *AgentActionReconciler) getFormattedInstallerLabels(labels map[string]string) string { + // represent the shared labels that we are applying to all the things in a way that porter can accept on the command line + // These labels are added to the invocation image and should be sorted consistently + labels[porterv1.LabelJobType] = porterv1.JobTypeInstaller + formattedLabels := make([]string, 0, len(labels)) + for k, v := range labels { + formattedLabels = append(formattedLabels, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(formattedLabels) + return strings.Join(formattedLabels, " ") +} + +func (r *AgentActionReconciler) getFormattedAffinityLabels(action *porterv1.AgentAction) string { + // These labels are used by the kubernetes driver to ensure that the invocation image is scheduled + // on the same node as the agent + return fmt.Sprintf("%s=%s %s=%s %s=%d %s=%s", + porterv1.LabelResourceKind, action.Kind, + porterv1.LabelResourceName, action.Name, + porterv1.LabelResourceGeneration, action.Generation, + porterv1.LabelRetry, action.GetRetryLabelValue()) +} + +func setCondition(log logr.Logger, action *porterv1.AgentAction, condType porterv1.AgentConditionType, reason string) bool { + if apimeta.IsStatusConditionTrue(action.Status.Conditions, string(condType)) { + return false + } + + log.V(Log4Debug).Info("Setting condition", "condition", condType, "reason", reason) + apimeta.SetStatusCondition(&action.Status.Conditions, metav1.Condition{ + Type: string(condType), + Reason: reason, + Status: metav1.ConditionTrue, + ObservedGeneration: action.Generation, + }) + return true +} diff --git a/controllers/agentaction_controller_test.go b/controllers/agentaction_controller_test.go new file mode 100644 index 00000000..5eea02d1 --- /dev/null +++ b/controllers/agentaction_controller_test.go @@ -0,0 +1,532 @@ +package controllers + +import ( + "context" + "fmt" + "testing" + + porterv1 "get.porter.sh/operator/api/v1" + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestPorterResourceStatus_ApplyAgentAction(t *testing.T) { + tests := []struct { + name string + action *porterv1.AgentAction + resource porterResource + 
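+ // wantStatus is the expected resource status after applyAgentAction processes the action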
wantStatus porterv1.PorterResourceStatus + }{ + { + name: "no action", + resource: &porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, + wantStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 1, + Phase: porterv1.PhaseUnknown, + }, + }, + { + name: "action created", + resource: &porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, + action: &porterv1.AgentAction{ + ObjectMeta: metav1.ObjectMeta{Name: "myaction"}, + Status: porterv1.AgentActionStatus{ + Phase: porterv1.PhasePending, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + }, + }}, + wantStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 1, + Action: &corev1.LocalObjectReference{Name: "myaction"}, + Phase: porterv1.PhasePending, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + }}, + }, + {name: "action started", + resource: &porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, + action: &porterv1.AgentAction{ + ObjectMeta: metav1.ObjectMeta{Name: "myaction"}, + Status: porterv1.AgentActionStatus{ + Phase: porterv1.PhaseRunning, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, + }, + }}, + wantStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 1, + Action: &corev1.LocalObjectReference{Name: "myaction"}, + Phase: porterv1.PhaseRunning, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, + }}, + }, + {name: "action succeeded", + resource: &porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, + action: &porterv1.AgentAction{ + ObjectMeta: metav1.ObjectMeta{Name: "myaction"}, + Status: porterv1.AgentActionStatus{ + Phase: porterv1.PhaseSucceeded, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionComplete), Status: metav1.ConditionTrue}, + }, + }}, + wantStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 1, + Action: &corev1.LocalObjectReference{Name: "myaction"}, + Phase: porterv1.PhaseSucceeded, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionComplete), Status: metav1.ConditionTrue}, + }}, + }, + {name: "action failed", + resource: &porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, + action: &porterv1.AgentAction{ + ObjectMeta: metav1.ObjectMeta{Name: "myaction"}, + Status: porterv1.AgentActionStatus{ + Phase: porterv1.PhaseFailed, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionFailed), Status: metav1.ConditionTrue}, + }}}, + wantStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 1, + Action: &corev1.LocalObjectReference{Name: "myaction"}, + Phase: porterv1.PhaseFailed, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + {Type: 
string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionFailed), Status: metav1.ConditionTrue}, + }}, + }, + {name: "update resets status", + resource: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{Generation: 2}, + Status: porterv1.InstallationStatus{PorterResourceStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 1, + Action: nil, + Phase: porterv1.PhaseFailed, + Conditions: []metav1.Condition{ + {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, + {Type: string(porterv1.ConditionFailed), Status: metav1.ConditionTrue}, + }}}}, + wantStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 2, + Action: nil, + Phase: porterv1.PhaseUnknown, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + applyAgentAction(logr.Discard(), tt.resource, tt.action) + + gotStatus := tt.resource.GetStatus() + assert.Equal(t, tt.wantStatus.Phase, gotStatus.Phase, "incorrect Phase") + assert.Equal(t, tt.wantStatus.ObservedGeneration, gotStatus.ObservedGeneration, "incorrect ObservedGeneration") + assert.Equal(t, tt.wantStatus.Action, gotStatus.Action, "incorrect Action") + + assert.Len(t, gotStatus.Conditions, len(tt.wantStatus.Conditions), "incorrect number of Conditions") + for _, cond := range tt.wantStatus.Conditions { + assert.True(t, apimeta.IsStatusConditionPresentAndEqual(gotStatus.Conditions, cond.Type, cond.Status), "expected condition %s to be %s", cond.Type, cond.Status) + } + }) + } +} + +func TestAgentActionReconciler_Reconcile(t *testing.T) { + // long test is long + // Run through a full resource lifecycle: create, update, delete + ctx := context.Background() + + namespace := "test" + name := "mybuns-install" + testdata := []client.Object{ + &porterv1.AgentAction{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name, Generation: 1}}, + } + controller := setupAgentActionController(testdata...) 
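+
+ // Drive the controller by hand: each triggerReconcile call below runs one pass of
+ // the reconcile loop and refreshes our local copy of the action from the fake client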
+ + var action porterv1.AgentAction + triggerReconcile := func() { + fullname := types.NamespacedName{Namespace: namespace, Name: name} + key := client.ObjectKey{Namespace: namespace, Name: name} + + request := controllerruntime.Request{ + NamespacedName: fullname, + } + result, err := controller.Reconcile(ctx, request) + require.NoError(t, err) + require.True(t, result.IsZero()) + + var updatedAction porterv1.AgentAction + if err := controller.Get(ctx, key, &updatedAction); err == nil { + action = updatedAction + } + } + + triggerReconcile() + + // Verify the action was picked up and the status initialized + assert.Equal(t, porterv1.PhaseUnknown, action.Status.Phase, "New resources should be initialized to Phase: Unknown") + + triggerReconcile() + + // Verify a job has been scheduled + var jobs batchv1.JobList + require.NoError(t, controller.List(ctx, &jobs)) + require.Len(t, jobs.Items, 1) + job := jobs.Items[0] + + require.NotNil(t, action.Status.Job, "expected ActiveJob to be set") + assert.Equal(t, job.Name, action.Status.Job.Name, "expected ActiveJob to contain the job name") + assert.Equal(t, porterv1.PhasePending, action.Status.Phase, "incorrect Phase") + assert.True(t, apimeta.IsStatusConditionTrue(action.Status.Conditions, string(porterv1.ConditionScheduled))) + + // Start the job + job.Status.Active = 1 + require.NoError(t, controller.Status().Update(ctx, &job)) + + triggerReconcile() + + // Verify that the action status has the job + require.NotNil(t, action.Status.Job, "expected Job to be set") + assert.Equal(t, job.Name, action.Status.Job.Name, "expected Job to contain the job name") + assert.Equal(t, porterv1.PhaseRunning, action.Status.Phase, "incorrect Phase") + assert.True(t, apimeta.IsStatusConditionTrue(action.Status.Conditions, string(porterv1.ConditionStarted))) + + // Complete the job + job.Status.Active = 0 + job.Status.Succeeded = 1 + job.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobComplete, Status: corev1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &job)) + + triggerReconcile() + + // Verify that the action status shows the job is done + require.NotNil(t, action.Status.Job, "expected Job to still be set") + assert.Equal(t, porterv1.PhaseSucceeded, action.Status.Phase, "incorrect Phase") + assert.True(t, apimeta.IsStatusConditionTrue(action.Status.Conditions, string(porterv1.ConditionComplete))) + + // Fail the job + job.Status.Active = 0 + job.Status.Succeeded = 0 + job.Status.Failed = 1 + job.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobFailed, Status: corev1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &job)) + + triggerReconcile() + + // Verify that the action status shows the job is failed + require.NotNil(t, action.Status.Job, "expected Job to still be set") + assert.Equal(t, porterv1.PhaseFailed, action.Status.Phase, "incorrect Phase") + assert.True(t, apimeta.IsStatusConditionTrue(action.Status.Conditions, string(porterv1.ConditionFailed))) + + // Edit the action spec + action.Generation = 2 + require.NoError(t, controller.Update(ctx, &action)) + + triggerReconcile() + + // Verify that the action status was re-initialized + assert.Equal(t, int64(2), action.Status.ObservedGeneration) + assert.Equal(t, porterv1.PhaseUnknown, action.Status.Phase, "New resources should be initialized to Phase: Unknown") + assert.Empty(t, action.Status.Conditions, "Conditions should have been reset") + + // Delete the action + controller.Delete(ctx, &action) + + // Verify that reconcile doesn't 
error out after it's deleted + triggerReconcile() +} + +func TestAgentActionReconciler_createAgentVolume(t *testing.T) { + controller := setupAgentActionController() + + action := &porterv1.AgentAction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: porterv1.GroupVersion.String(), + Kind: "AgentAction", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "porter-hello", + Generation: 1, + ResourceVersion: "123", + UID: "random-uid", + Labels: map[string]string{ + "testLabel": "abc123", + }, + }, + } + agentCfg := porterv1.AgentConfigSpec{ + VolumeSize: "128Mi", + PorterRepository: "getporter/custom-agent", + PorterVersion: "v1.0.0", + PullPolicy: "Always", + ServiceAccount: "porteraccount", + InstallationServiceAccount: "installeraccount", + } + pvc, err := controller.createAgentVolume(context.Background(), logr.Discard(), action, agentCfg) + require.NoError(t, err) + + // Verify the pvc properties + assert.Equal(t, "porter-hello-", pvc.GenerateName, "incorrect pvc name") + assert.Equal(t, action.Namespace, pvc.Namespace, "incorrect pvc namespace") + assert.Equal(t, []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, pvc.Spec.AccessModes, "incorrect pvc access modes") + assert.Equal(t, pvc.Spec.Resources.Requests[corev1.ResourceStorage], resource.MustParse("128Mi")) + assertSharedAgentLabels(t, pvc.Labels) + assertContains(t, pvc.Labels, "testLabel", "abc123", "incorrect label") +} + +func TestAgentActionReconciler_createConfigSecret(t *testing.T) { + controller := setupAgentActionController() + + action := &porterv1.AgentAction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: porterv1.GroupVersion.String(), + Kind: "AgentAction", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "porter-hello", + Generation: 1, + ResourceVersion: "123", + UID: "random-uid", + Labels: map[string]string{ + "testLabel": "abc123", + }, + }, + } + porterCfg := porterv1.PorterConfigSpec{} + secret, err := controller.createConfigSecret(context.Background(), logr.Discard(), action, porterCfg) + require.NoError(t, err) + + // Verify the secret properties + assert.Equal(t, "porter-hello-", secret.GenerateName, "incorrect secret name") + assert.Equal(t, action.Namespace, secret.Namespace, "incorrect secret namespace") + assert.Equal(t, corev1.SecretTypeOpaque, secret.Type, "expected the secret to be of type Opaque") + assert.Equal(t, pointer.BoolPtr(true), secret.Immutable, "expected the secret to be immutable") + assert.Contains(t, secret.Data, "config.yaml", "expected the secret to have config.yaml") + assertSharedAgentLabels(t, secret.Labels) + assertContains(t, secret.Labels, porterv1.LabelSecretType, porterv1.SecretTypeConfig, "incorrect label") + assertContains(t, secret.Labels, "testLabel", "abc123", "incorrect label") +} + +func TestAgentActionReconciler_createWorkdirSecret(t *testing.T) { + controller := setupAgentActionController() + + action := &porterv1.AgentAction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: porterv1.GroupVersion.String(), + Kind: "AgentAction", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "porter-hello", + Generation: 1, + ResourceVersion: "123", + UID: "random-uid", + Labels: map[string]string{ + "testLabel": "abc123", + }, + }, + Spec: porterv1.AgentActionSpec{ + Files: map[string][]byte{ + "installation.yaml": []byte(`{}`), + }, + }, + } + secret, err := controller.createWorkdirSecret(context.Background(), logr.Discard(), action) + require.NoError(t, err) + + // Verify the secret properties + assert.Equal(t, "porter-hello-", 
secret.GenerateName, "incorrect secret name")
+ assert.Equal(t, action.Namespace, secret.Namespace, "incorrect secret namespace")
+ assert.Equal(t, corev1.SecretTypeOpaque, secret.Type, "expected the secret to be of type Opaque")
+ assert.Equal(t, pointer.BoolPtr(true), secret.Immutable, "expected the secret to be immutable")
+ assert.Contains(t, secret.Data, "installation.yaml", "expected the secret to have installation.yaml")
+ assertSharedAgentLabels(t, secret.Labels)
+ assertContains(t, secret.Labels, porterv1.LabelSecretType, porterv1.SecretTypeWorkdir, "incorrect label")
+ assertContains(t, secret.Labels, "testLabel", "abc123", "incorrect label")
+}
+
+func TestAgentActionReconciler_createAgentJob(t *testing.T) {
+ controller := setupAgentActionController()
+
+ action := &porterv1.AgentAction{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: porterv1.GroupVersion.String(),
+ Kind: "AgentAction",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test",
+ Name: "porter-hello",
+ Generation: 1,
+ ResourceVersion: "123",
+ UID: "random-uid",
+ Labels: map[string]string{
+ "testLabel": "abc123",
+ },
+ },
+ Spec: porterv1.AgentActionSpec{
+ Args: []string{"installation", "apply", "installation.yaml"},
+ },
+ }
+ agentCfg := porterv1.AgentConfigSpec{
+ VolumeSize: "128Mi",
+ PorterRepository: "getporter/custom-agent",
+ PorterVersion: "v1.0.0",
+ PullPolicy: "Always",
+ ServiceAccount: "porteraccount",
+ InstallationServiceAccount: "installeraccount",
+ }
+ pvc := &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "mypvc"}}
+ configSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}
+ workDirSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}
+ job, err := controller.createAgentJob(context.Background(), logr.Discard(), action, agentCfg, pvc, configSecret, workDirSecret)
+ require.NoError(t, err)
+
+ // Verify the job properties
+ wantName := "porter-hello-"
+ assert.Equal(t, wantName, job.GenerateName, "incorrect job name")
+ assert.Equal(t, action.Namespace, job.Namespace, "incorrect job namespace")
+ assert.Len(t, job.OwnerReferences, 1, "expected the job to have an owner reference")
+ wantOwnerRef := metav1.OwnerReference{
+ APIVersion: porterv1.GroupVersion.String(),
+ Kind: "AgentAction",
+ Name: "porter-hello",
+ UID: "random-uid",
+ Controller: pointer.BoolPtr(true),
+ BlockOwnerDeletion: pointer.BoolPtr(true),
+ }
+ assert.Equal(t, wantOwnerRef, job.OwnerReferences[0], "incorrect owner reference")
+ assertSharedAgentLabels(t, job.Labels)
+ assertContains(t, job.Labels, porterv1.LabelJobType, porterv1.JobTypeAgent, "incorrect label")
+ assertContains(t, job.Labels, "testLabel", "abc123", "incorrect label")
+ assert.Equal(t, pointer.Int32Ptr(1), job.Spec.Completions, "incorrect job completions")
+ assert.Equal(t, pointer.Int32Ptr(0), job.Spec.BackoffLimit, "incorrect job back off limit")
+
+ // Verify the job pod template
+ podTemplate := job.Spec.Template
+ assert.Equal(t, wantName, podTemplate.GenerateName, "incorrect pod generate name")
+ assert.Equal(t, "test", podTemplate.Namespace, "incorrect pod namespace")
+ assertSharedAgentLabels(t, podTemplate.Labels)
+ assertContains(t, podTemplate.Labels, "testLabel", "abc123", "incorrect label")
+ assert.Len(t, podTemplate.Spec.Volumes, 3, "incorrect pod volumes")
+ assert.Equal(t, "porter-shared", podTemplate.Spec.Volumes[0].Name, "expected the porter-shared volume")
+ assert.Equal(t, "porter-config", podTemplate.Spec.Volumes[1].Name,
"expected the porter-config volume") + assert.Equal(t, "porter-workdir", podTemplate.Spec.Volumes[2].Name, "expected the porter-workdir volume") + assert.Equal(t, "porteraccount", podTemplate.Spec.ServiceAccountName, "incorrect service account for the pod") + assert.Equal(t, pointer.Int64Ptr(65532), podTemplate.Spec.SecurityContext.RunAsUser, "incorrect RunAsUser") + assert.Equal(t, pointer.Int64Ptr(0), podTemplate.Spec.SecurityContext.RunAsGroup, "incorrect RunAsGroup") + assert.Equal(t, pointer.Int64Ptr(0), podTemplate.Spec.SecurityContext.FSGroup, "incorrect FSGroup") + + // Verify the agent container + agentContainer := podTemplate.Spec.Containers[0] + assert.Equal(t, "porter-agent", agentContainer.Name, "incorrect agent container name") + assert.Equal(t, "getporter/custom-agent:v1.0.0", agentContainer.Image, "incorrect agent image") + assert.Equal(t, corev1.PullPolicy("Always"), agentContainer.ImagePullPolicy, "incorrect agent pull policy") + assert.Equal(t, []string{"installation", "apply", "installation.yaml"}, agentContainer.Args, "incorrect agent command arguments") + assertEnvVar(t, agentContainer.Env, "PORTER_RUNTIME_DRIVER", "kubernetes") + assertEnvVar(t, agentContainer.Env, "KUBE_NAMESPACE", "test") + assertEnvVar(t, agentContainer.Env, "IN_CLUSTER", "true") + assertEnvVar(t, agentContainer.Env, "JOB_VOLUME_NAME", pvc.Name) + assertEnvVar(t, agentContainer.Env, "JOB_VOLUME_PATH", "/porter-shared") + assertEnvVar(t, agentContainer.Env, "CLEANUP_JOBS", "false") // this will be configurable in the future + assertEnvVar(t, agentContainer.Env, "SERVICE_ACCOUNT", "installeraccount") + assertEnvVar(t, agentContainer.Env, "LABELS", "porter.sh/jobType=bundle-installer porter.sh/managed=true porter.sh/resourceGeneration=1 porter.sh/resourceKind=AgentAction porter.sh/resourceName=porter-hello porter.sh/retry= testLabel=abc123") + assertEnvVar(t, agentContainer.Env, "AFFINITY_MATCH_LABELS", "porter.sh/resourceKind=AgentAction porter.sh/resourceName=porter-hello porter.sh/resourceGeneration=1 porter.sh/retry=") + assertEnvFrom(t, agentContainer.EnvFrom, "porter-env", pointer.BoolPtr(true)) + assert.Len(t, agentContainer.VolumeMounts, 3) + assertVolumeMount(t, agentContainer.VolumeMounts, "porter-config", "/porter-config") + assertVolumeMount(t, agentContainer.VolumeMounts, "porter-shared", "/porter-shared") + assertVolumeMount(t, agentContainer.VolumeMounts, "porter-workdir", "/porter-workdir") +} + +func assertSharedAgentLabels(t *testing.T, labels map[string]string) { + assertContains(t, labels, porterv1.LabelManaged, "true", "incorrect label") + assertContains(t, labels, porterv1.LabelResourceKind, "AgentAction", "incorrect label") + assertContains(t, labels, porterv1.LabelResourceName, "porter-hello", "incorrect label") + assertContains(t, labels, porterv1.LabelResourceGeneration, "1", "incorrect label") + assertContains(t, labels, porterv1.LabelRetry, "", "incorrect label") +} + +func assertContains(t *testing.T, labels map[string]string, key string, value string, msg string) { + assert.Contains(t, labels, key, "%s: expected the %s key to be set", msg, key) + assert.Equal(t, value, labels[key], "%s: incorrect value for key %s", msg, key) +} + +func assertEnvVar(t *testing.T, envVars []corev1.EnvVar, name string, value string) { + for _, envVar := range envVars { + if envVar.Name == name { + assert.Equal(t, value, envVar.Value, "incorrect value for EnvVar %s", name) + return + } + } + + assert.Failf(t, "expected the %s EnvVar to be set", name) +} + +func assertEnvFrom(t *testing.T, 
envFrom []corev1.EnvFromSource, name string, optional *bool) {
+ for _, source := range envFrom {
+ if source.SecretRef.Name == name {
+ assert.Equal(t, optional, source.SecretRef.Optional, "incorrect optional flag for EnvFrom %s", name)
+ return
+ }
+ }
+
+ assert.Fail(t, fmt.Sprintf("expected the %s EnvFrom to be set", name))
+}
+
+func assertVolumeMount(t *testing.T, mounts []corev1.VolumeMount, name string, path string) {
+ for _, mount := range mounts {
+ if mount.Name == name {
+ assert.Equal(t, path, mount.MountPath, "incorrect mount path for VolumeMount %s", name)
+ return
+ }
+ }
+
+ assert.Fail(t, fmt.Sprintf("expected the %s VolumeMount to be set", name))
+}
+
+func setupAgentActionController(objs ...client.Object) AgentActionReconciler {
+ scheme := runtime.NewScheme()
+ porterv1.AddToScheme(scheme)
+ batchv1.AddToScheme(scheme)
+ corev1.AddToScheme(scheme)
+
+ fakeBuilder := fake.NewClientBuilder()
+ fakeBuilder.WithScheme(scheme)
+ fakeBuilder.WithObjects(objs...)
+ fakeClient := fakeBuilder.Build()
+
+ return AgentActionReconciler{
+ Log: logr.Discard(),
+ Client: fakeClient,
+ Scheme: scheme,
+ }
+}
diff --git a/controllers/installation_controller.go b/controllers/installation_controller.go
index 09eeba53..d76e1d6a 100644
--- a/controllers/installation_controller.go
+++ b/controllers/installation_controller.go
@@ -2,43 +2,23 @@ package controllers
import (
"context"
- "fmt"
"reflect"
- "sort"
- "strings"
porterv1 "get.porter.sh/operator/api/v1"
"github.com/go-logr/logr"
"github.com/pkg/errors"
- batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
- apimeta "k8s.io/apimachinery/pkg/api/meta"
- "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- "sigs.k8s.io/controller-runtime/pkg/event"
- "sigs.k8s.io/controller-runtime/pkg/predicate"
)
const (
- labelJobType = porterv1.Prefix + "jobType"
- jobTypeAgent = "porter-agent"
- jobTypeInstaller = "bundle-installer"
- labelManaged = porterv1.Prefix + "managed"
- labelResourceKind = porterv1.Prefix + "resourceKind"
- labelResourceName = porterv1.Prefix + "resourceName"
- labelResourceVersion = porterv1.Prefix + "resourceVersion"
- labelResourceGeneration = porterv1.Prefix + "resourceGeneration"
- labelRetry = porterv1.Prefix + "retry"
- operatorNamespace = "porter-operator-system"
- finalizerName = porterv1.Prefix + "finalizer"
+ operatorNamespace = "porter-operator-system"
)
// InstallationReconciler calls porter to execute changes made to an Installation CRD
@@ -60,47 +40,11 @@ type InstallationReconciler struct {
// SetupWithManager sets up the controller with the Manager.
func (r *InstallationReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
- For(&porterv1.Installation{}, builder.WithPredicates(installationChanged{})).
- Owns(&batchv1.Job{}).
+ For(&porterv1.Installation{}, builder.WithPredicates(resourceChanged{})).
+ Owns(&porterv1.AgentAction{}).
Complete(r) } -// For an object extracts the owner id as long as it's managed by this controller -func getOwner(rawObj client.Object) []string { - owner := metav1.GetControllerOf(rawObj) - if owner == nil { - return nil - } - - if owner.APIVersion != porterv1.GroupVersion.String() || owner.Kind != "Installation" { - return nil - } - - return []string{owner.Name} -} - -type installationChanged struct { - predicate.Funcs -} - -// Determine if the spec or the finalizer was changed -// Allow forcing porter to run with the retry annotation -func (installationChanged) Update(e event.UpdateEvent) bool { - if e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() { - return true - } - - if !reflect.DeepEqual(e.ObjectNew.GetFinalizers(), e.ObjectOld.GetFinalizers()) { - return true - } - - if e.ObjectNew.GetAnnotations()[porterv1.AnnotationRetry] != e.ObjectOld.GetAnnotations()[porterv1.AnnotationRetry] { - return true - } - - return false -} - // Reconcile is called when the spec of an installation is changed // or a job associated with an installation is updated. // Either schedule a job to handle a spec change, or update the installation status in response to the job's state. @@ -112,55 +56,67 @@ func (r *InstallationReconciler) Reconcile(ctx context.Context, req ctrl.Request err := r.Get(ctx, req.NamespacedName, inst) if err != nil { if apierrors.IsNotFound(err) { - log.V(Log4Debug).Info("Reconciliation complete: Installation CRD is deleted.") + log.V(Log5Trace).Info("Reconciliation skipped: Installation CRD or one of its owned resources was deleted.") return ctrl.Result{}, nil } - return ctrl.Result{Requeue: false}, err + return ctrl.Result{}, err } - log = log.WithValues("resourceVersion", inst.ResourceVersion, "generation", inst.Generation) + log = log.WithValues("resourceVersion", inst.ResourceVersion, "generation", inst.Generation, "observedGeneration", inst.Status.ObservedGeneration) log.V(Log5Trace).Info("Reconciling installation") - // Check if we have scheduled a job for this change yet - job, handled, err := r.isHandled(ctx, log, inst) + // Check if we have requested an agent run yet + action, handled, err := r.isHandled(ctx, log, inst) if err != nil { return ctrl.Result{}, err } - // Sync the installation status from the job - if err = r.syncStatus(ctx, log, inst, job); err != nil { + if action != nil { + log = log.WithValues("agentaction", action.Name) + } + + // Sync the installation status from the action + if err = r.syncStatus(ctx, log, inst, action); err != nil { return ctrl.Result{}, err } // Check if we have finished uninstalling - if isDeleted(inst) && apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionComplete)) { - err = r.removeFinalizer(ctx, log, inst) + if isDeleteProcessed(inst) { + err = removeFinalizer(ctx, log, r.Client, inst) log.V(Log4Debug).Info("Reconciliation complete: Finalizer has been removed from the Installation.") return ctrl.Result{}, err } // Check if we have already handled any spec changes if handled { + // Check if a retry was requested + if action.GetRetryLabelValue() != inst.GetRetryLabelValue() { + err = r.retry(ctx, log, inst, action) + log.V(Log4Debug).Info("Reconciliation complete: The associated porter agent action was retried.") + return ctrl.Result{}, err + } + // Nothing for us to do at this point log.V(Log4Debug).Info("Reconciliation complete: A porter agent has already been dispatched.") return ctrl.Result{}, nil } // Should we uninstall the bundle? 
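+ // shouldUninstall is true once the CRD is deleted but still carries our finalizer,
+ // so deletion is routed through a porter agent run before the finalizer is removed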
- if shouldUninstall(inst) {
+ if r.shouldUninstall(inst) {
err = r.uninstallInstallation(ctx, log, inst)
log.V(Log4Debug).Info("Reconciliation complete: A porter agent has been dispatched to uninstall the installation.")
return ctrl.Result{}, err
} else if isDeleted(inst) {
// This is installation without a finalizer that was deleted
// We remove the finalizer after we successfully uninstall (or someone is manually cleaning things up)
// Just let it go
log.V(Log4Debug).Info("Reconciliation complete: Installation CRD is ready for deletion.")
return ctrl.Result{}, nil
}

// Ensure non-deleted installations have finalizers
- updated, err := r.ensureFinalizerSet(ctx, inst)
+ updated, err := ensureFinalizerSet(ctx, log, r.Client, inst)
if err != nil {
return ctrl.Result{}, err
}
@@ -181,29 +137,25 @@ func (r *InstallationReconciler) Reconcile(ctx context.Context, req ctrl.Request
}

// Determines if this generation of the Installation has been processed by Porter.
-func (r *InstallationReconciler) isHandled(ctx context.Context, log logr.Logger, inst *porterv1.Installation) (*batchv1.Job, bool, error) {
- // Retrieve the Job running the porter action
- // Only query by generation, not revision, since rev can be bumped when the status is updated, or a label changed
- jobLabels := getAgentJobLabels(inst)
- delete(jobLabels, labelResourceVersion) // resource version will vary betwen reconcile runs, don't use it to match jobs. We may want to stop using that label entirely
-
- results := batchv1.JobList{}
- err := r.List(ctx, &results, client.InNamespace(inst.Namespace), client.MatchingLabels(jobLabels))
+func (r *InstallationReconciler) isHandled(ctx context.Context, log logr.Logger, inst *porterv1.Installation) (*porterv1.AgentAction, bool, error) {
+ labels := getActionLabels(inst)
+ results := porterv1.AgentActionList{}
+ err := r.List(ctx, &results, client.InNamespace(inst.Namespace), client.MatchingLabels(labels))
if err != nil {
- return nil, false, errors.Wrapf(err, "could not query for active porter jobs")
+ return nil, false, errors.Wrap(err, "could not query for the current agent action")
}

if len(results.Items) == 0 {
- log.V(Log4Debug).Info("No existing job was found")
+ log.V(Log4Debug).Info("No existing agent action was found")
return nil, false, nil
}

- job := results.Items[0]
- log.V(Log4Debug).Info("Found existing job", "job", job.Name)
- return &job, true, nil
+ action := results.Items[0]
+ log.V(Log4Debug).Info("Found existing agent action", "agentaction", action.Name, "namespace", action.Namespace)
+ return &action, true, nil
}

-// Create a job that runs `porter installation apply`
+// Run the porter agent with the command `porter installation apply`
func (r *InstallationReconciler) applyInstallation(ctx context.Context, log logr.Logger, inst *porterv1.Installation) error {
log.V(Log5Trace).Info("Initializing installation status")
inst.Status.Initialize()
@@ -211,10 +163,10 @@ func (r *InstallationReconciler) applyInstallation(ctx context.Context, log logr
return err
}

- return r.runPorter(ctx, log, inst, "installation", "apply", "/porter-config/installation.yaml")
+ return r.runPorter(ctx, log, inst)
}

-// Create a job that runs `porter uninstall`
+// Flag the bundle as uninstalled, and then run the porter agent with the command `porter installation apply`
func (r *InstallationReconciler) uninstallInstallation(ctx context.Context, log logr.Logger, inst
*porterv1.Installation) error { log.V(Log5Trace).Info("Initializing installation status") inst.Status.Initialize() @@ -226,173 +178,40 @@ func (r *InstallationReconciler) uninstallInstallation(ctx context.Context, log log.V(Log5Trace).Info("Setting uninstalled=true to uninstall the bundle") inst.Spec.Uninstalled = true - return r.runPorter(ctx, log, inst, "installation", "apply", "/porter-config/installation.yaml") + return r.runPorter(ctx, log, inst) } -// Create a job that runs the specified porter command in a job -func (r *InstallationReconciler) runPorter(ctx context.Context, log logr.Logger, inst *porterv1.Installation, porterCommand ...string) error { - log.V(Log5Trace).Info("Porter agent requested", "command", strings.Join(porterCommand, " ")) - - agentCfg, err := r.resolveAgentConfig(ctx, log, inst) - if err != nil { - return err - } - - porterCfg, err := r.resolvePorterConfig(ctx, log, inst) - if err != nil { - return err - } - - pvc, err := r.createAgentVolume(ctx, log, inst, agentCfg) - if err != nil { - return err - } - - secret, err := r.createAgentSecret(ctx, log, inst, porterCfg) - if err != nil { - return err - } - - job, err := r.createAgentJob(ctx, log, porterCommand, inst, agentCfg, pvc, secret) +// Trigger an agent +func (r *InstallationReconciler) runPorter(ctx context.Context, log logr.Logger, inst *porterv1.Installation) error { + action, err := r.createAgentAction(ctx, log, inst) if err != nil { return err } - return r.syncStatus(ctx, log, inst, job) + // Update the Installation Status with the agent action + return r.syncStatus(ctx, log, inst, action) } -func getSharedAgentLabels(inst *porterv1.Installation) map[string]string { - labels := map[string]string{ - labelManaged: "true", - labelResourceKind: "Installation", - labelResourceName: inst.Name, - labelResourceVersion: inst.ResourceVersion, - labelResourceGeneration: fmt.Sprintf("%d", inst.Generation), - labelRetry: inst.GetRetryLabelValue(), - } - for k, v := range inst.ObjectMeta.Labels { - labels[k] = v - } - - return labels -} - -// get the labels that should be applied to the porter agent job -func getAgentJobLabels(inst *porterv1.Installation) map[string]string { - labels := getSharedAgentLabels(inst) - labels[labelJobType] = jobTypeAgent - return labels -} - -// get the labels that should be applied to the installer (invocation image) -func getInstallerJobLabels(inst *porterv1.Installation) map[string]string { - labels := getSharedAgentLabels(inst) - labels[labelJobType] = jobTypeInstaller - return labels -} - -func (r *InstallationReconciler) createAgentVolume(ctx context.Context, log logr.Logger, inst *porterv1.Installation, agentCfg porterv1.AgentConfigSpec) (corev1.PersistentVolumeClaim, error) { - sharedLabels := getSharedAgentLabels(inst) - - var results corev1.PersistentVolumeClaimList - if err := r.List(ctx, &results, client.MatchingLabels(sharedLabels)); err != nil { - return corev1.PersistentVolumeClaim{}, errors.Wrap(err, "error checking for an existing agent volume (pvc)") - } - if len(results.Items) > 0 { - return results.Items[0], nil - } - - // Create a volume to share data between porter and the invocation image - pvc := corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: getNamePrefix(inst), - Namespace: inst.Namespace, - Labels: sharedLabels, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ - Requests: map[corev1.ResourceName]resource.Quantity{ - 
corev1.ResourceStorage: agentCfg.GetVolumeSize(), - }, - }, - }, - } - - if err := r.Create(ctx, &pvc); err != nil { - return corev1.PersistentVolumeClaim{}, errors.Wrap(err, "error creating the agent volume (pvc)") - } - - log.V(Log4Debug).Info("Created PersistentVolumeClaim for the Porter agent", "name", pvc.Name) - return pvc, nil -} - -func (r *InstallationReconciler) createAgentSecret(ctx context.Context, log logr.Logger, inst *porterv1.Installation, porterCfg porterv1.PorterConfigSpec) (corev1.Secret, error) { - sharedLabels := getSharedAgentLabels(inst) - - var results corev1.SecretList - if err := r.List(ctx, &results, client.MatchingLabels(sharedLabels)); err != nil { - return corev1.Secret{}, errors.Wrap(err, "error checking for a existing agent secret") - } - if len(results.Items) > 0 { - return results.Items[0], nil - } - - // Create a secret with all the files that should be copied into the agent - // * porter config file (~/.porter/config.json) - // * installation.yaml that we will pass to the command - porterCfgB, err := porterCfg.ToPorterDocument() - if err != nil { - return corev1.Secret{}, errors.Wrap(err, "error marshaling the porter config.json file") - } +// create an AgentAction that will trigger running porter +func (r *InstallationReconciler) createAgentAction(ctx context.Context, log logr.Logger, inst *porterv1.Installation) (*porterv1.AgentAction, error) { + log.V(Log5Trace).Info("Creating porter agent action") installationResourceB, err := inst.Spec.ToPorterDocument() if err != nil { - return corev1.Secret{}, err - } - log.V(Log4Debug).Info("installation document", "installation.yaml", string(installationResourceB)) - - secret := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: getNamePrefix(inst), - Namespace: inst.Namespace, - Labels: sharedLabels, - }, - Type: corev1.SecretTypeOpaque, - Immutable: pointer.BoolPtr(true), - Data: map[string][]byte{ - "config.yaml": porterCfgB, - "installation.yaml": installationResourceB, - }, + return nil, err } - if err = r.Create(ctx, &secret); err != nil { - return corev1.Secret{}, errors.Wrap(err, "error creating the agent secret") - } - - log.V(Log4Debug).Info("Created Secret for the Porter agent", "name", secret.Name) - return secret, nil -} - -func (r *InstallationReconciler) createAgentJob(ctx context.Context, log logr.Logger, porterCommand []string, inst *porterv1.Installation, agentCfg porterv1.AgentConfigSpec, pvc corev1.PersistentVolumeClaim, secret corev1.Secret) (*batchv1.Job, error) { - sharedLabels := getSharedAgentLabels(inst) - - // not checking for a job because that happens earlier during reconcile - - // represent the shared labels that we are applying to all the things in a way that porter can accept on the command line - // These labels are added to the invocation image and should be sorted consistently - installerLabels := getInstallerJobLabels(inst) - sortedInstallerLabels := make([]string, 0, len(installerLabels)) - for k, v := range installerLabels { - sortedInstallerLabels = append(sortedInstallerLabels, fmt.Sprintf("%s=%s", k, v)) + labels := getActionLabels(inst) + for k, v := range inst.Labels { + labels[k] = v } - sort.Strings(sortedInstallerLabels) - porterJob := &batchv1.Job{ + action := &porterv1.AgentAction{ ObjectMeta: metav1.ObjectMeta{ - GenerateName: getNamePrefix(inst), Namespace: inst.Namespace, - Labels: getAgentJobLabels(inst), + GenerateName: inst.Name + "-", + Labels: labels, + Annotations: inst.Annotations, OwnerReferences: []metav1.OwnerReference{ { // I'm not using 
controllerutil.SetControllerReference because I can't track down why that throws a panic when running our tests APIVersion: inst.APIVersion, @@ -404,282 +223,29 @@ func (r *InstallationReconciler) createAgentJob(ctx context.Context, log logr.Lo }, }, }, - Spec: batchv1.JobSpec{ - Completions: pointer.Int32Ptr(1), - BackoffLimit: pointer.Int32Ptr(0), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: getNamePrefix(inst), - Namespace: inst.Namespace, - Labels: sharedLabels, - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "porter-shared", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvc.Name, - }, - }, - }, - { - Name: "porter-config", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: secret.Name, - Optional: pointer.BoolPtr(false), - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "porter-agent", - Image: agentCfg.GetPorterImage(), - ImagePullPolicy: agentCfg.GetPullPolicy(), - Args: porterCommand, - Env: []corev1.EnvVar{ - // Configuration for Porter - { - Name: "PORTER_RUNTIME_DRIVER", - Value: "kubernetes", - }, - // Configuration for the Kubernetes Driver - { - Name: "KUBE_NAMESPACE", - Value: inst.Namespace, - }, - { - Name: "IN_CLUSTER", - Value: "true", - }, - { - Name: "LABELS", - Value: strings.Join(sortedInstallerLabels, " "), - }, - { - Name: "JOB_VOLUME_NAME", - Value: pvc.Name, - }, - { - Name: "JOB_VOLUME_PATH", - Value: "/porter-shared", - }, - { - Name: "CLEANUP_JOBS", - Value: "false", - }, - { - Name: "SERVICE_ACCOUNT", - Value: agentCfg.InstallationServiceAccount, - }, - { - Name: "AFFINITY_MATCH_LABELS", - Value: fmt.Sprintf("%s=Installation %s=%s %s=%d %s=%s", - labelResourceKind, labelResourceName, inst.Name, labelResourceGeneration, inst.Generation, labelRetry, inst.GetRetryLabelValue()), - }, - }, - EnvFrom: []corev1.EnvFromSource{ - // Environment variables for the plugins - { - SecretRef: &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "porter-env", - }, - Optional: pointer.BoolPtr(true), - }, - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "porter-shared", - MountPath: "/porter-shared", - }, - { - Name: "porter-config", - MountPath: "/porter-config", - }, - }, - }, - }, - RestartPolicy: "Never", // TODO: Make the retry policy configurable on the Installation - ServiceAccountName: agentCfg.ServiceAccount, - ImagePullSecrets: nil, // TODO: Make pulling from a private registry possible - // Mount the volumes used by this pod as the nonroot user - // Porter's agent doesn't run as root and won't have access to files on the volume - // otherwise. 
- SecurityContext: &corev1.PodSecurityContext{ - FSGroup: pointer.Int64Ptr(65532), - }, - }, + Spec: porterv1.AgentActionSpec{ + AgentConfig: inst.Spec.AgentConfig, + PorterConfig: inst.Spec.PorterConfig, + Args: []string{"installation", "apply", "installation.yaml"}, + Files: map[string][]byte{ + "installation.yaml": installationResourceB, }, }, } - if err := r.Create(ctx, porterJob); err != nil { - return nil, errors.Wrap(err, "error creating Porter agent job") - } - - log.V(Log4Debug).Info("Created Job for the Porter agent", "name", porterJob.Name) - return porterJob, nil -} - -func setCondition(log logr.Logger, inst *porterv1.Installation, condType porterv1.InstallationConditionType, reason string) bool { - if apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(condType)) { - return false - } - - log.V(Log4Debug).Info("Setting condition", "condition", condType, "reason", reason) - apimeta.SetStatusCondition(&inst.Status.Conditions, metav1.Condition{ - Type: string(condType), - Reason: reason, - Status: metav1.ConditionTrue, - ObservedGeneration: inst.Status.ObservedGeneration, - }) - return true -} - -func (r *InstallationReconciler) resolveAgentConfig(ctx context.Context, log logr.Logger, inst *porterv1.Installation) (porterv1.AgentConfigSpec, error) { - logConfig := func(level string, config *porterv1.AgentConfig) { - if config == nil || config.Name == "" { - return - } - - log.V(Log4Debug).Info("Found porter agent configuration", - "level", level, - "namespace", config.Namespace, - "name", config.Name) - } - - // Read agent configuration defined at the system level - systemCfg := &porterv1.AgentConfig{} - err := r.Get(ctx, types.NamespacedName{Name: "default", Namespace: operatorNamespace}, systemCfg) - if err != nil && !apierrors.IsNotFound(err) { - return porterv1.AgentConfigSpec{}, errors.Wrap(err, "cannot retrieve system level porter agent configuration") - } - logConfig("system", systemCfg) - - // Read agent configuration defined at the namespace level - nsCfg := &porterv1.AgentConfig{} - err = r.Get(ctx, types.NamespacedName{Name: "default", Namespace: inst.Namespace}, nsCfg) - if err != nil && !apierrors.IsNotFound(err) { - return porterv1.AgentConfigSpec{}, errors.Wrap(err, "cannot retrieve namespace level porter agent configuration") - } - logConfig("namespace", nsCfg) - - // Read agent configuration defines on the installation - instCfg := &porterv1.AgentConfig{} - err = r.Get(ctx, types.NamespacedName{Name: inst.Spec.AgentConfig.Name, Namespace: inst.Namespace}, instCfg) - if err != nil && !apierrors.IsNotFound(err) { - return porterv1.AgentConfigSpec{}, errors.Wrapf(err, "cannot retrieve agent configuration %s specified by the installation", inst.Spec.AgentConfig.Name) - } - logConfig("instance", instCfg) - - // Apply overrides - base := &systemCfg.Spec - cfg, err := base.MergeConfig(nsCfg.Spec, instCfg.Spec) - if err != nil { - return porterv1.AgentConfigSpec{}, err + if err := r.Create(ctx, action); err != nil { + return nil, errors.Wrap(err, "error creating the porter agent action") } - log.V(Log4Debug).Info("resolved porter agent configuration", - "porterImage", cfg.GetPorterImage(), - "pullPolicy", cfg.GetPullPolicy(), - "serviceAccount", cfg.ServiceAccount, - "volumeSize", cfg.GetVolumeSize(), - "installationServiceAccount", cfg.InstallationServiceAccount, - ) - return cfg, nil + log.V(Log4Debug).Info("Created porter agent action", "name", action.Name) + return action, nil } -func (r *InstallationReconciler) resolvePorterConfig(ctx context.Context, log 
logr.Logger, inst *porterv1.Installation) (porterv1.PorterConfigSpec, error) { - log.V(Log5Trace).Info(fmt.Sprintf("Resolving porter configuration file for %s", inst.Name)) - logConfig := func(level string, config *porterv1.PorterConfig) { - if config == nil || config.Name == "" { - return - } - log.V(Log4Debug).Info("Found porter config", - "level", level, - "namespace", config.Namespace, - "name", config.Name) - } - - // Provide a safe default config in case nothing is defined anywhere - defaultCfg := porterv1.PorterConfigSpec{ - DefaultStorage: pointer.StringPtr("in-cluster-mongodb"), - DefaultSecretsPlugin: pointer.StringPtr("kubernetes.secrets"), - Storage: []porterv1.StorageConfig{ - {PluginConfig: porterv1.PluginConfig{ - Name: "in-cluster-mongodb", - PluginSubKey: "mongodb", - Config: runtime.RawExtension{Raw: []byte(`{"url":"mongodb://mongodb.porter-operator-system.svc.cluster.local"}`)}, - }}, - }, - } - - // Read agent configuration defined at the system level - systemCfg := &porterv1.PorterConfig{} - err := r.Get(ctx, types.NamespacedName{Name: "default", Namespace: operatorNamespace}, systemCfg) - if err != nil && !apierrors.IsNotFound(err) { - return porterv1.PorterConfigSpec{}, errors.Wrap(err, "cannot retrieve system level porter agent configuration") - } - logConfig("system", systemCfg) - - // Read agent configuration defined at the namespace level - nsCfg := &porterv1.PorterConfig{} - err = r.Get(ctx, types.NamespacedName{Name: "default", Namespace: inst.Namespace}, nsCfg) - if err != nil && !apierrors.IsNotFound(err) { - return porterv1.PorterConfigSpec{}, errors.Wrap(err, "cannot retrieve namespace level porter agent configuration") - } - logConfig("namespace", nsCfg) - - // Read agent configuration defines on the installation - instCfg := &porterv1.PorterConfig{} - err = r.Get(ctx, types.NamespacedName{Name: inst.Spec.PorterConfig.Name, Namespace: inst.Namespace}, instCfg) - if err != nil && !apierrors.IsNotFound(err) { - return porterv1.PorterConfigSpec{}, errors.Wrapf(err, "cannot retrieve agent configuration %s specified by the installation", inst.Spec.AgentConfig.Name) - } - logConfig("instance", instCfg) - - // Resolve final configuration - // We don't log the final config because we haven't yet added the feature to enable not having sensitive data in porter's config files - base := &defaultCfg - cfg, err := base.MergeConfig(systemCfg.Spec, nsCfg.Spec, instCfg.Spec) - if err != nil { - return porterv1.PorterConfigSpec{}, err - } - - return cfg, nil -} - -// make sure that all CRDs, even ones made with old versions of the operator, -// have a finalizer set so that we can uninstall when the CRD is deleted. -func (r *InstallationReconciler) ensureFinalizerSet(ctx context.Context, inst *porterv1.Installation) (updated bool, err error) { - // Ensure all Installations have a finalizer to we can uninstall when they are deleted - if inst.ObjectMeta.DeletionTimestamp.IsZero() { - // The object is not being deleted, so if it does not have our finalizer, - // then lets add the finalizer and update the object. This is equivalent - // registering our finalizer. 
- if !isFinalizerSet(inst) { - controllerutil.AddFinalizer(inst, finalizerName) - return true, r.Update(ctx, inst) - } - } - return false, nil -} - -func (r *InstallationReconciler) removeFinalizer(ctx context.Context, log logr.Logger, inst *porterv1.Installation) error { - log.V(Log5Trace).Info("removing finalizer") - controllerutil.RemoveFinalizer(inst, finalizerName) - return r.Update(ctx, inst) -} - -// Check the status of the porter-agent job and use that to update the installation status -func (r *InstallationReconciler) syncStatus(ctx context.Context, log logr.Logger, inst *porterv1.Installation, job *batchv1.Job) error { +// Check the status of the associated agent action and use it to update the installation status +func (r *InstallationReconciler) syncStatus(ctx context.Context, log logr.Logger, inst *porterv1.Installation, action *porterv1.AgentAction) error { origStatus := inst.Status - applyJobToStatus(log, inst, job) + applyAgentAction(log, inst, action) if !reflect.DeepEqual(origStatus, inst.Status) { return r.saveStatus(ctx, log, inst) @@ -688,82 +254,35 @@ func (r *InstallationReconciler) syncStatus(ctx context.Context, log logr.Logger return nil } -// Takes a job and uses it to calculate the new status for an installation -// Returns whether or not any changes were made -func applyJobToStatus(log logr.Logger, inst *porterv1.Installation, job *batchv1.Job) { - // Recalculate all conditions based on what we currently observe - inst.Status.ObservedGeneration = inst.Generation - inst.Status.Conditions = make([]metav1.Condition, 0, 4) - - if job == nil { - inst.Status.Phase = porterv1.PhaseUnknown - inst.Status.ActiveJob = nil - } - if job != nil { - inst.Status.ActiveJob = &corev1.LocalObjectReference{Name: job.Name} - setCondition(log, inst, porterv1.ConditionScheduled, "JobCreated") - inst.Status.Phase = porterv1.PhasePending - - if job.Status.Active+job.Status.Failed+job.Status.Succeeded > 0 { - inst.Status.Phase = porterv1.PhaseRunning - setCondition(log, inst, porterv1.ConditionStarted, "JobStarted") - } - - for _, condition := range job.Status.Conditions { - switch condition.Type { - case batchv1.JobComplete: - inst.Status.Phase = porterv1.PhaseSucceeded - inst.Status.ActiveJob = nil - setCondition(log, inst, porterv1.ConditionComplete, "JobCompleted") - break - case batchv1.JobFailed: - inst.Status.Phase = porterv1.PhaseFailed - inst.Status.ActiveJob = nil - setCondition(log, inst, porterv1.ConditionFailed, "JobFailed") - break - } - } - } -} - // Only update the status with a PATCH, don't clobber the entire installation func (r *InstallationReconciler) saveStatus(ctx context.Context, log logr.Logger, inst *porterv1.Installation) error { - key := client.ObjectKeyFromObject(inst) - latest := &porterv1.Installation{} - if err := r.Client.Get(ctx, key, latest); err != nil { - return errors.Wrap(err, "could not get the latest installation definition") - } - log.V(Log5Trace).Info("Patching installation status") - err := r.Client.Status().Patch(ctx, inst, client.MergeFrom(latest)) - return errors.Wrap(err, "failed to update the installation status") -} - -func isFinalizerSet(inst *porterv1.Installation) bool { - for _, finalizer := range inst.Finalizers { - if finalizer == finalizerName { - return true - } - } - return false + return PatchObjectWithRetry(ctx, log, r.Client, r.Client.Status().Patch, inst, func() client.Object { + return &porterv1.Installation{} + }) } -func shouldUninstall(inst *porterv1.Installation) bool { +func (r *InstallationReconciler) shouldUninstall(inst *porterv1.Installation) bool { // ignore a deleted CRD with no finalizers return isDeleted(inst) && isFinalizerSet(inst) } -func isDeleted(inst *porterv1.Installation) bool { - return inst.ObjectMeta.DeletionTimestamp.IsZero() == false -} +// Sync the retry annotation from the installation to the agent action to trigger another run. +func (r *InstallationReconciler) retry(ctx context.Context, log logr.Logger, inst *porterv1.Installation, action *porterv1.AgentAction) error { + log.V(Log5Trace).Info("Initializing installation status") + inst.Status.Initialize() + inst.Status.Action = &corev1.LocalObjectReference{Name: action.Name} + if err := r.saveStatus(ctx, log, inst); err != nil { + return err + } -func getNamePrefix(inst *porterv1.Installation) string { - // Limit how much of the name we use so that we have space for the - // additional characters appended "-generation-resourceversion-random" - maxNameLength := 45 - name := inst.Name - if len(name) > maxNameLength { - name = name[:maxNameLength] + log.V(Log5Trace).Info("Retrying associated porter agent action") + retry := inst.GetRetryLabelValue() + action.SetRetryAnnotation(retry) + if err := r.Update(ctx, action); err != nil { + return errors.Wrap(err, "error updating the associated porter agent action") } - return fmt.Sprintf("%s-%d-%s", name, inst.Generation, inst.ResourceVersion) + + log.V(Log4Debug).Info("Retried associated porter agent action", "name", action.Name, "retry", retry) + return nil } diff --git a/controllers/installation_controller_test.go b/controllers/installation_controller_test.go index 351e8fe2..13581099 100644 --- a/controllers/installation_controller_test.go +++ b/controllers/installation_controller_test.go @@ -9,10 +9,8 @@ import ( "github.com/go-logr/logr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -21,394 +19,96 @@ import ( "k8s.io/utils/pointer" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - fake "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func Test_getNamePrefix(t *testing.T) { - testcases := []struct { - name string - inst porterv1.Installation - want string - }{ - {name: "short name", want: "short-123-9912", - inst: porterv1.Installation{ObjectMeta: metav1.ObjectMeta{ - Name: "short", Generation: 123, ResourceVersion: "9912"}}}, - {name: "long name", want: "1oF8JkZxyfEojJonxujl9rFvnSgghT1XaP57j3nNirWAA-123-9912", - inst: porterv1.Installation{ObjectMeta: metav1.ObjectMeta{ - Name: "1oF8JkZxyfEojJonxujl9rFvnSgghT1XaP57j3nNirWAA5YLG8", Generation: 123, ResourceVersion: "9912"}}}, - } - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - got := getNamePrefix(&tc.inst) - assert.Equal(t, tc.want, got) - }) - } -} - -func Test_getJobOwner(t *testing.T) { - controllerUUID := "9908ddc5-70cb-4425-b0e4-1faed03bae14" - tests := []struct { - name string - obj client.Object - want []string - }{ - {name: "not a job", obj: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{{ - Name: controllerUUID, - APIVersion: porterv1.GroupVersion.String(), - Kind: "Secret", - Controller: pointer.BoolPtr(true)}}}}}, - {name: "our job",
obj: &batchv1.Job{ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{{ - Name: controllerUUID, - APIVersion: porterv1.GroupVersion.String(), - Kind: "Installation", - Controller: pointer.BoolPtr(true)}}}}, - want: []string{controllerUUID}}, - {name: "not our job", obj: &batchv1.Job{ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{{ - Name: controllerUUID, - APIVersion: "someone else", - Kind: "Installation", - Controller: pointer.BoolPtr(true)}}}}}, - {name: "not our kind", obj: &batchv1.Job{ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{{ - Name: controllerUUID, - APIVersion: porterv1.GroupVersion.String(), - Kind: "something else", - Controller: pointer.BoolPtr(true)}}}}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := getOwner(tt.obj) - assert.Equal(t, tt.want, got, "incorrect job owner") - }) - } -} - -func Test_getRetryLabelValue(t *testing.T) { - inst := porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - porterv1.AnnotationRetry: "123", - }, - }, - } - - assert.Equal(t, "202cb962ac59075b964b07152d234b70", inst.GetRetryLabelValue(), "retry label value should be populated when the annotation is set") - - delete(inst.Annotations, porterv1.AnnotationRetry) - - assert.Empty(t, inst.GetRetryLabelValue(), "retry label value should be empty when no annotation is set") - -} - -func Test_applyJobToStatus(t *testing.T) { - tests := []struct { - name string - job *batchv1.Job - inst porterv1.Installation - wantStatus porterv1.InstallationStatus - }{ - {name: "no job", - inst: porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, - wantStatus: porterv1.InstallationStatus{ - ObservedGeneration: 1, - Phase: porterv1.PhaseUnknown, - }}, - {name: "job created", - inst: porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, - job: &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "myjob"}}, - wantStatus: porterv1.InstallationStatus{ - ObservedGeneration: 1, - ActiveJob: &corev1.LocalObjectReference{Name: "myjob"}, - Phase: porterv1.PhasePending, - Conditions: []metav1.Condition{ - {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, - }, - }}, - {name: "job started", - inst: porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, - job: &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "myjob"}, - Status: batchv1.JobStatus{Active: 1}}, - wantStatus: porterv1.InstallationStatus{ - ObservedGeneration: 1, - ActiveJob: &corev1.LocalObjectReference{Name: "myjob"}, - Phase: porterv1.PhaseRunning, - Conditions: []metav1.Condition{ - {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, - {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, - }, - }}, - {name: "job succeeded", - inst: porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, - job: &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "myjob"}, - Status: batchv1.JobStatus{Succeeded: 1, Conditions: []batchv1.JobCondition{{Type: batchv1.JobComplete, Status: corev1.ConditionTrue}}}}, - wantStatus: porterv1.InstallationStatus{ - ObservedGeneration: 1, - ActiveJob: nil, - Phase: porterv1.PhaseSucceeded, - Conditions: []metav1.Condition{ - {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, - {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, - {Type: string(porterv1.ConditionComplete), Status: metav1.ConditionTrue}, - }, - }}, - {name: "job 
failed", - inst: porterv1.Installation{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, - job: &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{Name: "myjob"}, - Status: batchv1.JobStatus{Failed: 1, Conditions: []batchv1.JobCondition{{Type: batchv1.JobFailed, Status: corev1.ConditionTrue}}}}, - wantStatus: porterv1.InstallationStatus{ - ObservedGeneration: 1, - ActiveJob: nil, - Phase: porterv1.PhaseFailed, - Conditions: []metav1.Condition{ - {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, - {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, - {Type: string(porterv1.ConditionFailed), Status: metav1.ConditionTrue}, - }, - }}, - {name: "update resets status", - inst: porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{Generation: 2}, - Status: porterv1.InstallationStatus{ - ObservedGeneration: 1, - ActiveJob: nil, - Phase: porterv1.PhaseFailed, - Conditions: []metav1.Condition{ - {Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}, - {Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}, - {Type: string(porterv1.ConditionFailed), Status: metav1.ConditionTrue}, - }, - }}, - wantStatus: porterv1.InstallationStatus{ - ObservedGeneration: 2, - ActiveJob: nil, - Phase: porterv1.PhaseUnknown, - }}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - inst := &tt.inst - applyJobToStatus(logr.Discard(), inst, tt.job) - - assert.Equal(t, tt.wantStatus.Phase, inst.Status.Phase, "incorrect Phase") - assert.Equal(t, tt.wantStatus.ObservedGeneration, inst.Status.ObservedGeneration, "incorrect ObservedGeneration") - assert.Equal(t, tt.wantStatus.ActiveJob, inst.Status.ActiveJob, "incorrect ActiveJob") - - assert.Len(t, inst.Status.Conditions, len(tt.wantStatus.Conditions), "incorrect number of Conditions") - for _, cond := range tt.wantStatus.Conditions { - assert.True(t, apimeta.IsStatusConditionPresentAndEqual(inst.Status.Conditions, cond.Type, cond.Status), "expected condition %s to be %s", cond.Type, cond.Status) - } - }) - } -} - -func Test_installationChanged_Update(t *testing.T) { - predicate := installationChanged{} - - t.Run("spec changed", func(t *testing.T) { - e := event.UpdateEvent{ - ObjectOld: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - }, - }, - ObjectNew: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 2, - }, - }, - } - assert.True(t, predicate.Update(e), "expected changing the generation to trigger reconciliation") - }) - - t.Run("finalizer added", func(t *testing.T) { - e := event.UpdateEvent{ - ObjectOld: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - }, - }, - ObjectNew: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - Finalizers: []string{finalizerName}, - }, - }, - } - assert.True(t, predicate.Update(e), "expected setting a finalizer to trigger reconciliation") - }) - - t.Run("retry annotation changed", func(t *testing.T) { - e := event.UpdateEvent{ - ObjectOld: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - }, - }, - ObjectNew: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - Annotations: map[string]string{ - porterv1.AnnotationRetry: "1", - }, - }, - }, - } - assert.True(t, predicate.Update(e), "expected setting changing the retry annotation to trigger reconciliation") - }) - - t.Run("status changed", func(t *testing.T) { - e := event.UpdateEvent{ - ObjectOld: &porterv1.Installation{ - ObjectMeta: 
metav1.ObjectMeta{ - Generation: 2, - }, - Status: porterv1.InstallationStatus{ - ObservedGeneration: 1, - }, - }, - ObjectNew: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 2, - }, - Status: porterv1.InstallationStatus{ - ObservedGeneration: 2, - }, - }, - } - assert.False(t, predicate.Update(e), "expected status changes to be ignored") - }) - - t.Run("label added", func(t *testing.T) { - e := event.UpdateEvent{ - ObjectOld: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - ResourceVersion: "1", - }, - }, - ObjectNew: &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - ResourceVersion: "2", - Labels: map[string]string{ - "myLabel": "super useful", - }, - }, - }, - } - assert.False(t, predicate.Update(e), "expected metadata changes to be ignored") - }) -} - -func setupTestController(objs ...client.Object) InstallationReconciler { - scheme := runtime.NewScheme() - utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(porterv1.AddToScheme(scheme)) - - fakeBuilder := fake.NewClientBuilder() - fakeBuilder.WithScheme(scheme) - fakeBuilder.WithObjects(objs...) - fakeClient := fakeBuilder.Build() - - return InstallationReconciler{ - Log: logr.Discard(), - Client: fakeClient, - Scheme: scheme, - } -} - -func Test_Reconcile(t *testing.T) { +func TestInstallationReconciler_Reconcile(t *testing.T) { // long test is long - // Run through a full installation lifecycle: create, update, delete + // Run through a full resource lifecycle: create, update, delete ctx := context.Background() + namespace := "test" + name := "mybuns" testdata := []client.Object{ &porterv1.Installation{ - ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "mybuns", Generation: 1}}, + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name, Generation: 1}}, } - controller := setupTestController(testdata...) + controller := setupInstallationController(testdata...) 
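+ // triggerReconcile (defined below) runs the controller once and then refreshes inst with the latest state from the fake cluster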
var inst porterv1.Installation triggerReconcile := func() { - instRef := types.NamespacedName{Namespace: "test", Name: "mybuns"} - instKey := client.ObjectKey{Namespace: "test", Name: "mybuns"} + fullname := types.NamespacedName{Namespace: namespace, Name: name} + key := client.ObjectKey{Namespace: namespace, Name: name} request := controllerruntime.Request{ - NamespacedName: instRef, + NamespacedName: fullname, } result, err := controller.Reconcile(ctx, request) require.NoError(t, err) require.True(t, result.IsZero()) var updatedInst porterv1.Installation - if err := controller.Get(ctx, instKey, &updatedInst); err == nil { + if err := controller.Get(ctx, key, &updatedInst); err == nil { inst = updatedInst } } triggerReconcile() - // Verify the installation was picked up and now has finalizers - assert.Contains(t, inst.Finalizers, finalizerName, "Finalizer should be set on new resources") - assert.Equal(t, inst.Status.Phase, porterv1.PhaseUnknown, "New resources should be initialized to Phase: Unknown") + // Verify the installation was picked up and the status initialized + assert.Equal(t, porterv1.PhaseUnknown, inst.Status.Phase, "New resources should be initialized to Phase: Unknown") triggerReconcile() - // Verify a job has been scheduled - var jobs batchv1.JobList - require.NoError(t, controller.List(ctx, &jobs)) - require.Len(t, jobs.Items, 1) - job := jobs.Items[0] + // Verify an AgentAction was created and set on the status + require.NotNil(t, inst.Status.Action, "expected Action to be set") + var action porterv1.AgentAction + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: inst.Namespace, Name: inst.Status.Action.Name}, &action)) + assert.Equal(t, "1", action.Labels[porterv1.LabelResourceGeneration], "The wrong action is set on the status") + + // Mark the action as scheduled + action.Status.Phase = porterv1.PhasePending + action.Status.Conditions = []metav1.Condition{{Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) + + triggerReconcile() - require.NotNil(t, inst.Status.ActiveJob, "expected ActiveJob to be set") - assert.Equal(t, job.Name, inst.Status.ActiveJob.Name, "expected ActiveJob to contain the job name") + // Verify the installation status was synced with the action assert.Equal(t, porterv1.PhasePending, inst.Status.Phase, "incorrect Phase") assert.True(t, apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionScheduled))) - // Start the job - job.Status.Active = 1 - require.NoError(t, controller.Status().Update(ctx, &job)) + // Mark the action as started + action.Status.Phase = porterv1.PhaseRunning + action.Status.Conditions = []metav1.Condition{{Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) triggerReconcile() - // Verify that the installation status has the job - require.NotNil(t, inst.Status.ActiveJob, "expected ActiveJob to be set") - assert.Equal(t, job.Name, inst.Status.ActiveJob.Name, "expected ActiveJob to contain the job name") + // Verify that the installation status was synced with the action assert.Equal(t, porterv1.PhaseRunning, inst.Status.Phase, "incorrect Phase") assert.True(t, apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionStarted))) - // Complete the job - job.Status.Active = 0 - job.Status.Succeeded = 1 - job.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobComplete, Status: 
corev1.ConditionTrue}} - require.NoError(t, controller.Status().Update(ctx, &job)) + // Complete the action + action.Status.Phase = porterv1.PhaseSucceeded + action.Status.Conditions = []metav1.Condition{{Type: string(porterv1.ConditionComplete), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) triggerReconcile() - // Verify that the installation status shows the job is done - require.Nil(t, inst.Status.ActiveJob, "expected ActiveJob to be nil") + // Verify that the installation status was synced with the action + require.NotNil(t, inst.Status.Action, "expected Action to still be set") assert.Equal(t, porterv1.PhaseSucceeded, inst.Status.Phase, "incorrect Phase") assert.True(t, apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionComplete))) - // Fail the job - job.Status.Active = 0 - job.Status.Succeeded = 0 - job.Status.Failed = 1 - job.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobFailed, Status: corev1.ConditionTrue}} - require.NoError(t, controller.Status().Update(ctx, &job)) + // Fail the action + action.Status.Phase = porterv1.PhaseFailed + action.Status.Conditions = []metav1.Condition{{Type: string(porterv1.ConditionFailed), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) triggerReconcile() - // Verify that the installation status shows the job is failed - require.Nil(t, inst.Status.ActiveJob, "expected ActiveJob to be nil") + // Verify that the installation status shows the action is failed + require.NotNil(t, inst.Status.Action, "expected Action to still be set") assert.Equal(t, porterv1.PhaseFailed, inst.Status.Phase, "incorrect Phase") assert.True(t, apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionFailed))) @@ -420,8 +120,27 @@ func Test_Reconcile(t *testing.T) { // Verify that the installation status was re-initialized assert.Equal(t, int64(2), inst.Status.ObservedGeneration) - assert.Equal(t, porterv1.PhasePending, inst.Status.Phase, "New resources should be initialized to Phase: Unknown and then immediately transition to Pending") - assert.True(t, apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionScheduled))) + assert.Equal(t, porterv1.PhaseUnknown, inst.Status.Phase, "New resources should be initialized to Phase: Unknown") + assert.Empty(t, inst.Status.Conditions, "Conditions should have been reset") + + // Retry the last action + lastAction := inst.Status.Action.Name + inst.Annotations = map[string]string{porterv1.AnnotationRetry: "retry-1"} + require.NoError(t, controller.Update(ctx, &inst)) + + triggerReconcile() + + // Verify that action has retry set on it now + require.NotNil(t, inst.Status.Action, "Expected the action to still be set") + assert.Equal(t, lastAction, inst.Status.Action.Name, "Expected the action to be the same") + // get the latest version of the action + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: inst.Namespace, Name: inst.Status.Action.Name}, &action)) + assert.NotEmpty(t, action.Annotations[porterv1.AnnotationRetry], "Expected the action to have its retry annotation set") + + assert.Equal(t, int64(2), inst.Status.ObservedGeneration) + assert.NotEmpty(t, inst.Status.Action, "Expected the action to still be set") + assert.Equal(t, porterv1.PhaseUnknown, inst.Status.Phase, "New resources should be initialized to Phase: Unknown") + assert.Empty(t, inst.Status.Conditions, "Conditions should have been reset") // Delete the installation 
(setting the delete timestamp directly instead of client.Delete because otherwise the fake client just removes it immediately) // The fake client doesn't really follow finalizer logic @@ -432,20 +151,15 @@ func Test_Reconcile(t *testing.T) { triggerReconcile() - // Verify that a job was spawned to uninstall it - require.NoError(t, controller.List(ctx, &jobs)) - require.Len(t, jobs.Items, 3) - job = jobs.Items[2] - require.NotNil(t, inst.Status.ActiveJob, "expected ActiveJob to be set") - assert.Equal(t, job.Name, inst.Status.ActiveJob.Name, "expected ActiveJob to contain the job name") - assert.Equal(t, porterv1.PhasePending, inst.Status.Phase, "An uninstall job should have been kicked off") - assert.True(t, apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionScheduled))) + // Verify that an action was created to uninstall it + require.NotNil(t, inst.Status.Action, "expected Action to be set") + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: inst.Namespace, Name: inst.Status.Action.Name}, &action)) + assert.Equal(t, "3", action.Labels[porterv1.LabelResourceGeneration], "The wrong action is set on the status") - // Complete the uninstall job - job.Status.Active = 0 - job.Status.Succeeded = 1 - job.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobComplete, Status: corev1.ConditionTrue}} - require.NoError(t, controller.Status().Update(ctx, &job)) + // Complete the uninstall action + action.Status.Phase = porterv1.PhaseSucceeded + action.Status.Conditions = []metav1.Condition{{Type: string(porterv1.ConditionComplete), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) triggerReconcile() @@ -459,229 +173,81 @@ func Test_Reconcile(t *testing.T) { triggerReconcile() } -func Test_createAgentVolume(t *testing.T) { - testLabels := map[string]string{ - "testLabel": "abc123", - } - controller := setupTestController() - - inst := &porterv1.Installation{ - TypeMeta: metav1.TypeMeta{ - APIVersion: porterv1.GroupVersion.String(), - Kind: "Installation", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "test", - Name: "porter-hello", - Generation: 1, - ResourceVersion: "123", - UID: "random-uid", - Labels: testLabels, - }, - Spec: porterv1.InstallationSpec{ - Name: "mybuns", - Bundle: porterv1.OCIReferenceParts{Repository: "getporter/porter-hello", Version: "0.1.1"}, - }, - } - agentCfg := porterv1.AgentConfigSpec{ - VolumeSize: "128Mi", - PorterRepository: "getporter/custom-agent", - PorterVersion: "v1.0.0", - PullPolicy: "Always", - ServiceAccount: "porteraccount", - InstallationServiceAccount: "installeraccount", - } - pvc, err := controller.createAgentVolume(context.Background(), logr.Discard(), inst, agentCfg) - require.NoError(t, err) - - // Verify the pvc properties - assert.Equal(t, "porter-hello-1-123", pvc.GenerateName, "incorrect pvc name") - assert.Equal(t, inst.Namespace, pvc.Namespace, "incorrect pvc namespace") - assert.Equal(t, []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, pvc.Spec.AccessModes, "incorrect pvc access modes") - assert.Equal(t, pvc.Spec.Resources.Requests[corev1.ResourceStorage], resource.MustParse("128Mi")) - assertSharedAgentLabels(t, pvc.Labels) - for k, v := range testLabels { - assertContains(t, pvc.Labels, k, v) - } -} - -func Test_createAgentSecret(t *testing.T) { - testLabels := map[string]string{ - "testLabel": "abc123", - } - controller := setupTestController() - - inst := &porterv1.Installation{ - TypeMeta: metav1.TypeMeta{ - APIVersion: 
porterv1.GroupVersion.String(), - Kind: "Installation", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "test", - Name: "porter-hello", - Generation: 1, - ResourceVersion: "123", - UID: "random-uid", - Labels: testLabels, - }, - Spec: porterv1.InstallationSpec{ - Name: "mybuns", - Bundle: porterv1.OCIReferenceParts{Repository: "getporter/porter-hello", Version: "0.1.1"}, - }, - } - porterCfg := porterv1.PorterConfigSpec{} - secret, err := controller.createAgentSecret(context.Background(), logr.Discard(), inst, porterCfg) - require.NoError(t, err) - - // Verify the secret properties - assert.Equal(t, "porter-hello-1-123", secret.GenerateName, "incorrect secret name") - assert.Equal(t, inst.Namespace, secret.Namespace, "incorrect secret namespace") - assert.Equal(t, corev1.SecretTypeOpaque, secret.Type, "expected the secret to be of type Opaque") - assert.Equal(t, pointer.BoolPtr(true), secret.Immutable, "expected the secret to be immutable") - assert.Contains(t, secret.Data, "config.yaml", "expected the secret to have config.yaml") - assert.Contains(t, secret.Data, "installation.yaml", "expected the secret to have installation.yaml") - assertSharedAgentLabels(t, secret.Labels) - for k, v := range testLabels { - assertContains(t, secret.Labels, k, v) - } -} - -func Test_createAgentJob(t *testing.T) { - testLabels := map[string]string{ - "testLabel": "abc123", - } - controller := setupTestController() +func TestInstallationReconciler_createAgentAction(t *testing.T) { + controller := setupInstallationController() - cmd := []string{"porter", "installation", "apply", "-f=installation.yaml"} inst := &porterv1.Installation{ TypeMeta: metav1.TypeMeta{ APIVersion: porterv1.GroupVersion.String(), Kind: "Installation", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: "test", - Name: "porter-hello", - Generation: 1, - ResourceVersion: "123", - UID: "random-uid", - Labels: testLabels, + Namespace: "test", + Name: "myblog", + UID: "random-uid", + Generation: 1, + Labels: map[string]string{ + "testLabel": "abc123", + }, + Annotations: map[string]string{ + porterv1.AnnotationRetry: "2021-2-2 12:00:00", + }, }, Spec: porterv1.InstallationSpec{ - Name: "mybuns", - Bundle: porterv1.OCIReferenceParts{Repository: "getporter/porter-hello", Version: "0.1.1"}, + Namespace: "dev", + Name: "wordpress", + AgentConfig: &corev1.LocalObjectReference{Name: "myAgentConfig"}, + PorterConfig: &corev1.LocalObjectReference{Name: "myPorterConfig"}, }, } - agentCfg := porterv1.AgentConfigSpec{ - VolumeSize: "128Mi", - PorterRepository: "getporter/custom-agent", - PorterVersion: "v1.0.0", - PullPolicy: "Always", - ServiceAccount: "porteraccount", - InstallationServiceAccount: "installeraccount", - } - pvc := corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "mypvc"}} - secret := corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}} - job, err := controller.createAgentJob(context.Background(), logr.Discard(), cmd, inst, agentCfg, pvc, secret) + action, err := controller.createAgentAction(context.Background(), logr.Discard(), inst) require.NoError(t, err) - - // Verify the job properties - wantName := "porter-hello-1-123" - assert.Equal(t, wantName, job.GenerateName, "incorrect job name") - assert.Equal(t, inst.Namespace, job.Namespace, "incorrect job namespace") - assert.Len(t, job.OwnerReferences, 1, "expected the job to have an owner reference") + assert.Equal(t, "test", action.Namespace) + assert.Contains(t, action.Name, "myblog-") + assert.Len(t, action.OwnerReferences, 1, "expected an owner 
reference") wantOwnerRef := metav1.OwnerReference{ APIVersion: porterv1.GroupVersion.String(), Kind: "Installation", - Name: "porter-hello", + Name: "myblog", UID: "random-uid", Controller: pointer.BoolPtr(true), BlockOwnerDeletion: pointer.BoolPtr(true), } - assert.Equal(t, wantOwnerRef, job.OwnerReferences[0], "incorrect owner reference") - assertSharedAgentLabels(t, job.Labels) - for k, v := range testLabels { - assertContains(t, job.Labels, k, v) - } - assertContains(t, job.Labels, labelJobType, jobTypeAgent) - assert.Equal(t, pointer.Int32Ptr(1), job.Spec.Completions, "incorrect job completions") - assert.Equal(t, pointer.Int32Ptr(0), job.Spec.BackoffLimit, "incorrect job back off limit") - - // Verify the job pod template - podTemplate := job.Spec.Template - assert.Equal(t, wantName, podTemplate.GenerateName, "incorrect pod generate name") - assert.Equal(t, "test", podTemplate.Namespace, "incorrect pod namespace") - assertSharedAgentLabels(t, podTemplate.Labels) - assert.Len(t, podTemplate.Spec.Volumes, 2, "incorrect pod volumes") - assert.Equal(t, "porter-shared", podTemplate.Spec.Volumes[0].Name, "expected the porter-shared volume") - assert.Equal(t, "porter-config", podTemplate.Spec.Volumes[1].Name, "expected the porter-config volume") - assert.Equal(t, "porteraccount", podTemplate.Spec.ServiceAccountName, "incorrect service account for the pod") - assert.NotNil(t, podTemplate.Spec.SecurityContext, "incorrect pod security context") - nonroot := pointer.Int64Ptr(65532) - assert.Equal(t, nonroot, podTemplate.Spec.SecurityContext.FSGroup, "we should mount the pod volumes as the nonroot user") - - // Verify the agent container - agentContainer := podTemplate.Spec.Containers[0] - assert.Equal(t, "porter-agent", agentContainer.Name, "incorrect agent container name") - assert.Equal(t, "getporter/custom-agent:v1.0.0", agentContainer.Image, "incorrect agent image") - assert.Equal(t, corev1.PullPolicy("Always"), agentContainer.ImagePullPolicy, "incorrect agent pull policy") - assert.Equal(t, cmd, agentContainer.Args, "incorrect agent command arguments") - assertEnvVar(t, agentContainer.Env, "PORTER_RUNTIME_DRIVER", "kubernetes") - assertEnvVar(t, agentContainer.Env, "KUBE_NAMESPACE", "test") - assertEnvVar(t, agentContainer.Env, "IN_CLUSTER", "true") - assertEnvVar(t, agentContainer.Env, "JOB_VOLUME_NAME", pvc.Name) - assertEnvVar(t, agentContainer.Env, "JOB_VOLUME_PATH", "/porter-shared") - assertEnvVar(t, agentContainer.Env, "CLEANUP_JOBS", "false") // this will be configurable in the future - assertEnvVar(t, agentContainer.Env, "SERVICE_ACCOUNT", "installeraccount") - assertEnvVar(t, agentContainer.Env, "LABELS", "porter.sh/jobType=bundle-installer porter.sh/managed=true porter.sh/resourceGeneration=1 porter.sh/resourceKind=Installation porter.sh/resourceName=porter-hello porter.sh/resourceVersion=123 porter.sh/retry= testLabel=abc123") - assertEnvVar(t, agentContainer.Env, "AFFINITY_MATCH_LABELS", "porter.sh/resourceKind=Installation porter.sh/resourceName=porter-hello porter.sh/resourceGeneration=1 porter.sh/retry=") - assertEnvFrom(t, agentContainer.EnvFrom, "porter-env", pointer.BoolPtr(true)) - assertVolumeMount(t, agentContainer.VolumeMounts, "porter-config", "/porter-config") - assertVolumeMount(t, agentContainer.VolumeMounts, "porter-shared", "/porter-shared") -} - -func assertSharedAgentLabels(t *testing.T, labels map[string]string) { - assertContains(t, labels, labelManaged, "true") - assertContains(t, labels, labelResourceKind, "Installation") - assertContains(t, labels, 
labelResourceName, "porter-hello") - assertContains(t, labels, labelResourceGeneration, "1") - assertContains(t, labels, labelResourceVersion, "123") - assertContains(t, labels, labelRetry, "") -} - -func assertContains(t *testing.T, labels map[string]string, key string, value string) { - assert.Contains(t, labels, key, "expected the %s key to be set", key) - assert.Equal(t, value, labels[key], "incorrect value for key %s", key) -} - -func assertEnvVar(t *testing.T, envVars []corev1.EnvVar, name string, value string) { - for _, envVar := range envVars { - if envVar.Name == name { - assert.Equal(t, value, envVar.Value, "incorrect value for EnvVar %s", name) - return - } - } - - assert.Failf(t, "expected the %s EnvVar to be set", name) + assert.Equal(t, wantOwnerRef, action.OwnerReferences[0], "incorrect owner reference") + + assertContains(t, action.Annotations, porterv1.AnnotationRetry, inst.Annotations[porterv1.AnnotationRetry], "incorrect annotation") + assertContains(t, action.Labels, porterv1.LabelManaged, "true", "incorrect label") + assertContains(t, action.Labels, porterv1.LabelResourceKind, "Installation", "incorrect label") + assertContains(t, action.Labels, porterv1.LabelResourceName, "myblog", "incorrect label") + assertContains(t, action.Labels, porterv1.LabelResourceGeneration, "1", "incorrect label") + assertContains(t, action.Labels, "testLabel", "abc123", "incorrect label") + + assert.Equal(t, inst.Spec.AgentConfig, action.Spec.AgentConfig, "incorrect AgentConfig reference") + assert.Equal(t, inst.Spec.PorterConfig, action.Spec.PorterConfig, "incorrect PorterConfig reference") + assert.Nilf(t, action.Spec.Command, "should use the default command for the agent") + assert.Equal(t, []string{"installation", "apply", "installation.yaml"}, action.Spec.Args, "incorrect agent arguments") + assert.Contains(t, action.Spec.Files, "installation.yaml") + assert.NotEmpty(t, action.Spec.Files["installation.yaml"], "expected installation.yaml to get set on the action") + + assert.Empty(t, action.Spec.Env, "incorrect Env") + assert.Empty(t, action.Spec.EnvFrom, "incorrect EnvFrom") + assert.Empty(t, action.Spec.Volumes, "incorrect Volumes") + assert.Empty(t, action.Spec.VolumeMounts, "incorrect VolumeMounts") } -func assertEnvFrom(t *testing.T, envFrom []corev1.EnvFromSource, name string, optional *bool) { - for _, source := range envFrom { - if source.SecretRef.Name == name { - assert.Equal(t, optional, source.SecretRef.Optional, "incorrect optional flag for EnvFrom %s", name) - return - } - } + +func setupInstallationController(objs ...client.Object) InstallationReconciler { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(porterv1.AddToScheme(scheme)) - assert.Failf(t, "expected the %s EnvFrom to be set", name) -} + fakeBuilder := fake.NewClientBuilder() + fakeBuilder.WithScheme(scheme) + fakeBuilder.WithObjects(objs...)
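+ // the fake client stands in for the real API server and is pre-seeded with the test's objects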
+ fakeClient := fakeBuilder.Build() -func assertVolumeMount(t *testing.T, mounts []corev1.VolumeMount, name string, path string) { - for _, mount := range mounts { - if mount.Name == name { - assert.Equal(t, path, mount.MountPath, "incorrect mount path for VolumeMount %s", name) - return - } + return InstallationReconciler{ + Log: logr.Discard(), + Client: fakeClient, + Scheme: scheme, } - - assert.Failf(t, "expected the %s VolumeMount to be set", name) } diff --git a/controllers/installation_test.go b/controllers/installation_test.go deleted file mode 100644 index 018ce0fd..00000000 --- a/controllers/installation_test.go +++ /dev/null @@ -1,157 +0,0 @@ -//go:build integration -// +build integration - -package controllers_test - -import ( - "context" - "fmt" - "time" - - porterv1 "get.porter.sh/operator/api/v1" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - gomegatypes "github.com/onsi/gomega/types" - "github.com/pkg/errors" - "github.com/tidwall/pretty" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -var _ = Describe("Installation controller", func() { - - // Define utility constants for object names and testing timeouts/durations and intervals. - const ( - InstallationName = "porter-hello" - AffinityMatchLabelValue = "porter.sh/resourceKind=Installation porter.sh/resourceName=" + InstallationName + " porter.sh/resourceGeneration=1" - ) - - Context("When working with Porter", func() { - It("Should execute Porter", func() { - By("By creating a new Installation") - ctx := context.Background() - - inst := &porterv1.Installation{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "porter.sh/v1", - Kind: "Installation", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: InstallationName, - Namespace: testNamespace, - }, - Spec: porterv1.InstallationSpec{ - SchemaVersion: "1.0.0", - Name: "hello", - Namespace: "operator-tests", - Bundle: porterv1.OCIReferenceParts{ - Repository: "getporter/porter-hello", - Version: "0.1.1", - }, - }, - } - Expect(k8sClient.Create(ctx, inst)).Should(Succeed()) - - // Wait for the job to be created - jobs := waitForJobStarted(ctx) - job := jobs.Items[0] - - // Validate that the job succeeded - job = waitForJobFinished(ctx, job) - - // If the job failed, print some debug info - if job.Status.Succeeded == 0 { - Log("+++JOB (%s)+++", job.Name) - LogJson(job.Status.String()) - - Log("+++POD+++") - pods := &corev1.PodList{} - k8sClient.List(ctx, pods, client.HasLabels{"job-name=" + job.Name}) - if len(pods.Items) > 0 { - LogJson(pods.Items[0].String()) - } - Fail("The job was not successful") - } - - // Validate that the installation status was updated - instName := types.NamespacedName{Namespace: inst.Namespace, Name: inst.Name} - Expect(k8sClient.Get(ctx, instName, inst)).To(Succeed()) - Expect(apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionScheduled))) - Expect(apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionStarted))) - Expect(apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionComplete))) - }) - }) -}) - -func waitForJobStarted(ctx context.Context) batchv1.JobList { - jobs := batchv1.JobList{} - inNamespace := client.InNamespace(testNamespace) - waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second) - defer cancel() - for { - select { - case <-waitCtx.Done(): - 
Fail(errors.Wrap(waitCtx.Err(), "timeout waiting for job to be created").Error()) - default: - err := k8sClient.List(ctx, &jobs, inNamespace) - Expect(err).Should(Succeed()) - if len(jobs.Items) > 0 { - return jobs - } - - time.Sleep(time.Second) - continue - } - } -} - -func waitForJobFinished(ctx context.Context, job batchv1.Job) batchv1.Job { - waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second) - defer cancel() - for { - select { - case <-waitCtx.Done(): - fmt.Println(job.String()) - Fail(errors.Wrapf(waitCtx.Err(), "timeout waiting for job %s/%s to complete", job.Namespace, job.Name).Error()) - default: - jobName := types.NamespacedName{Name: job.Name, Namespace: job.Namespace} - Expect(k8sClient.Get(waitCtx, jobName, &job)).To(Succeed()) - - if IsJobDone(job.Status) { - return job - } - - time.Sleep(500 * time.Millisecond) - } - } -} - -func IsVolume(name string) gomegatypes.GomegaMatcher { - return WithTransform(func(v corev1.Volume) string { return v.Name }, Equal(name)) -} - -func IsVolumeMount(name string) gomegatypes.GomegaMatcher { - return WithTransform(func(v corev1.VolumeMount) string { return v.Name }, Equal(name)) -} - -func IsJobDone(status batchv1.JobStatus) bool { - for _, c := range status.Conditions { - if c.Type == batchv1.JobFailed || c.Type == batchv1.JobComplete { - return true - } - } - - return false -} - -func Log(value string, args ...interface{}) { - GinkgoWriter.Write([]byte(fmt.Sprintf(value, args...))) -} - -func LogJson(value string) { - GinkgoWriter.Write(pretty.Pretty([]byte(value))) -} diff --git a/controllers/porter_resource.go b/controllers/porter_resource.go new file mode 100644 index 00000000..3af5953e --- /dev/null +++ b/controllers/porter_resource.go @@ -0,0 +1,169 @@ +package controllers + +import ( + "context" + "fmt" + "reflect" + "time" + + porterv1 "get.porter.sh/operator/api/v1" + "github.com/go-logr/logr" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +type porterResource interface { + client.Object + GetStatus() porterv1.PorterResourceStatus + SetStatus(value porterv1.PorterResourceStatus) +} + +type patchFunc func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error + +func PatchObjectWithRetry(ctx context.Context, log logr.Logger, clnt client.Client, patch patchFunc, obj client.Object, newObj func() client.Object) error { + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + kind := obj.GetObjectKind().GroupVersionKind().Kind + + for { + key := client.ObjectKeyFromObject(obj) + latest := newObj() + if err := clnt.Get(ctx, key, latest); err != nil { + return errors.Wrap(err, fmt.Sprintf("could not get the latest %s definition", kind)) + } + + patchObj := client.MergeFrom(latest) + err := patch(ctx, obj, patchObj) + if err != nil { + if apierrors.IsConflict(err) { + continue // try again + } + return errors.Wrapf(err, "failed to patch %s", kind) + } + + if log.V(Log4Debug).Enabled() { + patchDump, _ := patchObj.Data(obj) + log.V(Log4Debug).Info("Applied patch", "data", string(patchDump)) + } + return nil + } +} + +func applyAgentAction(log logr.Logger, resource porterResource, 
action *porterv1.AgentAction) { + log.V(Log5Trace).Info(fmt.Sprintf("Syncing AgentAction status with %s", resource.GetObjectKind().GroupVersionKind().Kind)) + status := resource.GetStatus() + status.ObservedGeneration = resource.GetGeneration() + status.Phase = porterv1.PhaseUnknown + + if action == nil { + status.Action = nil + status.Conditions = nil + log.V(Log5Trace).Info("Cleared status because there is no current agent action") + } else { + status.Action = &corev1.LocalObjectReference{Name: action.Name} + if action.Status.Phase != "" { + status.Phase = action.Status.Phase + } + status.Conditions = make([]metav1.Condition, len(action.Status.Conditions)) + copy(status.Conditions, action.Status.Conditions) + + if log.V(Log5Trace).Enabled() { + conditions := make([]string, len(status.Conditions)) + for i, condition := range status.Conditions { + conditions[i] = condition.Type + } + log.V(Log5Trace).Info("Copied status from agent action", "action", action.Name, "phase", action.Status.Phase, "conditions", conditions) + } + } + + resource.SetStatus(status) +} + +func isDeleted(resource porterResource) bool { + timestamp := resource.GetDeletionTimestamp() + return timestamp != nil && !timestamp.IsZero() +} + +func isDeleteProcessed(resource porterResource) bool { + status := resource.GetStatus() + return isDeleted(resource) && apimeta.IsStatusConditionTrue(status.Conditions, string(porterv1.ConditionComplete)) +} + +func isFinalizerSet(resource porterResource) bool { + for _, finalizer := range resource.GetFinalizers() { + if finalizer == porterv1.FinalizerName { + return true + } + } + return false +} + +// ensureFinalizerSet sets a finalizer on the resource and saves it, if necessary. +func ensureFinalizerSet(ctx context.Context, log logr.Logger, client client.Client, resource porterResource) (updated bool, err error) { + // Ensure all resources have a finalizer so we can react when they are deleted + if !isDeleted(resource) { + // The object is not being deleted, so if it does not have our finalizer, + // then let's add the finalizer and update the object. This is equivalent to + // registering our finalizer. + if !isFinalizerSet(resource) { + log.V(Log5Trace).Info("adding finalizer") + controllerutil.AddFinalizer(resource, porterv1.FinalizerName) + return true, client.Update(ctx, resource) + } + } + return false, nil +} + +// removeFinalizer deletes the porter finalizer from the specified resource and saves it. +func removeFinalizer(ctx context.Context, log logr.Logger, client client.Client, inst *porterv1.Installation) error { + log.V(Log5Trace).Info("removing finalizer") + controllerutil.RemoveFinalizer(inst, porterv1.FinalizerName) + return client.Update(ctx, inst) +} + +// Build the set of labels used to uniquely identify the associated AgentAction. +func getActionLabels(resource metav1.Object) map[string]string { + typeInfo, err := meta.TypeAccessor(resource) + if err != nil { + panic(err) + } + + return map[string]string{ + porterv1.LabelManaged: "true", + porterv1.LabelResourceKind: typeInfo.GetKind(), + porterv1.LabelResourceName: resource.GetName(), + porterv1.LabelResourceGeneration: fmt.Sprintf("%d", resource.GetGeneration()), + } +} + +// resourceChanged is a predicate that filters the events sent to Reconcile; +// it only triggers when the spec or the finalizer was changed, +// and it allows forcing Reconcile with the retry annotation as well.
+type resourceChanged struct { + predicate.Funcs +} + +func (resourceChanged) Update(e event.UpdateEvent) bool { + if e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() { + return true + } + + if !reflect.DeepEqual(e.ObjectNew.GetFinalizers(), e.ObjectOld.GetFinalizers()) { + return true + } + + if e.ObjectNew.GetAnnotations()[porterv1.AnnotationRetry] != e.ObjectOld.GetAnnotations()[porterv1.AnnotationRetry] { + return true + } + + return false +} diff --git a/controllers/porter_resource_test.go b/controllers/porter_resource_test.go new file mode 100644 index 00000000..32561b90 --- /dev/null +++ b/controllers/porter_resource_test.go @@ -0,0 +1,122 @@ +package controllers + +import ( + "testing" + + porterv1 "get.porter.sh/operator/api/v1" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func Test_resourceChanged_Update(t *testing.T) { + predicate := resourceChanged{} + + t.Run("spec changed", func(t *testing.T) { + e := event.UpdateEvent{ + ObjectOld: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + ObjectNew: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + }, + }, + } + assert.True(t, predicate.Update(e), "expected changing the generation to trigger reconciliation") + }) + + t.Run("finalizer added", func(t *testing.T) { + e := event.UpdateEvent{ + ObjectOld: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + ObjectNew: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + Finalizers: []string{porterv1.FinalizerName}, + }, + }, + } + assert.True(t, predicate.Update(e), "expected setting a finalizer to trigger reconciliation") + }) + + t.Run("retry annotation changed", func(t *testing.T) { + e := event.UpdateEvent{ + ObjectOld: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + ObjectNew: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + Annotations: map[string]string{ + porterv1.AnnotationRetry: "1", + }, + }, + }, + } + assert.True(t, predicate.Update(e), "expected changing the retry annotation to trigger reconciliation") + }) + + t.Run("status changed", func(t *testing.T) { + e := event.UpdateEvent{ + ObjectOld: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + }, + Status: porterv1.InstallationStatus{PorterResourceStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 1, + }}, + }, + ObjectNew: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + }, + Status: porterv1.InstallationStatus{PorterResourceStatus: porterv1.PorterResourceStatus{ + ObservedGeneration: 2, + }}, + }, + } + assert.False(t, predicate.Update(e), "expected status changes to be ignored") + }) + + t.Run("label added", func(t *testing.T) { + e := event.UpdateEvent{ + ObjectOld: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + ResourceVersion: "1", + }, + }, + ObjectNew: &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + ResourceVersion: "2", + Labels: map[string]string{ + "myLabel": "super useful", + }, + }, + }, + } + assert.False(t, predicate.Update(e), "expected metadata changes to be ignored") + }) +} + +func Test_isFinalizerSet(t *testing.T) { + inst := &porterv1.Installation{ + ObjectMeta: metav1.ObjectMeta{}, + } + assert.False(t, isFinalizerSet(inst)) + + inst.Finalizers = append(inst.Finalizers, "something-else")
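+ // a finalizer owned by another controller should not count as ours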
+ assert.False(t, isFinalizerSet(inst)) + + inst.Finalizers = append(inst.Finalizers, porterv1.FinalizerName) + assert.True(t, isFinalizerSet(inst)) +} diff --git a/docs/content/file-formats.md b/docs/content/file-formats.md index 6c6072bf..1c2ea41d 100644 --- a/docs/content/file-formats.md +++ b/docs/content/file-formats.md @@ -10,6 +10,7 @@ although they both use the term namespace, there is no relation between Kubernet The same goes for the name and labels fields. * [Installation](#installation) +* [AgentAction](#agent-action) * [AgentConfig](#agent-config) * [PorterConfig](#porter-config) @@ -22,10 +23,27 @@ the `porter installation show NAME -o yaml` command into the spec field and have In addition to the normal fields available on a [Porter Installation document](/reference/file-formats/) the following fields are supported: -| Field | Required | Default | Description | - | ----------- | ----------- | ------- | ----------- | -| agentConfig | false | See [Agent Config](#agent-config) | Reference to an AgentConfig resource in the same namespace. | -| porterConfig | false | See [Porter Config](#porter-config) | Reference to a PorterConfig resource in the same namespace. | +| Field | Required | Default | Description | +|--------------|----------|-------------------------------------|-------------------------------------------------------------| +| agentConfig | false | See [Agent Config](#agent-config) | Reference to an AgentConfig resource in the same namespace. | +| porterConfig | false | See [Porter Config](#porter-config) | Reference to a PorterConfig resource in the same namespace. | + +## Agent Action + +The AgentAction CRD represents running a Porter command with the [Porter Agent](https://release-v1.porter.sh/operator/#porter-agent). +The operator uses this resource internally to run `porter installation apply` when an Installation resource is changed, and you may use it to execute arbitrary commands as well, such as running a custom action on an installation. + +| Field | Required | Default | Description | +|--------------|----------|----------------------------------------|-----------------------------------------------------------------------------------------| +| agentConfig | false | See [Agent Config](#agent-config) | Reference to an AgentConfig resource in the same namespace. | +| porterConfig | false | See [Porter Config](#porter-config) | Reference to a PorterConfig resource in the same namespace. | +| command | false | /app/.porter/agent | Overrides the entrypoint of the Porter Agent image. | +| args | true | None. | Arguments to pass to the porter command. Do not include "porter" as the first argument. | +| files | false | None. | Files that should be present in the working directory where the command is run. | +| env | false | Settings for the kubernetes driver. | Additional environment variables that should be set. | +| envFrom | false | None. | Load environment variables from a ConfigMap or Secret. | +| volumeMounts | false | Porter's config and working directory. | Additional volume mounts for the Porter Agent. | +| volumes | false | Porter's config and working directory. | Additional volumes that should be mounted into the Porter Agent. |
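+
+For example, here is a minimal AgentAction that runs `porter logs hello` with the Porter Agent. The resource name, namespace, and installation name below are placeholders for this sketch:
+
+```
+apiVersion: porter.sh/v1
+kind: AgentAction
+metadata:
+  name: hello-logs
+  namespace: operator
+spec:
+  args: ["logs", "hello"]
+```
+
+Since command is not set, the agent's default entrypoint runs porter with the given args.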
## Agent Config @@ -45,7 +63,7 @@ metadata: name: customAgent spec: porterRepository: ghcr.io/getporter/porter-agent - porterVersion: v1.0.0-alpha.8 + porterVersion: v1.0.0-alpha.12 serviceAccount: porter-agent volumeSize: 64Mi pullPolicy: Always diff --git a/docs/content/install.md b/docs/content/install.md index c58d9c4c..40468b60 100644 --- a/docs/content/install.md +++ b/docs/content/install.md @@ -3,7 +3,7 @@ title: Install the Porter Operator description: Get up and running with the Porter Operator --- -If you aren't already familiar with Porter, we recommend that you install and use [Porter v1.0.0-alpha.9][install-porter] first and then once you are comfortable, learn how to automate Porter with the operator. +If you aren't already familiar with Porter, we recommend that you install and use [Porter v1.0.0-alpha.12][install-porter] first and then, once you are comfortable, learn how to automate Porter with the operator. The commands below use the v0.4.0 release, but there may be a more recent release of the Operator. Check our [releases page](https://github.com/getporter/operator/releases) and use the most recent version number. @@ -16,7 +16,7 @@ $ porter explain -r ghcr.io/getporter/porter-operator:v0.4.0 Name: porter-operator Description: The Porter Operator for Kubernetes. Execute bundles on a Kubernetes cluster. Version: v0.4.0 -Porter Version: v1.0.0-alpha.9 +Porter Version: v1.0.0-alpha.12 Credentials: Name Description Required Applies To diff --git a/go.mod b/go.mod index d62e413e..7ec6a307 100644 --- a/go.mod +++ b/go.mod @@ -2,29 +2,10 @@ module get.porter.sh/operator go 1.17 -// These replace statements should be kept in sync with the ones in Porter's go.mod -replace ( - // Use Porter's cnab-go - github.com/cnabio/cnab-go => github.com/carolynvs/cnab-go v0.20.2-0.20210805155536-9a543e0636f4 - - // return-digest - github.com/cnabio/cnab-to-oci => github.com/carolynvs/cnab-to-oci v0.3.0-beta4.0.20210812163007-0766f78b7ee1 - - // See https://github.com/hashicorp/go-plugin/pull/127 and - // https://github.com/hashicorp/go-plugin/pull/163 - // Also includes a branch we haven't PR'd yet: capture-yamux-logs - // Tagged from v1.4.0, the improved-configuration branch - github.com/hashicorp/go-plugin => github.com/getporter/go-plugin v1.4.0-improved-configuration.1 - - // go.mod doesn't propogate replacements in the dependency graph so I'm copying this from github.com/moby/buildkit - github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305 - - // Fixes https://github.com/spf13/viper/issues/761 - github.com/spf13/viper => github.com/getporter/viper v1.7.1-porter.2.0.20210514172839-3ea827168363 -) +// Any replace statements should be kept in sync with the ones in Porter's go.mod require ( - get.porter.sh/porter v1.0.0-alpha.8 + get.porter.sh/porter v1.0.0-alpha.12 github.com/carolynvs/magex v0.6.1 github.com/go-logr/logr v0.3.0 github.com/magefile/mage v1.11.0 @@ -60,13 +41,13 @@ require ( github.com/carolynvs/aferox v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/cnabio/cnab-go v0.21.0 // indirect + github.com/cnabio/cnab-go v0.23.0 // indirect github.com/cnabio/cnab-to-oci v0.3.1-beta1.0.20210614060230-e4d2bd5441c8 // indirect github.com/containerd/containerd v1.5.3 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20210303052042-6bc126869bf4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/cli v20.10.7+incompatible // indirect
- github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/distribution v2.8.0+incompatible // indirect github.com/docker/docker v20.10.7+incompatible // indirect github.com/docker/docker-credential-helpers v0.6.3 // indirect github.com/docker/go-connections v0.4.0 // indirect diff --git a/go.sum b/go.sum index f5840d78..c7daa872 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -get.porter.sh/porter v1.0.0-alpha.8 h1:P/+5U0r75+fn6ZUW6Lix4UgE1Pb9pLPc0XtraAgGeK0= -get.porter.sh/porter v1.0.0-alpha.8/go.mod h1:eG2RWoE+l/M3ooync0H/tT1dU5A5eHKppzo1IAOQGZY= +get.porter.sh/porter v1.0.0-alpha.12 h1:FY3XPG/kg/Gkep2d86S66mkCyeMS+3HFTrBOgVO65E0= +get.porter.sh/porter v1.0.0-alpha.12/go.mod h1:/F7/nofKmVQm6v662YncrD+Mtm8owb5KP374hhOw9r0= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8= @@ -67,6 +67,7 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v12.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -146,6 +147,7 @@ github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= @@ -191,6 +193,7 @@ github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:o github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= @@ -241,10 +244,6 @@ github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGr github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= github.com/carolynvs/aferox v0.3.0 h1:CMT50zX88amTMbFfFIWSTKRVRaOw6sejUMbbKiCD4zE= github.com/carolynvs/aferox v0.3.0/go.mod h1:eb7CHGIO33CCZS//xtnblvPZbuuZMv0p1VbhiSwZnH4= -github.com/carolynvs/cnab-go v0.20.2-0.20210805155536-9a543e0636f4 h1:w6gndqIhqXU2icTc+RkqxBHD5TknsD94YKfeG9jll0U= -github.com/carolynvs/cnab-go v0.20.2-0.20210805155536-9a543e0636f4/go.mod h1:u/Y7piTNJuFs2KfQqmda6uqIx4rtqQ73H6IW7gudz7E= -github.com/carolynvs/cnab-to-oci v0.3.0-beta4.0.20210812163007-0766f78b7ee1 h1:BH1YyJJLPUcXv667KEZwpjSBRXOw05437dKYPbEeuXk= -github.com/carolynvs/cnab-to-oci v0.3.0-beta4.0.20210812163007-0766f78b7ee1/go.mod h1:EM5BN83Hf0YCBQrkdYNuT2XamG9BfKBDxGuA0I3nZ3U= github.com/carolynvs/datetime-printer v0.2.0/go.mod h1:p9W8ZUhmQUOVD5kiDuGXwRG65/nTkZWlLylY7s+Qw2k= github.com/carolynvs/magex v0.6.0/go.mod h1:hqaEkr9TAv+kFb/5wgDiTdszF13rpe0Q+bWHmTe6N74= github.com/carolynvs/magex v0.6.1 h1:E/ezIActxIslFzwR/tD3j1CUPa64utMzN6aKQ2/xFG4= @@ -260,6 +259,9 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= @@ -277,6 +279,11 @@ github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e/go.mod h1:yMWuSON github.com/cloudflare/cfssl v1.4.1/go.mod h1:KManx/OJPb5QY+y0+o/898AMcM128sF0bURvoVUSjTo= github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= +github.com/cnabio/cnab-go v0.10.0-beta1/go.mod h1:5c4uOP6ZppR4nUGtCMAElscRiYEUi44vNQwtSAvISXk= +github.com/cnabio/cnab-go v0.23.0 h1:BBzSHCHWs4u0RT21VwGP9nAF+cq1pAkG5Qai4MQaOcs= +github.com/cnabio/cnab-go v0.23.0/go.mod h1:0DE0YB+qVoKLeg21cX30CIxBBhwEvs6v9b0prx9zoiA= +github.com/cnabio/cnab-to-oci v0.3.1-beta1.0.20210614060230-e4d2bd5441c8 h1:oi/g+7v0mVYfp5iETwm7DO6SFxpsq0hCN3fzywA6Lrk= +github.com/cnabio/cnab-to-oci 
v0.3.1-beta1.0.20210614060230-e4d2bd5441c8/go.mod h1:nl9mHZV0Tvj6ZirWkjpiWuVp71RenwUQ98KHA9ZY27g= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -322,6 +329,7 @@ github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTV github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.3 h1:mfKOepNDIJ3EiBTEyHFpEqB6YSOSkGcjPDIu7cD+YzY= github.com/containerd/containerd v1.5.3/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= +github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -331,6 +339,7 @@ github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EX github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= @@ -376,7 +385,9 @@ github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -417,6 +428,7 @@ github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8l github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/buildx v0.5.1/go.mod h1:YlxswdEKSMrxCCSYWU2p/Ii1oOOwu8lT3tJzJDpP7J4= @@ -431,8 +443,9 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY= +github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -453,6 +466,7 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -511,8 +525,6 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/getporter/go-plugin v1.4.0-improved-configuration.1/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/getporter/viper v1.7.1-porter.2.0.20210514172839-3ea827168363/go.mod h1:TMyCLryAYE7EgeSfzTbjQLLiLkOjZeJuiFVGK5FYwog= github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -642,6 +654,7 @@ 
github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3 github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= @@ -649,6 +662,7 @@ github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.3.0/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -661,6 +675,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -794,13 +809,16 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -829,6 +847,7 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= @@ -867,6 +886,7 @@ github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg= github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jeremywohl/flatten v1.0.1/go.mod h1:4AmD/VxjWcI5SRB0n6szE2A6s2fsNHDLO0nAlMHgfLQ= @@ -961,6 +981,7 @@ github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls= github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1159,6 +1180,8 @@ github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod 
h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/osteele/liquid v1.3.0/go.mod h1:VmzQQHa5v4E0GvGzqccfAfLgMwRk2V+s1QbxYx9dGak= +github.com/osteele/tuesday v1.0.3/go.mod h1:pREKpE+L03UFuR+hiznj3q7j3qB1rUZ4XfKejwWFF2M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -1193,6 +1216,7 @@ github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1 github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= @@ -1209,7 +1233,9 @@ github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -1220,6 +1246,7 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -1229,8 +1256,11 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs 
v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/qri-io/jsonpointer v0.1.0/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= github.com/qri-io/jsonpointer v0.1.1 h1:prVZBZLL6TW5vsSB9fFHFAMBLI4b0ri5vribQlTJiBA= github.com/qri-io/jsonpointer v0.1.1/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= +github.com/qri-io/jsonschema v0.1.1/go.mod h1:QpzJ6gBQ0GYgGmh7mDQ1YsvvhSgE4rYj0k8t5MBOmUY= github.com/qri-io/jsonschema v0.2.2-0.20210723092138-2eb22ee8115f h1:fG/BLRtlFDgCs/dvPjiAN3v2Mrkr1KRWkEdXj20UwLY= github.com/qri-io/jsonschema v0.2.2-0.20210723092138-2eb22ee8115f/go.mod h1:g7DPkiOsK1xv6T/Ao5scXRkd+yTFygcANPBaaqW+VrI= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= @@ -1290,10 +1320,11 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.5.1 h1:VHu76Lk0LSP1x254maIu2bplkWpfBWI+B+6fdoZprcg= github.com/spf13/afero v1.5.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= @@ -1314,6 +1345,10 @@ github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1360,10 +1395,11 @@ github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod h1:a7cilN github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf/go.mod h1:lJAxK//iyZ3yGbQswdrPTxugZIDM7sd4bEsD0x3XMHk= github.com/tonistiigi/go-actions-cache v0.0.0-20210714033416-b93d7f1b2e70/go.mod h1:dNS+PPTqGnSl80x3wEyWWCHeON5xiBGtcM0uD6CgHNU= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod 
h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= -github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305/go.mod h1:gXOLibKqQTRAVuVZ9gX7G9Ykky8ll8yb4slxsEMoY0c= github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4= github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -1410,6 +1446,7 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofm github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1425,6 +1462,7 @@ github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhu github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8= github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb/go.mod h1:29UiAJNsiVdvTBFCJW8e3q6dcDbOoPkhMgttOSCIMMY= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= @@ -1500,6 +1538,7 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1515,6 +1554,7 @@ golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1594,6 +1634,7 @@ golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1615,7 +1656,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= @@ -1656,6 +1696,7 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1683,6 +1724,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190830141801-acfa387b8d69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2043,6 +2085,7 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.5 h1:nI5egYTGJakVyOryqLs1cQO5dO0ksin5XXs2pspk75k= honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/api v0.0.0-20191016110408-35e52d86657a/go.mod h1:/L5qH+AD540e7Cetbui1tuJeXdmNhO8jM6VkXeDdDhQ= k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= @@ -2054,6 +2097,7 @@ k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJ k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.0.0-20190806215851-162a2dabc72f/go.mod h1:+ntn62igV2hyNj7/0brOvXSMONE2KxcePkSxK7/9FFQ= +k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ= k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= @@ -2067,6 +2111,7 @@ k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/client-go v0.0.0-20191016111102-bec269661e48/go.mod h1:hrwktSwYGI4JK+TJA3dMaFyyvHVi/aLarVHpbs8bgCU= k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= @@ -2095,6 +2140,7 @@ k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -2103,6 +2149,7 @@ 
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= @@ -2110,6 +2157,7 @@ k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAG k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/installer/Dockerfile.tmpl b/installer/Dockerfile.tmpl index be8a7fa8..7bddf0ba 100644 --- a/installer/Dockerfile.tmpl +++ b/installer/Dockerfile.tmpl @@ -1,10 +1,15 @@ +# syntax=docker.io/docker/dockerfile-upstream:1.4.0-rc2 FROM debian:stretch-slim -ARG BUNDLE_DIR +# PORTER_INIT + ARG KUSTOMIZE_VERSION="v3.8.7" -RUN apt-get update && apt-get install -y ca-certificates curl +RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ + apt-get update && apt-get install -y ca-certificates curl +# Install yq and kustomize RUN curl -sLo /usr/bin/yq https://github.com/mikefarah/yq/releases/download/v4.20.2/yq_linux_amd64 && \ chmod +x /usr/bin/yq &&\ curl -sLo /tmp/kustomize.tar.gz https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz &&\ @@ -16,4 +21,5 @@ RUN curl -sLo /usr/bin/yq https://github.com/mikefarah/yq/releases/download/v4.2 # PORTER_MIXINS # Use the BUNDLE_DIR build argument to copy files into the bundle -COPY . $BUNDLE_DIR +COPY --link . 
$BUNDLE_DIR + diff --git a/installer/vanilla.porter.yaml b/installer/vanilla.porter.yaml index c76f7090..551fb0d3 100644 --- a/installer/vanilla.porter.yaml +++ b/installer/vanilla.porter.yaml @@ -65,7 +65,7 @@ parameters: credentials: - name: kubeconfig description: Kubeconfig file for cluster where the operator should be installed - path: /root/.kube/config + path: /home/nonroot/.kube/config mixins: - exec diff --git a/mage/docker/docker_test.go b/mage/docker/docker_test.go index c1922c06..15c0e55d 100644 --- a/mage/docker/docker_test.go +++ b/mage/docker/docker_test.go @@ -11,12 +11,12 @@ func TestExtractImageWithDigest(t *testing.T) { const inspectOutput = `[ { "RepoDigests": [ - "localhost:5000/porterops-controller@sha256:c742b1cccc5a69abd082b1d61c8ef616a27699b6b52430ac700019f22800c06f" + "localhost:5000/porter-operator-manager@sha256:c742b1cccc5a69abd082b1d61c8ef616a27699b6b52430ac700019f22800c06f" ] } ]` ref, err := ExtractRepoDigest(inspectOutput) require.NoError(t, err) - assert.Equal(t, "localhost:5000/porterops-controller@sha256:c742b1cccc5a69abd082b1d61c8ef616a27699b6b52430ac700019f22800c06f", ref) + assert.Equal(t, "localhost:5000/porter-operator-manager@sha256:c742b1cccc5a69abd082b1d61c8ef616a27699b6b52430ac700019f22800c06f", ref) } diff --git a/mage/env.go b/mage/env.go index ccc917bf..f8bb3fa2 100644 --- a/mage/env.go +++ b/mage/env.go @@ -25,15 +25,18 @@ type Environment struct { } func getAmbientEnvironment() Environment { - name := os.Getenv("ENV") + name := os.Getenv("PORTER_ENV") switch name { case "prod", "production": return GetProductionEnvironment() case "test", "": return GetTestEnvironment() default: - mgx.Must(errors.Errorf("invalid ENV %q", name)) - return Environment{} + registry := os.Getenv("PORTER_OPERATOR_REGISTRY") + if registry == "" { + mgx.Must(errors.New("environment variable PORTER_OPERATOR_REGISTRY must be set to push to a custom registry")) + } + return buildEnvironment(name, registry) } } @@ -57,7 +60,7 @@ func buildEnvironment(name string, registry string) Environment { return Environment{ Name: name, Registry: registry, - ManagerImagePrefix: path.Join(registry, "porterops-controller:"), + ManagerImagePrefix: path.Join(registry, "porter-operator-manager:"), BundlePrefix: path.Join(registry, "porter-operator:"), } } diff --git a/magefile.go b/magefile.go index 4163ec20..4f810f2e 100644 --- a/magefile.go +++ b/magefile.go @@ -117,7 +117,13 @@ func BuildBundle() { meta := releases.LoadMetadata() version := strings.TrimPrefix(meta.Version, "v") - porter("build", "--version", version, "-f=porter.yaml").In("installer").Must().RunV() + verbose := "" + if mg.Verbose() { + verbose = "--verbose" + } + porter("build", "--version", version, "-f=porter.yaml", verbose). + CollapseArgs().Env("PORTER_EXPERIMENTAL=build-drivers", "PORTER_BUILD_DRIVER=buildkit"). 
+ In("installer").Must().RunV() } // Build the controller image @@ -140,10 +146,11 @@ func getMixins() error { mixins := []struct { name string + url string feed string version string }{ - {name: "helm3", feed: "https://mchorfa.github.io/porter-helm3/atom.xml", version: "v0.1.14"}, + {name: "helm3", url: "https://github.com/carolynvs/porter-helm3/releases/download", version: "v0.1.15-8-g864f450"}, {name: "kubernetes", feed: "https://cdn.porter.sh/mixins/atom.xml", version: "latest"}, } var errG errgroup.Group @@ -160,7 +167,13 @@ func getMixins() error { if mixin.version == "" { mixin.version = "latest" } - return porter("mixin", "install", mixin.name, "--version", mixin.version, "--feed-url", mixin.feed).Run() + var source string + if mixin.feed != "" { + source = "--feed-url=" + mixin.feed + } else { + source = "--url=" + mixin.url + } + return porter("mixin", "install", mixin.name, "--version", mixin.version, source).Run() }) } @@ -197,10 +210,7 @@ func buildManifests() { mgx.Must(errors.Wrap(err, "could not remove generated manifests directory")) } - // Produce CRDs that work back to Kubernetes 1.11 (no version conversion) - crdOpts := "crd:trivialVersions=true,preserveUnknownFields=false" - - must.RunV("controller-gen", crdOpts, "rbac:roleName=manager-role", "webhook", `paths="./..."`, "output:crd:artifacts:config=config/crd/bases") + must.RunV("controller-gen", "rbac:roleName=manager-role", "crd", "webhook", `paths="./..."`, "output:crd:artifacts:config=config/crd/bases") kustomize("build", "config/default", "-o", "installer/manifests/operator.yaml").RunV() } @@ -243,7 +253,20 @@ func UpdateTestfiles() { func TestIntegration() { mg.Deps(UseTestEnvironment, CleanTestdata, EnsureGinkgo, EnsureDeployed) - must.RunV("go", "test", "-tags=integration", "./...", "-coverprofile=coverage-integration.out") + // TODO: we need to run these tests either isolated against EnvTest, or + // against a cluster that doesn't have the operator deployed. Otherwise + // both the controller running in the test, and the controller on the cluster + // are responding to the same events. + // For now, it's up to the caller to use a fresh cluster with CRDs installed until we can fix it. + + kubectl("delete", "deployment", "porter-operator-controller-manager", "-n=porter-operator-system").RunV() + + v := "" + if mg.Verbose() { + v = "-v" + } + must.Command("go", "test", v, "-tags=integration", "./tests/integration/...", "-coverprofile=coverage-integration.out", "-args", "-ginkgo.v"). + CollapseArgs().RunV() } // Check if the operator is deployed to the test cluster. @@ -261,7 +284,7 @@ func Deploy() { PublishLocalPorterAgent() PublishBundle() - porter("credentials", "apply", "hack/creds.yaml", "-n=operator", "--debug", "--debug-plugins").Must().RunV() + porter("credentials", "apply", "hack/creds.yaml", "-n=operator").Must().RunV() bundleRef := Env.BundlePrefix + meta.Version porter("install", "operator", "-r", bundleRef, "-c=kind", "--force", "-n=operator").Must().RunV() } @@ -367,7 +390,8 @@ func SetupNamespace(name string) { // It would be neat if Porter could handle this for us ps := "" if os.Getenv("PORTER_AGENT_REPOSITORY") != "" && os.Getenv("PORTER_AGENT_VERSION") != "" { - ps = "-p=./hack/params.yaml" + porter("parameters", "apply", "./hack/params.yaml", "-n=operator").RunV() + ps = "-p=dev-build" } porter("invoke", "operator", "--action=configureNamespace", ps, "--param", "namespace="+name, "-c", "kind", "-n=operator"). 
@@ -385,10 +409,49 @@ func Clean() {
 
 // Remove data created by running the test suite
 func CleanTestdata() {
 	if useCluster() {
-		kubectl("delete", "ns", "-l", "porter.sh/testdata=true").RunV()
+		// find all test namespaces
+		output, _ := kubectl("get", "ns", "-l", "porter.sh/testdata=true", `--template={{range .items}}{{.metadata.name}},{{end}}`).
+			OutputE()
+		namespaces := strings.Split(output, ",")
+
+		// Remove the finalizers from any testdata in each namespace,
+		// otherwise they block deleting the namespace
+		for _, namespace := range namespaces {
+			if namespace == "" {
+				continue
+			}
+
+			output, _ = kubectl("get", "installation,agentaction", "-n", namespace, `--template={{range .items}}{{.kind}}/{{.metadata.name}},{{end}}`).
+				Output()
+			resources := strings.Split(output, ",")
+			for _, resource := range resources {
+				if resource == "" {
+					continue
+				}
+
+				removeFinalizers(namespace, resource)
+			}
+
+			// Okay, now it's safe to delete the namespace
+			kubectl("delete", "ns", namespace).Run()
+		}
 	}
 }
 
+// Remove all finalizers from the specified resource
+// name should be in the format: kind/name
+func removeFinalizers(namespace, name string) {
+	// Get the resource definition
+	resource, _ := kubectl("get", name, "-n", namespace, "-o=yaml").Output()
+
+	// Use yq to remove the finalizers
+	resource, _ = must.Command("yq", "eval", ".metadata.finalizers = null").
+		Stdin(strings.NewReader(resource)).Output()
+
+	// Update the resource
+	kubectl("apply", "-f", "-").Stdin(strings.NewReader(resource)).Run()
+}
+
 // Remove any porter data in the cluster
 func CleanAllData() {
 	if useCluster() {
@@ -483,7 +546,7 @@ func EnsureKustomize() {
 
 // Ensure controller-gen is installed.
 func EnsureControllerGen() {
-	mgx.Must(pkg.EnsurePackage("sigs.k8s.io/controller-tools/cmd/controller-gen", "v0.4.1", "--version"))
+	mgx.Must(pkg.EnsurePackage("sigs.k8s.io/controller-tools/cmd/controller-gen", "v0.8.0", "--version"))
 }
 
 func pwd() string {
diff --git a/main.go b/main.go
index 5565227c..2f4f0739 100644
--- a/main.go
+++ b/main.go
@@ -69,6 +69,14 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "Installation")
 		os.Exit(1)
 	}
+	if err = (&controllers.AgentActionReconciler{
+		Client: mgr.GetClient(),
+		Log:    ctrl.Log.WithName("controllers").WithName("AgentAction"),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "AgentAction")
+		os.Exit(1)
+	}
 	// +kubebuilder:scaffold:builder
 
 	if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil {
diff --git a/tests/integration/installation_test.go b/tests/integration/installation_test.go
new file mode 100644
index 00000000..de1ebf77
--- /dev/null
+++ b/tests/integration/installation_test.go
@@ -0,0 +1,179 @@
+//go:build integration
+// +build integration
+
+package integration_test
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	porterv1 "get.porter.sh/operator/api/v1"
+	"get.porter.sh/operator/controllers"
+	"github.com/carolynvs/magex/shx"
+	"github.com/go-logr/logr"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"github.com/pkg/errors"
+	"github.com/tidwall/pretty"
+	batchv1 "k8s.io/api/batch/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var _ = Describe("Installation Lifecycle", func() {
+	Context("When an installation is changed", func() {
+		It("Should run porter", func() {
+			By("creating an agent action")
+			ctx := context.Background()
+
+			Log("create an installation")
+			inst := &porterv1.Installation{
+				TypeMeta: metav1.TypeMeta{
+					APIVersion: "porter.sh/v1",
+					Kind:       "Installation",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "porter-hello",
+					Namespace: testNamespace,
+				},
+				Spec: porterv1.InstallationSpec{
+					SchemaVersion: "1.0.0",
+					Name:          "hello",
+					Namespace:     "operator-tests",
+					Bundle: porterv1.OCIReferenceParts{
+						Repository: "carolynvs/porter-hello-nonroot",
+						Version:    "0.1.0",
+					},
+				},
+			}
+			Expect(k8sClient.Create(ctx, inst)).Should(Succeed())
+			Expect(waitForPorter(ctx, inst, "waiting for the bundle to install")).Should(Succeed())
+			validateInstallationConditions(inst)
+
+			patchInstallation := func(inst *porterv1.Installation) {
+				controllers.PatchObjectWithRetry(ctx, logr.Discard(), k8sClient, k8sClient.Patch, inst, func() client.Object {
+					return &porterv1.Installation{}
+				})
+			}
+
+			Log("upgrade the installation")
+			inst.Spec.Parameters = runtime.RawExtension{Raw: []byte(`{"name": "operator"}`)}
+			patchInstallation(inst)
+			Expect(waitForPorter(ctx, inst, "waiting for the bundle to upgrade")).Should(Succeed())
+			validateInstallationConditions(inst)
+
+			Log("uninstall the installation")
+			inst.Spec.Uninstalled = true
+			patchInstallation(inst)
+			Expect(waitForPorter(ctx, inst, "waiting for the bundle to uninstall")).Should(Succeed())
+			validateInstallationConditions(inst)
+
+			Log("delete the installation")
+			Expect(k8sClient.Delete(ctx, inst)).Should(Succeed())
+			Expect(waitForInstallationDeleted(ctx, inst)).Should(Succeed())
+		})
+	})
+})
+
+func waitForPorter(ctx context.Context, inst *porterv1.Installation, msg string) error {
+	Log("%s: %s/%s", msg, inst.Namespace, inst.Name)
+	key := client.ObjectKey{Namespace: inst.Namespace, Name: inst.Name}
+	ctx, cancel := context.WithTimeout(ctx, 120*time.Second)
+	defer cancel()
+	for {
+		select {
+		case <-ctx.Done():
+			Fail(errors.Wrapf(ctx.Err(), "timeout %s", msg).Error())
+		default:
+			err := k8sClient.Get(ctx, key, inst)
+			if err != nil {
+				// There is lag between when a resource is created and when it can be retrieved
+				if apierrors.IsNotFound(err) {
+					time.Sleep(time.Second)
+					continue
+				}
+				return err
+			}
+
+			// Check if the latest change has been processed
+			if inst.Generation == inst.Status.ObservedGeneration {
+				if apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionComplete)) {
+					return nil
+				}
+
+				if apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionFailed)) {
+					// Grab some extra info to help with debugging
+					debugFailedInstallation(ctx, inst)
+					return errors.New("porter did not run successfully")
+				}
+			}
+
+			time.Sleep(time.Second)
+			continue
+		}
+	}
+}
+
+func debugFailedInstallation(ctx context.Context, inst *porterv1.Installation) {
+	Log("DEBUG: ----------------------------------------------------")
+	actionKey := client.ObjectKey{Name: inst.Status.Action.Name, Namespace: inst.Namespace}
+	action := &porterv1.AgentAction{}
+	if err := k8sClient.Get(ctx, actionKey, action); err != nil {
+		Log(errors.Wrap(err, "could not retrieve the Installation's AgentAction to troubleshoot").Error())
+		return
+	}
+
+	jobKey := client.ObjectKey{Name: action.Status.Job.Name, Namespace: action.Namespace}
+	job := &batchv1.Job{}
+	if err := k8sClient.Get(ctx, jobKey, job); err != nil {
+		Log(errors.Wrap(err, "could not retrieve the Installation's Job to troubleshoot").Error())
+		return
+	}
+
+	shx.Command("kubectl", "logs", "-n="+job.Namespace, "job/"+job.Name).
+		Env("KUBECONFIG=" + "../../kind.config").RunV()
+	Log("DEBUG: ----------------------------------------------------")
+}
+
+func waitForInstallationDeleted(ctx context.Context, inst *porterv1.Installation) error {
+	Log("Waiting for installation to finish deleting: %s/%s", inst.Namespace, inst.Name)
+	key := client.ObjectKey{Namespace: inst.Namespace, Name: inst.Name}
+	waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+	defer cancel()
+	for {
+		select {
+		case <-waitCtx.Done():
+			Fail(errors.Wrap(waitCtx.Err(), "timeout waiting for installation to delete").Error())
+		default:
+			err := k8sClient.Get(ctx, key, inst)
+			if err != nil {
+				if apierrors.IsNotFound(err) {
+					return nil
+				}
+				return err
+			}
+
+			time.Sleep(time.Second)
+			continue
+		}
+	}
+}
+
+func validateInstallationConditions(inst *porterv1.Installation) {
+	// Checks that all expected conditions are set
+	Expect(apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionScheduled))).To(BeTrue())
+	Expect(apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionStarted))).To(BeTrue())
+	Expect(apimeta.IsStatusConditionTrue(inst.Status.Conditions, string(porterv1.ConditionComplete))).To(BeTrue())
+}
+
+func Log(value string, args ...interface{}) {
+	GinkgoWriter.Write([]byte(fmt.Sprintf(value+"\n", args...)))
+}
+
+func LogJson(value string) {
+	GinkgoWriter.Write(pretty.Pretty([]byte(value + "\n")))
+}
diff --git a/controllers/suite_test.go b/tests/integration/suite_test.go
similarity index 88%
rename from controllers/suite_test.go
rename to tests/integration/suite_test.go
index 7fba0898..46f9919e 100644
--- a/controllers/suite_test.go
+++ b/tests/integration/suite_test.go
@@ -1,7 +1,7 @@
 //go:build integration
 // +build integration
 
-package controllers_test
+package integration_test
 
 import (
 	"context"
@@ -9,7 +9,6 @@ import (
 	"path/filepath"
 	"testing"
 
-	"get.porter.sh/operator/controllers"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
@@ -27,6 +26,9 @@ import (
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	"get.porter.sh/operator/controllers"
+
+	portershv1 "get.porter.sh/operator/api/v1"
 	porterv1 "get.porter.sh/operator/api/v1"
 	// +kubebuilder:scaffold:imports
 )
@@ -63,6 +65,9 @@ var _ = BeforeSuite(func(done Done) {
 	Expect(clientgoscheme.AddToScheme(scheme.Scheme)).To(Succeed())
 	Expect(porterv1.AddToScheme(scheme.Scheme)).To(Succeed())
+	err = portershv1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
 	// +kubebuilder:scaffold:scheme
 
 	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
@@ -80,6 +88,12 @@
 	}).SetupWithManager(k8sManager)
 	Expect(err).ToNot(HaveOccurred())
+	err = (&controllers.AgentActionReconciler{
+		Client: k8sManager.GetClient(),
+		Log:    ctrl.Log.WithName("controllers").WithName("AgentAction"),
+	}).SetupWithManager(k8sManager)
+	Expect(err).ToNot(HaveOccurred())
+
 	go func() {
 		err = k8sManager.Start(ctrl.SetupSignalHandler())
 		Expect(err).ToNot(HaveOccurred())
@@ -113,7 +127,7 @@ func createTestNamespace(ctx context.Context) string {
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "ginkgo-tests",
 			Labels: map[string]string{
-				"porter-test":        "true",
+				"porter.sh/testdata": "true",
 			},
 		},
 	}
@@ -164,10 +178,11 @@
 	}
 	agentVersion := os.Getenv("PORTER_AGENT_VERSION")
 	if agentVersion == "" {
-		agentVersion = "latest"
+		// We can switch this back to latest when 1.0.0 of porter releases
+		agentVersion = "v1.0.0-alpha.12"
 	}
 	// Tweak porter agent config for testing
-	porterOpsCfg := &porterv1.AgentConfig{
+	agentCfg := &porterv1.AgentConfig{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "default",
 			Namespace: ns.Name,
@@ -179,7 +194,7 @@
 			InstallationServiceAccount: "installation-agent",
 		},
 	}
-	Expect(k8sClient.Create(ctx, porterOpsCfg)).To(Succeed())
+	Expect(k8sClient.Create(ctx, agentCfg)).To(Succeed())
 	return ns.Name
 }