From a929797a30827ce8b372cd1a6ca107acac98f1d4 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Mon, 7 Oct 2024 17:27:43 +0300 Subject: [PATCH 01/50] feat: add retry strategy support to daemon containers Signed-off-by: MenD32 --- Makefile | 8 +++--- examples/daemon-retry-strategy.yaml | 44 +++++++++++++++++++++++++++++ v3 | 1 + 3 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 examples/daemon-retry-strategy.yaml create mode 120000 v3 diff --git a/Makefile b/Makefile index f3acdfd3c550..14be3b0849c7 100644 --- a/Makefile +++ b/Makefile @@ -558,10 +558,10 @@ endif ifeq ($(AUTH_MODE),sso) grep '127.0.0.1.*dex' /etc/hosts endif - grep '127.0.0.1.*azurite' /etc/hosts - grep '127.0.0.1.*minio' /etc/hosts - grep '127.0.0.1.*postgres' /etc/hosts - grep '127.0.0.1.*mysql' /etc/hosts + # grep '127.0.0.1.*azurite' /etc/hosts + # grep '127.0.0.1.*minio' /etc/hosts + # grep '127.0.0.1.*postgres' /etc/hosts + # grep '127.0.0.1.*mysql' /etc/hosts ifeq ($(RUN_MODE),local) env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) endif diff --git a/examples/daemon-retry-strategy.yaml b/examples/daemon-retry-strategy.yaml new file mode 100644 index 000000000000..816c853bb4d0 --- /dev/null +++ b/examples/daemon-retry-strategy.yaml @@ -0,0 +1,44 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: daemon-nginx- +spec: + entrypoint: daemon-retry-example + + templates: + - name: daemon-nginx-example + steps: + - - name: nginx-server + template: nginx-server + - - name: nginx-client + template: nginx-client + arguments: + parameters: + - name: server-ip + value: "{{steps.nginx-server.ip}}" + withSequence: + count: 10 + + - name: nginx-server + daemon: true + container: + image: nginx:1.13 + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 2 + timeoutSeconds: 1 + + - name: nginx-client + inputs: + parameters: + - name: server-ip + synchronization: + mutex: + name: client + container: + image: appropriate/curl:latest + command: ["/bin/sh", "-c"] + args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && sleep 10"] + diff --git a/v3 b/v3 new file mode 120000 index 000000000000..945c9b46d684 --- /dev/null +++ b/v3 @@ -0,0 +1 @@ +. \ No newline at end of file From f3ef097d5654588316e2da6763893f69fde25d99 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Mon, 7 Oct 2024 17:31:07 +0300 Subject: [PATCH 02/50] feat: add retry strategy support to daemon containers Signed-off-by: MenD32 --- examples/daemon-retry-strategy.yaml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/examples/daemon-retry-strategy.yaml b/examples/daemon-retry-strategy.yaml index 816c853bb4d0..00959efe5551 100644 --- a/examples/daemon-retry-strategy.yaml +++ b/examples/daemon-retry-strategy.yaml @@ -1,12 +1,12 @@ apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: daemon-nginx- + generateName: daemon-retry- spec: entrypoint: daemon-retry-example templates: - - name: daemon-nginx-example + - name: daemon-retry-example steps: - - name: nginx-server template: nginx-server @@ -20,6 +20,8 @@ spec: count: 10 - name: nginx-server + retryStrategy: + limit: "10" daemon: true container: image: nginx:1.13 @@ -34,11 +36,13 @@ spec: inputs: parameters: - name: server-ip + retryStrategy: + limit: "10" synchronization: mutex: - name: client + name: client-{{workflow.uid}} container: image: appropriate/curl:latest command: ["/bin/sh", "-c"] - args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && sleep 10"] + args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && sleep 10 && exit 1"] From 693f502917109df6b9a0bbdb0d5778c8fa940bba Mon Sep 17 00:00:00 2001 From: MenD32 Date: Thu, 10 Oct 2024 13:56:04 +0300 Subject: [PATCH 03/50] feat: daemon retry works, but node.IP doesn't get updated Signed-off-by: MenD32 --- examples/daemon-retry-strategy.yaml | 22 +++++---- v3 | 1 - workflow/controller/operator.go | 70 ++++++++++++++++++++++------- workflow/controller/steps.go | 4 +- 4 files changed, 66 insertions(+), 31 deletions(-) delete mode 120000 v3 diff --git a/examples/daemon-retry-strategy.yaml b/examples/daemon-retry-strategy.yaml index 00959efe5551..65fbbdee4485 100644 --- a/examples/daemon-retry-strategy.yaml +++ b/examples/daemon-retry-strategy.yaml @@ -3,23 +3,23 @@ kind: Workflow metadata: generateName: daemon-retry- spec: - entrypoint: daemon-retry-example + entrypoint: main templates: - - name: daemon-retry-example + - name: main steps: - - - name: nginx-server - template: nginx-server - - - name: nginx-client - template: nginx-client + - - name: server + template: server + - - name: client + template: client arguments: parameters: - name: server-ip - value: "{{steps.nginx-server.ip}}" + value: "{{steps.server.ip}}" withSequence: count: 10 - - name: nginx-server + - name: server retryStrategy: limit: "10" daemon: true @@ -32,17 +32,15 @@ spec: initialDelaySeconds: 2 timeoutSeconds: 1 - - name: nginx-client + - name: client inputs: parameters: - name: server-ip - retryStrategy: - limit: "10" synchronization: mutex: name: client-{{workflow.uid}} container: image: appropriate/curl:latest command: ["/bin/sh", "-c"] - args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && sleep 10 && exit 1"] + args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && sleep 100"] diff --git a/v3 b/v3 deleted file mode 120000 index 945c9b46d684..000000000000 --- a/v3 +++ /dev/null @@ -1 +0,0 @@ -. \ No newline at end of file diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 5ddf1cdfce00..dad2b6bd7894 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -955,7 +955,7 @@ func (woc *wfOperationCtx) requeue() { // processNodeRetries updates the retry node state based on the child node state and the retry strategy and returns the node. func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrategy wfv1.RetryStrategy, opts *executeTemplateOpts) (*wfv1.NodeStatus, bool, error) { - if node.Fulfilled() { + if node.Phase.Fulfilled() { return node, true, nil } @@ -968,12 +968,17 @@ func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrate return node, true, nil } - if !lastChildNode.Fulfilled() { + if lastChildNode.IsDaemoned() && !lastChildNode.Phase.Fulfilled() { + node.Daemoned = ptr.To(true) + return node, true, nil + } + + if !lastChildNode.Phase.Fulfilled() { // last child node is still running. return node, true, nil } - if !lastChildNode.FailedOrError() { + if (!lastChildNode.FailedOrError() && !lastChildNode.IsDaemoned()) || (lastChildNode.IsDaemoned() && !lastChildNode.Phase.Fulfilled()) { node.Outputs = lastChildNode.Outputs.DeepCopy() woc.wf.Status.Nodes.Set(node.ID, *node) return woc.markNodePhase(node.Name, wfv1.NodeSucceeded), true, nil @@ -1072,7 +1077,7 @@ func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrate } woc.log.Infof("Retry Policy: %s (onFailed: %v, onError %v)", retryStrategy.RetryPolicyActual(), retryOnFailed, retryOnError) - if (lastChildNode.Phase == wfv1.NodeFailed && !retryOnFailed) || (lastChildNode.Phase == wfv1.NodeError && !retryOnError) { + if ((lastChildNode.Phase == wfv1.NodeFailed || lastChildNode.IsDaemoned() && (lastChildNode.Phase == wfv1.NodeSucceeded)) && !retryOnFailed) || (lastChildNode.Phase == wfv1.NodeError && !retryOnError) { woc.log.Infof("Node not set to be retried after status: %s", lastChildNode.Phase) return woc.markNodePhase(node.Name, lastChildNode.Phase, lastChildNode.Message), true, nil } @@ -1347,17 +1352,21 @@ func (woc *wfOperationCtx) assessNodeStatus(ctx context.Context, pod *apiv1.Pod, woc.controller.metrics.ChangePodPending(ctx, new.Message, pod.ObjectMeta.Namespace) } case apiv1.PodSucceeded: - new.Phase = wfv1.NodeSucceeded + // if the pod is succeeded, we need to check if it is a daemoned step or not + // if it is daemoned, we need to mark it as failed, since daemon pods should run indefinitely + if tmpl.IsDaemon() { + woc.log.Debugf("Daemoned pod %s succeeded. Marking it as failed", pod.Name) + new.Phase = wfv1.NodeFailed + } else { + new.Phase = wfv1.NodeSucceeded + } + new.Daemoned = nil case apiv1.PodFailed: // ignore pod failure for daemoned steps - if tmpl != nil && tmpl.IsDaemon() { - new.Phase = wfv1.NodeSucceeded - } else { - new.Phase, new.Message = woc.inferFailedReason(pod, tmpl) - woc.log.WithField("displayName", old.DisplayName).WithField("templateName", wfutil.GetTemplateFromNode(*old)). - WithField("pod", pod.Name).Infof("Pod failed: %s", new.Message) - } + new.Phase, new.Message = woc.inferFailedReason(pod, tmpl) + woc.log.WithField("displayName", old.DisplayName).WithField("templateName", wfutil.GetTemplateFromNode(*old)). + WithField("pod", pod.Name).Infof("Pod failed: %s", new.Message) new.Daemoned = nil case apiv1.PodRunning: // Daemons are a special case we need to understand the rules: @@ -2101,8 +2110,9 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, retryParentNode = processedRetryParentNode childNodeIDs, lastChildNode := getChildNodeIdsAndLastRetriedNode(retryParentNode, woc.wf.Status.Nodes) + // The retry node might have completed by now. - if retryParentNode.Fulfilled() { + if retryParentNode.Phase.Fulfilled() || (lastChildNode != nil && !lastChildNode.Phase.Fulfilled() && lastChildNode.IsDaemoned()) { // If retry node has completed, set the output of the last child node to its output. // Runtime parameters (e.g., `status`, `resourceDuration`) in the output will be used to emit metrics. if lastChildNode != nil { @@ -2135,7 +2145,7 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, } var retryNum int - if lastChildNode != nil && !lastChildNode.Fulfilled() { + if lastChildNode != nil && !lastChildNode.Phase.Fulfilled() { // Last child node is either still running, or in some cases the corresponding Pod hasn't even been // created yet, for example if it exceeded the ResourceQuota nodeName = lastChildNode.Name @@ -2238,7 +2248,7 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, return node, err } - if !retryNode.Fulfilled() && node.Fulfilled() { // if the retry child has completed we need to update outself + if !retryNode.Phase.Fulfilled() && node.Phase.Fulfilled() { // if the retry child has completed we need to update outself retryNode, err = woc.executeTemplate(ctx, retryNodeName, orgTmpl, tmplCtx, args, opts) if err != nil { return woc.markNodeError(node.Name, err), err @@ -2273,7 +2283,7 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, } func (woc *wfOperationCtx) handleNodeFulfilled(ctx context.Context, nodeName string, node *wfv1.NodeStatus, processedTmpl *wfv1.Template) *wfv1.NodeStatus { - if node == nil || !node.Fulfilled() { + if node == nil || !node.Phase.Fulfilled() { return nil } @@ -2445,6 +2455,34 @@ func (woc *wfOperationCtx) hasDaemonNodes() bool { return false } +// check if node has any children who are daemoned +func (woc *wfOperationCtx) nodeHasDaemonChildren(node *wfv1.NodeStatus) bool { + for _, childID := range node.Children { + childNode, err := woc.wf.Status.Nodes.Get(childID) + if err != nil { + continue + } + if childNode.IsDaemoned() { + return true + } + } + return false +} + +// check if node has any children who are daemoned and pending +func (woc *wfOperationCtx) nodeHasPendingDaemonChildren(node *wfv1.NodeStatus) bool { + for _, childID := range node.Children { + childNode, err := woc.wf.Status.Nodes.Get(childID) + if err != nil { + continue + } + if childNode.IsDaemoned() && childNode.Phase == wfv1.NodePending { + return true + } + } + return false +} + func (woc *wfOperationCtx) GetNodeTemplate(node *wfv1.NodeStatus) (*wfv1.Template, error) { if node.TemplateRef != nil { tmplCtx, err := woc.createTemplateContext(node.GetTemplateScope()) diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index d7e2860f7fd9..e20d2496f9c2 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -111,7 +111,7 @@ func (woc *wfOperationCtx) executeSteps(ctx context.Context, nodeName string, tm if err != nil { return nil, err } - if !sgNode.Fulfilled() { + if !sgNode.Phase.Fulfilled() || woc.nodeHasPendingDaemonChildren(sgNode) { woc.log.Infof("Workflow step group node %s not yet completed", sgNode.ID) return node, nil } @@ -234,7 +234,7 @@ func (woc *wfOperationCtx) executeStepGroup(ctx context.Context, stepGroup []wfv if err != nil { return nil, err } - if node.Fulfilled() { + if node.Fulfilled() && !woc.nodeHasDaemonChildren(node) { woc.log.Debugf("Step group node %v already marked completed", node) return node, nil } From 2e1c501d8977f49f68bef3c8ff81997aed4c2e24 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 12 Oct 2024 01:35:43 +0300 Subject: [PATCH 04/50] feat: style is better Signed-off-by: MenD32 --- workflow/controller/operator.go | 26 +++++++------------------- workflow/controller/steps.go | 4 ++-- 2 files changed, 9 insertions(+), 21 deletions(-) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index dad2b6bd7894..77bb0986d8d0 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -2110,7 +2110,6 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, retryParentNode = processedRetryParentNode childNodeIDs, lastChildNode := getChildNodeIdsAndLastRetriedNode(retryParentNode, woc.wf.Status.Nodes) - // The retry node might have completed by now. if retryParentNode.Phase.Fulfilled() || (lastChildNode != nil && !lastChildNode.Phase.Fulfilled() && lastChildNode.IsDaemoned()) { // If retry node has completed, set the output of the last child node to its output. @@ -2455,32 +2454,21 @@ func (woc *wfOperationCtx) hasDaemonNodes() bool { return false } -// check if node has any children who are daemoned -func (woc *wfOperationCtx) nodeHasDaemonChildren(node *wfv1.NodeStatus) bool { - for _, childID := range node.Children { - childNode, err := woc.wf.Status.Nodes.Get(childID) - if err != nil { - continue - } - if childNode.IsDaemoned() { - return true - } +// check if all of the nodes children are fulffilled +func (woc *wfOperationCtx) childrenFulfilled(node *wfv1.NodeStatus) bool { + if len(node.Children) == 0 { + return node.Fulfilled() } - return false -} - -// check if node has any children who are daemoned and pending -func (woc *wfOperationCtx) nodeHasPendingDaemonChildren(node *wfv1.NodeStatus) bool { for _, childID := range node.Children { childNode, err := woc.wf.Status.Nodes.Get(childID) if err != nil { continue } - if childNode.IsDaemoned() && childNode.Phase == wfv1.NodePending { - return true + if !woc.childrenFulfilled(childNode) { + return false } } - return false + return true } func (woc *wfOperationCtx) GetNodeTemplate(node *wfv1.NodeStatus) (*wfv1.Template, error) { diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index e20d2496f9c2..b712f906edf3 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -111,7 +111,7 @@ func (woc *wfOperationCtx) executeSteps(ctx context.Context, nodeName string, tm if err != nil { return nil, err } - if !sgNode.Phase.Fulfilled() || woc.nodeHasPendingDaemonChildren(sgNode) { + if !sgNode.Phase.Fulfilled() || !woc.childrenFulfilled(sgNode) { woc.log.Infof("Workflow step group node %s not yet completed", sgNode.ID) return node, nil } @@ -234,7 +234,7 @@ func (woc *wfOperationCtx) executeStepGroup(ctx context.Context, stepGroup []wfv if err != nil { return nil, err } - if node.Fulfilled() && !woc.nodeHasDaemonChildren(node) { + if node.Fulfilled() && woc.childrenFulfilled(node) { woc.log.Debugf("Step group node %v already marked completed", node) return node, nil } From cbf168b0d7616ce745db8fccfe857d828f17e953 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 12 Oct 2024 15:24:19 +0300 Subject: [PATCH 05/50] feat: now works, need to squash Signed-off-by: MenD32 --- examples/daemon-retry-strategy.yaml | 2 +- workflow/controller/operator.go | 18 ++++++++++++++---- workflow/controller/steps.go | 5 ++++- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/examples/daemon-retry-strategy.yaml b/examples/daemon-retry-strategy.yaml index 65fbbdee4485..fbc7ab9d41c6 100644 --- a/examples/daemon-retry-strategy.yaml +++ b/examples/daemon-retry-strategy.yaml @@ -42,5 +42,5 @@ spec: container: image: appropriate/curl:latest command: ["/bin/sh", "-c"] - args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && sleep 100"] + args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/"] diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 77bb0986d8d0..1fe49f8e3d88 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -968,17 +968,20 @@ func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrate return node, true, nil } - if lastChildNode.IsDaemoned() && !lastChildNode.Phase.Fulfilled() { + if lastChildNode.IsDaemoned() { node.Daemoned = ptr.To(true) - return node, true, nil } if !lastChildNode.Phase.Fulfilled() { // last child node is still running. + node = woc.markNodePhase(node.Name, lastChildNode.Phase) + if lastChildNode.IsDaemoned() { // markNodePhase doesn't pass the Daemoned field + node.Daemoned = ptr.To(true) + } return node, true, nil } - if (!lastChildNode.FailedOrError() && !lastChildNode.IsDaemoned()) || (lastChildNode.IsDaemoned() && !lastChildNode.Phase.Fulfilled()) { + if !lastChildNode.FailedOrError() { node.Outputs = lastChildNode.Outputs.DeepCopy() woc.wf.Status.Nodes.Set(node.ID, *node) return woc.markNodePhase(node.Name, wfv1.NodeSucceeded), true, nil @@ -2111,7 +2114,7 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, childNodeIDs, lastChildNode := getChildNodeIdsAndLastRetriedNode(retryParentNode, woc.wf.Status.Nodes) // The retry node might have completed by now. - if retryParentNode.Phase.Fulfilled() || (lastChildNode != nil && !lastChildNode.Phase.Fulfilled() && lastChildNode.IsDaemoned()) { + if woc.childrenFulfilled(retryParentNode) { // If retry node has completed, set the output of the last child node to its output. // Runtime parameters (e.g., `status`, `resourceDuration`) in the output will be used to emit metrics. if lastChildNode != nil { @@ -2253,6 +2256,13 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, return woc.markNodeError(node.Name, err), err } } + + if !node.Phase.Fulfilled() { + retryNode = woc.markNodePhase(retryNodeName, node.Phase) + if node.IsDaemoned() { // markNodePhase doesn't pass the Daemoned field + retryNode.Daemoned = ptr.To(true) + } + } node = retryNode } diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index b712f906edf3..eb9ea0d70936 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -111,7 +111,7 @@ func (woc *wfOperationCtx) executeSteps(ctx context.Context, nodeName string, tm if err != nil { return nil, err } - if !sgNode.Phase.Fulfilled() || !woc.childrenFulfilled(sgNode) { + if !sgNode.Fulfilled() { woc.log.Infof("Workflow step group node %s not yet completed", sgNode.ID) return node, nil } @@ -337,6 +337,9 @@ func (woc *wfOperationCtx) executeStepGroup(ctx context.Context, stepGroup []wfv } } if !completed { + if node.Fulfilled() { + return woc.markNodePhase(sgNodeName, wfv1.NodeRunning), nil + } return node, nil } From c0ad779d1393183ea57fab60843d7988e0a82a47 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 12 Oct 2024 20:14:09 +0300 Subject: [PATCH 06/50] feat(docs): added daemon retry strategy examples for steps and dags Signed-off-by: MenD32 --- examples/daemon-retry-strategy.yaml | 46 ----------------------- examples/dag-daemon-retry-strategy.yaml | 10 +++-- examples/steps-daemon-retry-strategy.yaml | 2 +- 3 files changed, 7 insertions(+), 51 deletions(-) delete mode 100644 examples/daemon-retry-strategy.yaml diff --git a/examples/daemon-retry-strategy.yaml b/examples/daemon-retry-strategy.yaml deleted file mode 100644 index fbc7ab9d41c6..000000000000 --- a/examples/daemon-retry-strategy.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: daemon-retry- -spec: - entrypoint: main - - templates: - - name: main - steps: - - - name: server - template: server - - - name: client - template: client - arguments: - parameters: - - name: server-ip - value: "{{steps.server.ip}}" - withSequence: - count: 10 - - - name: server - retryStrategy: - limit: "10" - daemon: true - container: - image: nginx:1.13 - readinessProbe: - httpGet: - path: / - port: 80 - initialDelaySeconds: 2 - timeoutSeconds: 1 - - - name: client - inputs: - parameters: - - name: server-ip - synchronization: - mutex: - name: client-{{workflow.uid}} - container: - image: appropriate/curl:latest - command: ["/bin/sh", "-c"] - args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/"] - diff --git a/examples/dag-daemon-retry-strategy.yaml b/examples/dag-daemon-retry-strategy.yaml index fbc7ab9d41c6..ef4f81ee8165 100644 --- a/examples/dag-daemon-retry-strategy.yaml +++ b/examples/dag-daemon-retry-strategy.yaml @@ -1,16 +1,18 @@ apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: daemon-retry- + generateName: dag-daemon-retry- spec: entrypoint: main templates: - name: main - steps: - - - name: server + dag: + tasks: + - name: server template: server - - - name: client + - name: client + depends: server template: client arguments: parameters: diff --git a/examples/steps-daemon-retry-strategy.yaml b/examples/steps-daemon-retry-strategy.yaml index fbc7ab9d41c6..2ebb758e4454 100644 --- a/examples/steps-daemon-retry-strategy.yaml +++ b/examples/steps-daemon-retry-strategy.yaml @@ -1,7 +1,7 @@ apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: daemon-retry- + generateName: steps-daemon-retry- spec: entrypoint: main From a5861f55783305ec658fdd517bee7c1347c3bd06 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 12 Oct 2024 20:14:42 +0300 Subject: [PATCH 07/50] feat(docs): added daemon retry strategy examples for steps and dags Signed-off-by: MenD32 --- examples/dag-daemon-retry-strategy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/dag-daemon-retry-strategy.yaml b/examples/dag-daemon-retry-strategy.yaml index ef4f81ee8165..77cbaf0d3b58 100644 --- a/examples/dag-daemon-retry-strategy.yaml +++ b/examples/dag-daemon-retry-strategy.yaml @@ -17,7 +17,7 @@ spec: arguments: parameters: - name: server-ip - value: "{{steps.server.ip}}" + value: "{{tasks.server.ip}}" withSequence: count: 10 From fa8ced44581a2b564bb0e8cc6569eae2eb48507f Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 13 Oct 2024 12:43:27 +0300 Subject: [PATCH 08/50] fix(tests): func TestParametrizableLimit, exit code is int instead of string Signed-off-by: MenD32 --- test/e2e/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go index 906eeac31770..71d6876d2546 100644 --- a/test/e2e/functional_test.go +++ b/test/e2e/functional_test.go @@ -780,7 +780,7 @@ spec: limit: "{{inputs.parameters.limit}}" container: image: argoproj/argosay:v2 - args: [exit, 1] + args: [exit, "1"] `). When(). SubmitWorkflow(). From b126f92bb734585148719f2d92c73ca626cae24a Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 13 Oct 2024 15:36:10 +0300 Subject: [PATCH 09/50] fix(controller): retry logic failed to retry nodes that failed fast enough Signed-off-by: MenD32 --- v3 | 1 - workflow/controller/operator.go | 6 +++++- 2 files changed, 5 insertions(+), 2 deletions(-) delete mode 120000 v3 diff --git a/v3 b/v3 deleted file mode 120000 index 945c9b46d684..000000000000 --- a/v3 +++ /dev/null @@ -1 +0,0 @@ -. \ No newline at end of file diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 2abb0348f8e6..260d34c4ddf2 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -2113,8 +2113,12 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, retryParentNode = processedRetryParentNode childNodeIDs, lastChildNode := getChildNodeIdsAndLastRetriedNode(retryParentNode, woc.wf.Status.Nodes) + for i := 0; i < 2000000; i++ { + woc.log.Debugf("A.1: %t", woc.childrenFulfilled(retryParentNode)) + } + // The retry node might have completed by now. - if woc.childrenFulfilled(retryParentNode) { + if retryParentNode.Fulfilled() && woc.childrenFulfilled(retryParentNode) { // if retry node has daemoned nodes we want to check those are done too // If retry node has completed, set the output of the last child node to its output. // Runtime parameters (e.g., `status`, `resourceDuration`) in the output will be used to emit metrics. if lastChildNode != nil { From 6f4efc792f656a0eaa989eb761e4230663d33e57 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 13 Oct 2024 15:59:53 +0300 Subject: [PATCH 10/50] fix(controller): retry logic failed to retry nodes that failed fast enough Signed-off-by: MenD32 --- workflow/controller/operator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 260d34c4ddf2..07a7dbbfc2e8 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -2118,7 +2118,8 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, } // The retry node might have completed by now. - if retryParentNode.Fulfilled() && woc.childrenFulfilled(retryParentNode) { // if retry node has daemoned nodes we want to check those are done too + if retryParentNode.Fulfilled() && woc.childrenFulfilled(retryParentNode) || + woc.childrenFulfilled(retryParentNode) && retryParentNode.IsDaemoned() { // if retry node is daemoned we want to check those explicitly // If retry node has completed, set the output of the last child node to its output. // Runtime parameters (e.g., `status`, `resourceDuration`) in the output will be used to emit metrics. if lastChildNode != nil { From 0eb58244481d6c680c3d10535d16b6f44f536406 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 13 Oct 2024 17:39:53 +0300 Subject: [PATCH 11/50] fix(docs): codegen Signed-off-by: MenD32 --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 14be3b0849c7..f3acdfd3c550 100644 --- a/Makefile +++ b/Makefile @@ -558,10 +558,10 @@ endif ifeq ($(AUTH_MODE),sso) grep '127.0.0.1.*dex' /etc/hosts endif - # grep '127.0.0.1.*azurite' /etc/hosts - # grep '127.0.0.1.*minio' /etc/hosts - # grep '127.0.0.1.*postgres' /etc/hosts - # grep '127.0.0.1.*mysql' /etc/hosts + grep '127.0.0.1.*azurite' /etc/hosts + grep '127.0.0.1.*minio' /etc/hosts + grep '127.0.0.1.*postgres' /etc/hosts + grep '127.0.0.1.*mysql' /etc/hosts ifeq ($(RUN_MODE),local) env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) endif From 880a2613b1e11f234913fcd12ba26d4464a34c64 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 13 Oct 2024 17:40:45 +0300 Subject: [PATCH 12/50] fix(docs): codegen Signed-off-by: MenD32 --- Makefile | 8115 ++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 7340 insertions(+), 775 deletions(-) diff --git a/Makefile b/Makefile index f3acdfd3c550..91abfa490597 100644 --- a/Makefile +++ b/Makefile @@ -1,775 +1,7340 @@ -export SHELL:=bash -export SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit - -# NOTE: Please ensure dependencies are synced with the flake.nix file in dev/nix/flake.nix before upgrading -# any external dependency. There is documentation on how to do this under the Developer Guide - -USE_NIX := false -# https://stackoverflow.com/questions/4122831/disable-make-builtin-rules-and-variables-from-inside-the-make-file -MAKEFLAGS += --no-builtin-rules -.SUFFIXES: - -# -- build metadata -BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ') -# below 3 are copied verbatim to release.yaml -GIT_COMMIT := $(shell git rev-parse HEAD || echo unknown) -GIT_TAG := $(shell git describe --exact-match --tags --abbrev=0 2> /dev/null || echo untagged) -GIT_TREE_STATE := $(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi) -GIT_REMOTE := origin -GIT_BRANCH := $(shell git rev-parse --symbolic-full-name --verify --quiet --abbrev-ref HEAD) -RELEASE_TAG := $(shell if [[ "$(GIT_TAG)" =~ ^v[0-9]+\.[0-9]+\.[0-9]+.*$$ ]]; then echo "true"; else echo "false"; fi) -DEV_BRANCH := $(shell [ "$(GIT_BRANCH)" = main ] || [ `echo $(GIT_BRANCH) | cut -c -8` = release- ] || [ `echo $(GIT_BRANCH) | cut -c -4` = dev- ] || [ $(RELEASE_TAG) = true ] && echo false || echo true) -SRC := $(GOPATH)/src/github.com/argoproj/argo-workflows -VERSION := latest -# VERSION is the version to be used for files in manifests and should always be latest unless we are releasing -# we assume HEAD means you are on a tag -ifeq ($(RELEASE_TAG),true) -VERSION := $(GIT_TAG) -endif - -# -- docker image publishing options -IMAGE_NAMESPACE ?= quay.io/argoproj -DOCKER_PUSH ?= false -TARGET_PLATFORM ?= linux/$(shell go env GOARCH) -K3D_CLUSTER_NAME ?= k3s-default # declares which cluster to import to in case it's not the default name - -# -- test options -E2E_WAIT_TIMEOUT ?= 90s # timeout for wait conditions -E2E_PARALLEL ?= 20 -E2E_SUITE_TIMEOUT ?= 15m -GOTEST ?= go test -v -p 20 - -# should we build the static files? -ifneq (,$(filter $(MAKECMDGOALS),codegen lint test docs start)) -STATIC_FILES := false -else -STATIC_FILES ?= $(shell [ $(DEV_BRANCH) = true ] && echo false || echo true) -endif - -# -- install & run options -PROFILE ?= minimal -KUBE_NAMESPACE ?= argo # namespace where Kubernetes resources/RBAC will be installed -PLUGINS ?= $(shell [ $PROFILE = plugins ] && echo false || echo true) -UI ?= false # start the UI -API ?= $(UI) # start the Argo Server -TASKS := controller -ifeq ($(API),true) -TASKS := controller server -endif -ifeq ($(UI),true) -TASKS := controller server ui -endif -# Which mode to run in: -# * `local` run the workflow–controller and argo-server as single replicas on the local machine (default) -# * `kubernetes` run the workflow-controller and argo-server on the Kubernetes cluster -RUN_MODE := local -KUBECTX := $(shell [[ "`which kubectl`" != '' ]] && kubectl config current-context || echo none) -DOCKER_DESKTOP := $(shell [[ "$(KUBECTX)" == "docker-desktop" ]] && echo true || echo false) -K3D := $(shell [[ "$(KUBECTX)" == "k3d-"* ]] && echo true || echo false) -ifeq ($(PROFILE),prometheus) -RUN_MODE := kubernetes -endif -ifeq ($(PROFILE),stress) -RUN_MODE := kubernetes -endif - -# -- controller + server + executor env vars -LOG_LEVEL := debug -UPPERIO_DB_DEBUG := 0 -DEFAULT_REQUEUE_TIME ?= 1s # by keeping this short we speed up tests -ALWAYS_OFFLOAD_NODE_STATUS := false -POD_STATUS_CAPTURE_FINALIZER ?= true -NAMESPACED := true -MANAGED_NAMESPACE ?= $(KUBE_NAMESPACE) -SECURE := false # whether or not to start Argo in TLS mode -AUTH_MODE := hybrid -ifeq ($(PROFILE),sso) -AUTH_MODE := sso -endif - -$(info GIT_COMMIT=$(GIT_COMMIT) GIT_BRANCH=$(GIT_BRANCH) GIT_TAG=$(GIT_TAG) GIT_TREE_STATE=$(GIT_TREE_STATE) RELEASE_TAG=$(RELEASE_TAG) DEV_BRANCH=$(DEV_BRANCH) VERSION=$(VERSION)) -$(info KUBECTX=$(KUBECTX) DOCKER_DESKTOP=$(DOCKER_DESKTOP) K3D=$(K3D) DOCKER_PUSH=$(DOCKER_PUSH) TARGET_PLATFORM=$(TARGET_PLATFORM)) -$(info RUN_MODE=$(RUN_MODE) PROFILE=$(PROFILE) AUTH_MODE=$(AUTH_MODE) SECURE=$(SECURE) STATIC_FILES=$(STATIC_FILES) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) LOG_LEVEL=$(LOG_LEVEL) NAMESPACED=$(NAMESPACED)) - -override LDFLAGS += \ - -X github.com/argoproj/argo-workflows/v3.version=$(VERSION) \ - -X github.com/argoproj/argo-workflows/v3.buildDate=$(BUILD_DATE) \ - -X github.com/argoproj/argo-workflows/v3.gitCommit=$(GIT_COMMIT) \ - -X github.com/argoproj/argo-workflows/v3.gitTreeState=$(GIT_TREE_STATE) - -ifneq ($(GIT_TAG),) -override LDFLAGS += -X github.com/argoproj/argo-workflows/v3.gitTag=${GIT_TAG} -endif - -ifndef $(GOPATH) - GOPATH=$(shell go env GOPATH) - export GOPATH -endif - -# -- file lists -HACK_PKG_FILES_AS_PKGS ?= false -ifeq ($(HACK_PKG_FILES_AS_PKGS),false) - ARGOEXEC_PKG_FILES := $(shell go list -f '{{ join .Deps "\n" }}' ./cmd/argoexec/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-) - CLI_PKG_FILES := $(shell go list -f '{{ join .Deps "\n" }}' ./cmd/argo/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-) - CONTROLLER_PKG_FILES := $(shell go list -f '{{ join .Deps "\n" }}' ./cmd/workflow-controller/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-) -else -# Building argoexec on windows cannot rebuild the openapi, we need to fall back to the old -# behaviour where we fake dependencies and therefore don't rebuild - ARGOEXEC_PKG_FILES := $(shell echo cmd/argoexec && go list -f '{{ join .Deps "\n" }}' ./cmd/argoexec/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) - CLI_PKG_FILES := $(shell echo cmd/argo && go list -f '{{ join .Deps "\n" }}' ./cmd/argo/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) - CONTROLLER_PKG_FILES := $(shell echo cmd/workflow-controller && go list -f '{{ join .Deps "\n" }}' ./cmd/workflow-controller/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) -endif - -TYPES := $(shell find pkg/apis/workflow/v1alpha1 -type f -name '*.go' -not -name openapi_generated.go -not -name '*generated*' -not -name '*test.go') -CRDS := $(shell find manifests/base/crds -type f -name 'argoproj.io_*.yaml') -SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \ - pkg/apiclient/_.secondary.swagger.json \ - pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json \ - pkg/apiclient/cronworkflow/cron-workflow.swagger.json \ - pkg/apiclient/event/event.swagger.json \ - pkg/apiclient/eventsource/eventsource.swagger.json \ - pkg/apiclient/info/info.swagger.json \ - pkg/apiclient/sensor/sensor.swagger.json \ - pkg/apiclient/workflow/workflow.swagger.json \ - pkg/apiclient/workflowarchive/workflow-archive.swagger.json \ - pkg/apiclient/workflowtemplate/workflow-template.swagger.json -PROTO_BINARIES := $(GOPATH)/bin/protoc-gen-gogo $(GOPATH)/bin/protoc-gen-gogofast $(GOPATH)/bin/goimports $(GOPATH)/bin/protoc-gen-grpc-gateway $(GOPATH)/bin/protoc-gen-swagger /usr/local/bin/clang-format - -# protoc,my.proto -define protoc - # protoc $(1) - [ -e ./vendor ] || go mod vendor - protoc \ - -I /usr/local/include \ - -I $(CURDIR) \ - -I $(CURDIR)/vendor \ - -I $(GOPATH)/src \ - -I $(GOPATH)/pkg/mod/github.com/gogo/protobuf@v1.3.2/gogoproto \ - -I $(GOPATH)/pkg/mod/github.com/grpc-ecosystem/grpc-gateway@v1.16.0/third_party/googleapis \ - --gogofast_out=plugins=grpc:$(GOPATH)/src \ - --grpc-gateway_out=logtostderr=true:$(GOPATH)/src \ - --swagger_out=logtostderr=true,fqn_for_swagger_name=true:. \ - $(1) - perl -i -pe 's|argoproj/argo-workflows/|argoproj/argo-workflows/v3/|g' `echo "$(1)" | sed 's/proto/pb.go/g'` - -endef - -# cli - -.PHONY: cli -cli: dist/argo - -ui/dist/app/index.html: $(shell find ui/src -type f && find ui -maxdepth 1 -type f) - # `yarn install` is fast (~2s), so you can call it safely. - JOBS=max yarn --cwd ui install - # `yarn build` is slow, so we guard it with a up-to-date check. - JOBS=max yarn --cwd ui build - -$(GOPATH)/bin/staticfiles: Makefile -# update this in Nix when updating it here -ifneq ($(USE_NIX), true) - go install bou.ke/staticfiles@dd04075 -endif - -ifeq ($(STATIC_FILES),true) -server/static/files.go: $(GOPATH)/bin/staticfiles ui/dist/app/index.html - # Pack UI into a Go file - $(GOPATH)/bin/staticfiles -o server/static/files.go ui/dist/app -else -server/static/files.go: - # Building without static files - cp ./server/static/files.go.stub ./server/static/files.go -endif - -dist/argo-linux-amd64: GOARGS = GOOS=linux GOARCH=amd64 -dist/argo-linux-arm64: GOARGS = GOOS=linux GOARCH=arm64 -dist/argo-linux-ppc64le: GOARGS = GOOS=linux GOARCH=ppc64le -dist/argo-linux-riscv64: GOARGS = GOOS=linux GOARCH=riscv64 -dist/argo-linux-s390x: GOARGS = GOOS=linux GOARCH=s390x -dist/argo-darwin-amd64: GOARGS = GOOS=darwin GOARCH=amd64 -dist/argo-darwin-arm64: GOARGS = GOOS=darwin GOARCH=arm64 -dist/argo-windows-amd64: GOARGS = GOOS=windows GOARCH=amd64 - -dist/argo-windows-%.gz: dist/argo-windows-% - gzip --force --keep dist/argo-windows-$*.exe - -dist/argo-windows-%: server/static/files.go $(CLI_PKG_FILES) go.sum - CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@.exe ./cmd/argo - -dist/argo-%.gz: dist/argo-% - gzip --force --keep dist/argo-$* - -dist/argo-%: server/static/files.go $(CLI_PKG_FILES) go.sum - CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo - -dist/argo: server/static/files.go $(CLI_PKG_FILES) go.sum -ifeq ($(shell uname -s),Darwin) - # if local, then build fast: use CGO and dynamic-linking - go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS}' -o $@ ./cmd/argo -else - CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo -endif - -argocli-image: - -.PHONY: clis -clis: dist/argo-linux-amd64.gz dist/argo-linux-arm64.gz dist/argo-linux-ppc64le.gz dist/argo-linux-riscv64.gz dist/argo-linux-s390x.gz dist/argo-darwin-amd64.gz dist/argo-darwin-arm64.gz dist/argo-windows-amd64.gz - -# controller - -.PHONY: controller -controller: dist/workflow-controller - -dist/workflow-controller: $(CONTROLLER_PKG_FILES) go.sum -ifeq ($(shell uname -s),Darwin) - # if local, then build fast: use CGO and dynamic-linking - go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS}' -o $@ ./cmd/workflow-controller -else - CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/workflow-controller -endif - -workflow-controller-image: - -# argoexec - -dist/argoexec: $(ARGOEXEC_PKG_FILES) go.sum -ifeq ($(shell uname -s),Darwin) - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec -else - CGO_ENABLED=0 go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec -endif - -argoexec-image: - -%-image: - [ ! -e dist/$* ] || mv dist/$* . - docker buildx build \ - --platform $(TARGET_PLATFORM) \ - --build-arg GIT_COMMIT=$(GIT_COMMIT) \ - --build-arg GIT_TAG=$(GIT_TAG) \ - --build-arg GIT_TREE_STATE=$(GIT_TREE_STATE) \ - -t $(IMAGE_NAMESPACE)/$*:$(VERSION) \ - --target $* \ - --load \ - . - [ ! -e $* ] || mv $* dist/ - docker run --rm -t $(IMAGE_NAMESPACE)/$*:$(VERSION) version - if [ $(K3D) = true ]; then k3d image import -c $(K3D_CLUSTER_NAME) $(IMAGE_NAMESPACE)/$*:$(VERSION); fi - if [ $(DOCKER_PUSH) = true ] && [ $(IMAGE_NAMESPACE) != argoproj ] ; then docker push $(IMAGE_NAMESPACE)/$*:$(VERSION) ; fi - -.PHONY: codegen -codegen: types swagger manifests $(GOPATH)/bin/mockery docs/fields.md docs/cli/argo.md - go generate ./... - make --directory sdks/java USE_NIX=$(USE_NIX) generate - make --directory sdks/python USE_NIX=$(USE_NIX) generate - -.PHONY: check-pwd -check-pwd: - -ifneq ($(SRC),$(PWD)) - @echo "⚠️ Code generation will not work if code in not checked out into $(SRC)" >&2 -endif - -.PHONY: types -types: check-pwd pkg/apis/workflow/v1alpha1/generated.proto pkg/apis/workflow/v1alpha1/openapi_generated.go pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go - -.PHONY: swagger -swagger: \ - pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json \ - pkg/apiclient/cronworkflow/cron-workflow.swagger.json \ - pkg/apiclient/event/event.swagger.json \ - pkg/apiclient/eventsource/eventsource.swagger.json \ - pkg/apiclient/info/info.swagger.json \ - pkg/apiclient/sensor/sensor.swagger.json \ - pkg/apiclient/workflow/workflow.swagger.json \ - pkg/apiclient/workflowarchive/workflow-archive.swagger.json \ - pkg/apiclient/workflowtemplate/workflow-template.swagger.json \ - manifests/base/crds/full/argoproj.io_workflows.yaml \ - manifests \ - api/openapi-spec/swagger.json \ - api/jsonschema/schema.json - - -$(GOPATH)/bin/mockery: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install github.com/vektra/mockery/v2@v2.42.2 -endif -$(GOPATH)/bin/controller-gen: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0 -endif -$(GOPATH)/bin/go-to-protobuf: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - # TODO: currently fails on v0.30.3 with - # Unable to clean package k8s.io.api.core.v1: remove /home/runner/go/pkg/mod/k8s.io/api@v0.30.3/core/v1/generated.proto: permission denied - go install k8s.io/code-generator/cmd/go-to-protobuf@v0.21.5 -endif -$(GOPATH)/src/github.com/gogo/protobuf: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - [ -e $@ ] || git clone --depth 1 https://github.com/gogo/protobuf.git -b v1.3.2 $@ -endif -$(GOPATH)/bin/protoc-gen-gogo: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install github.com/gogo/protobuf/protoc-gen-gogo@v1.3.2 -endif -$(GOPATH)/bin/protoc-gen-gogofast: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install github.com/gogo/protobuf/protoc-gen-gogofast@v1.3.2 -endif -$(GOPATH)/bin/protoc-gen-grpc-gateway: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.16.0 -endif -$(GOPATH)/bin/protoc-gen-swagger: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.16.0 -endif -$(GOPATH)/bin/openapi-gen: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install k8s.io/kube-openapi/cmd/openapi-gen@v0.0.0-20220124234850-424119656bbf -endif -$(GOPATH)/bin/swagger: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 -endif -$(GOPATH)/bin/goimports: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - go install golang.org/x/tools/cmd/goimports@v0.1.7 -endif - -/usr/local/bin/clang-format: -ifeq (, $(shell which clang-format)) -ifeq ($(shell uname),Darwin) - brew install clang-format -else - sudo apt update - sudo apt install -y clang-format -endif -endif - -pkg/apis/workflow/v1alpha1/generated.proto: $(GOPATH)/bin/go-to-protobuf $(PROTO_BINARIES) $(TYPES) $(GOPATH)/src/github.com/gogo/protobuf - # These files are generated on a v3/ folder by the tool. Link them to the root folder - [ -e ./v3 ] || ln -s . v3 - # Format proto files. Formatting changes generated code, so we do it here, rather that at lint time. - # Why clang-format? Google uses it. - find pkg/apiclient -name '*.proto'|xargs clang-format -i - $(GOPATH)/bin/go-to-protobuf \ - --go-header-file=./hack/custom-boilerplate.go.txt \ - --packages=github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ - --apimachinery-packages=+k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/policy/v1 \ - --proto-import $(GOPATH)/src - # Delete the link - [ -e ./v3 ] && rm -rf v3 - touch pkg/apis/workflow/v1alpha1/generated.proto - -# this target will also create a .pb.go and a .pb.gw.go file, but in Make 3 we cannot use _grouped target_, instead we must choose -# on file to represent all of them -pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto - $(call protoc,pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto) - -pkg/apiclient/cronworkflow/cron-workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/cronworkflow/cron-workflow.proto - $(call protoc,pkg/apiclient/cronworkflow/cron-workflow.proto) - -pkg/apiclient/event/event.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/event/event.proto - $(call protoc,pkg/apiclient/event/event.proto) - -pkg/apiclient/eventsource/eventsource.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/eventsource/eventsource.proto - $(call protoc,pkg/apiclient/eventsource/eventsource.proto) - -pkg/apiclient/info/info.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/info/info.proto - $(call protoc,pkg/apiclient/info/info.proto) - -pkg/apiclient/sensor/sensor.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/sensor/sensor.proto - $(call protoc,pkg/apiclient/sensor/sensor.proto) - -pkg/apiclient/workflow/workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflow/workflow.proto - $(call protoc,pkg/apiclient/workflow/workflow.proto) - -pkg/apiclient/workflowarchive/workflow-archive.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowarchive/workflow-archive.proto - $(call protoc,pkg/apiclient/workflowarchive/workflow-archive.proto) - -pkg/apiclient/workflowtemplate/workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowtemplate/workflow-template.proto - $(call protoc,pkg/apiclient/workflowtemplate/workflow-template.proto) - -# generate other files for other CRDs -manifests/base/crds/full/argoproj.io_workflows.yaml: $(GOPATH)/bin/controller-gen $(TYPES) ./hack/manifests/crdgen.sh ./hack/manifests/crds.go - ./hack/manifests/crdgen.sh - -.PHONY: manifests -manifests: \ - manifests/install.yaml \ - manifests/namespace-install.yaml \ - manifests/quick-start-minimal.yaml \ - manifests/quick-start-mysql.yaml \ - manifests/quick-start-postgres.yaml \ - dist/manifests/install.yaml \ - dist/manifests/namespace-install.yaml \ - dist/manifests/quick-start-minimal.yaml \ - dist/manifests/quick-start-mysql.yaml \ - dist/manifests/quick-start-postgres.yaml - -.PHONY: manifests/install.yaml -manifests/install.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/cluster-install | ./hack/manifests/auto-gen-msg.sh > manifests/install.yaml - -.PHONY: manifests/namespace-install.yaml -manifests/namespace-install.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/namespace-install | ./hack/manifests/auto-gen-msg.sh > manifests/namespace-install.yaml - -.PHONY: manifests/quick-start-minimal.yaml -manifests/quick-start-minimal.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/minimal | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-minimal.yaml - -.PHONY: manifests/quick-start-mysql.yaml -manifests/quick-start-mysql.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/mysql | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-mysql.yaml - -.PHONY: manifests/quick-start-postgres.yaml -manifests/quick-start-postgres.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/postgres | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-postgres.yaml - -dist/manifests/%: manifests/% - @mkdir -p dist/manifests - sed 's/:latest/:$(VERSION)/' manifests/$* > $@ - -# lint/test/etc - -$(GOPATH)/bin/golangci-lint: Makefile - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.61.0 - -.PHONY: lint -lint: server/static/files.go $(GOPATH)/bin/golangci-lint - rm -Rf v3 vendor - # If you're using `woc.wf.Spec` or `woc.execWf.Status` your code probably won't work with WorkflowTemplate. - # * Change `woc.wf.Spec` to `woc.execWf.Spec`. - # * Change `woc.execWf.Status` to `woc.wf.Status`. - @awk '(/woc.wf.Spec/ || /woc.execWf.Status/) && !/not-woc-misuse/ {print FILENAME ":" FNR "\t" $0 ; exit 1}' $(shell find workflow/controller -type f -name '*.go' -not -name '*test*') - # Tidy Go modules - go mod tidy - # Lint Go files - $(GOPATH)/bin/golangci-lint run --fix --verbose - # Lint the UI - if [ -e ui/node_modules ]; then yarn --cwd ui lint ; fi - # Deduplicate Node modules - if [ -e ui/node_modules ]; then yarn --cwd ui deduplicate ; fi - -# for local we have a faster target that prints to stdout, does not use json, and can cache because it has no coverage -.PHONY: test -test: server/static/files.go - go build ./... - env KUBECONFIG=/dev/null $(GOTEST) ./... - # marker file, based on it's modification time, we know how long ago this target was run - @mkdir -p dist - touch dist/test - -.PHONY: install -install: githooks - kubectl get ns $(KUBE_NAMESPACE) || kubectl create ns $(KUBE_NAMESPACE) - kubectl config set-context --current --namespace=$(KUBE_NAMESPACE) - @echo "installing PROFILE=$(PROFILE)" - kubectl kustomize --load-restrictor=LoadRestrictionsNone test/e2e/manifests/$(PROFILE) | sed 's|quay.io/argoproj/|$(IMAGE_NAMESPACE)/|' | sed 's/namespace: argo/namespace: $(KUBE_NAMESPACE)/' | kubectl -n $(KUBE_NAMESPACE) apply --prune -l app.kubernetes.io/part-of=argo -f - -ifeq ($(PROFILE),stress) - kubectl -n $(KUBE_NAMESPACE) apply -f test/stress/massive-workflow.yaml -endif -ifeq ($(RUN_MODE),kubernetes) - kubectl -n $(KUBE_NAMESPACE) scale deploy/workflow-controller --replicas 1 - kubectl -n $(KUBE_NAMESPACE) scale deploy/argo-server --replicas 1 -endif - -.PHONY: argosay -argosay: -ifeq ($(DOCKER_PUSH),true) - cd test/e2e/images/argosay/v2 && \ - docker buildx build \ - --platform linux/amd64,linux/arm64 \ - -t argoproj/argosay:v2 \ - --push \ - . -else - cd test/e2e/images/argosay/v2 && \ - docker build . -t argoproj/argosay:v2 -endif -ifeq ($(K3D),true) - k3d image import -c $(K3D_CLUSTER_NAME) argoproj/argosay:v2 -endif - -.PHONY: argosayv1 -argosayv1: -ifeq ($(DOCKER_PUSH),true) - cd test/e2e/images/argosay/v1 && \ - docker buildx build \ - --platform linux/amd64,linux/arm64 \ - -t argoproj/argosay:v1 \ - --push \ - . -else - cd test/e2e/images/argosay/v1 && \ - docker build . -t argoproj/argosay:v1 -endif - -dist/argosay: - mkdir -p dist - cp test/e2e/images/argosay/v2/argosay dist/ - -.PHONY: kit -kit: Makefile -ifeq ($(shell command -v kit),) -ifeq ($(shell uname),Darwin) - brew tap kitproj/kit --custom-remote https://github.com/kitproj/kit - brew install kit -else - curl -q https://raw.githubusercontent.com/kitproj/kit/main/install.sh | tag=v0.1.8 sh -endif -endif - - -.PHONY: start -ifeq ($(RUN_MODE),local) -ifeq ($(API),true) -start: install controller kit cli -else -start: install controller kit -endif -else -start: install kit -endif - @echo "starting STATIC_FILES=$(STATIC_FILES) (DEV_BRANCH=$(DEV_BRANCH), GIT_BRANCH=$(GIT_BRANCH)), AUTH_MODE=$(AUTH_MODE), RUN_MODE=$(RUN_MODE), MANAGED_NAMESPACE=$(MANAGED_NAMESPACE)" -ifneq ($(API),true) - @echo "⚠️️ not starting API. If you want to test the API, use 'make start API=true' to start it" -endif -ifneq ($(UI),true) - @echo "⚠️ not starting UI. If you want to test the UI, run 'make start UI=true' to start it" -endif -ifneq ($(PLUGINS),true) - @echo "⚠️ not starting plugins. If you want to test plugins, run 'make start PROFILE=plugins' to start it" -endif - # Check dex, minio, postgres and mysql are in hosts file -ifeq ($(AUTH_MODE),sso) - grep '127.0.0.1.*dex' /etc/hosts -endif - grep '127.0.0.1.*azurite' /etc/hosts - grep '127.0.0.1.*minio' /etc/hosts - grep '127.0.0.1.*postgres' /etc/hosts - grep '127.0.0.1.*mysql' /etc/hosts -ifeq ($(RUN_MODE),local) - env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) -endif - -.PHONY: wait -wait: - # Wait for workflow controller - until lsof -i :9090 > /dev/null ; do sleep 10s ; done -ifeq ($(API),true) - # Wait for Argo Server - until lsof -i :2746 > /dev/null ; do sleep 10s ; done -endif -ifeq ($(PROFILE),mysql) - # Wait for MySQL - until (: < /dev/tcp/localhost/3306) ; do sleep 10s ; done -endif - -.PHONY: postgres-cli -postgres-cli: - kubectl exec -ti `kubectl get pod -l app=postgres -o name|cut -c 5-` -- psql -U postgres - -.PHONY: mysql-cli -mysql-cli: - kubectl exec -ti `kubectl get pod -l app=mysql -o name|cut -c 5-` -- mysql -u mysql -ppassword argo - -test-cli: ./dist/argo - -test-%: - E2E_WAIT_TIMEOUT=$(E2E_WAIT_TIMEOUT) go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags $* -parallel $(E2E_PARALLEL) ./test/e2e - -.PHONY: test-examples -test-examples: - ./hack/test-examples.sh - -.PHONY: test-%-sdk -test-%-sdk: - make --directory sdks/$* install test -B - -Test%: - E2E_WAIT_TIMEOUT=$(E2E_WAIT_TIMEOUT) go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags api,cli,cron,executor,examples,corefunctional,functional,plugins -parallel $(E2E_PARALLEL) ./test/e2e -run='.*/$*' - - -# clean - -.PHONY: clean -clean: - go clean - rm -Rf test-results node_modules vendor v2 v3 argoexec-linux-amd64 dist/* ui/dist - -# swagger - -pkg/apis/workflow/v1alpha1/openapi_generated.go: $(GOPATH)/bin/openapi-gen $(TYPES) - # These files are generated on a v3/ folder by the tool. Link them to the root folder - [ -e ./v3 ] || ln -s . v3 - $(GOPATH)/bin/openapi-gen \ - --go-header-file ./hack/custom-boilerplate.go.txt \ - --input-dirs github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ - --output-package github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ - --report-filename pkg/apis/api-rules/violation_exceptions.list - # Force the timestamp to be up to date - touch $@ - # Delete the link - [ -e ./v3 ] && rm -rf v3 - - -# generates many other files (listers, informers, client etc). -pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go: $(GOPATH)/bin/go-to-protobuf $(TYPES) - # These files are generated on a v3/ folder by the tool. Link them to the root folder - [ -e ./v3 ] || ln -s . v3 - bash $(GOPATH)/pkg/mod/k8s.io/code-generator@v0.21.5/generate-groups.sh \ - "deepcopy,client,informer,lister" \ - github.com/argoproj/argo-workflows/v3/pkg/client github.com/argoproj/argo-workflows/v3/pkg/apis \ - workflow:v1alpha1 \ - --go-header-file ./hack/custom-boilerplate.go.txt - # Force the timestamp to be up to date - touch $@ - # Delete the link - [ -e ./v3 ] && rm -rf v3 - -dist/kubernetes.swagger.json: Makefile - @mkdir -p dist - # recurl will only fetch if the file doesn't exist, so delete it - rm -f $@ - ./hack/recurl.sh $@ https://raw.githubusercontent.com/kubernetes/kubernetes/v1.30.3/api/openapi-spec/swagger.json - -pkg/apiclient/_.secondary.swagger.json: hack/api/swagger/secondaryswaggergen.go pkg/apis/workflow/v1alpha1/openapi_generated.go dist/kubernetes.swagger.json - rm -Rf v3 vendor - # We have `hack/api/swagger` so that most hack script do not depend on the whole code base and are therefore slow. - go run ./hack/api/swagger secondaryswaggergen - -# we always ignore the conflicts, so lets automated figuring out how many there will be and just use that -dist/swagger-conflicts: $(GOPATH)/bin/swagger $(SWAGGER_FILES) - swagger mixin $(SWAGGER_FILES) 2>&1 | grep -c skipping > dist/swagger-conflicts || true - -dist/mixed.swagger.json: $(GOPATH)/bin/swagger $(SWAGGER_FILES) dist/swagger-conflicts - swagger mixin -c $(shell cat dist/swagger-conflicts) $(SWAGGER_FILES) -o dist/mixed.swagger.json - -dist/swaggifed.swagger.json: dist/mixed.swagger.json hack/api/swagger/swaggify.sh - cat dist/mixed.swagger.json | ./hack/api/swagger/swaggify.sh > dist/swaggifed.swagger.json - -dist/kubeified.swagger.json: dist/swaggifed.swagger.json dist/kubernetes.swagger.json - go run ./hack/api/swagger kubeifyswagger dist/swaggifed.swagger.json dist/kubeified.swagger.json - -dist/swagger.0.json: $(GOPATH)/bin/swagger dist/kubeified.swagger.json - swagger flatten --with-flatten minimal --with-flatten remove-unused dist/kubeified.swagger.json -o dist/swagger.0.json - -api/openapi-spec/swagger.json: $(GOPATH)/bin/swagger dist/swagger.0.json - swagger flatten --with-flatten remove-unused dist/swagger.0.json -o api/openapi-spec/swagger.json - -api/jsonschema/schema.json: api/openapi-spec/swagger.json hack/api/jsonschema/main.go - go run ./hack/api/jsonschema - -go-diagrams/diagram.dot: ./hack/docs/diagram.go - rm -Rf go-diagrams - go run ./hack/docs diagram - -docs/assets/diagram.png: go-diagrams/diagram.dot - cd go-diagrams && dot -Tpng diagram.dot -o ../docs/assets/diagram.png - -docs/fields.md: api/openapi-spec/swagger.json $(shell find examples -type f) hack/docs/fields.go - env ARGO_SECURE=false ARGO_INSECURE_SKIP_VERIFY=false ARGO_SERVER= ARGO_INSTANCEID= go run ./hack/docs fields - -# generates several other files -docs/cli/argo.md: $(CLI_PKG_FILES) go.sum server/static/files.go hack/docs/cli.go - go run ./hack/docs cli - -# docs - -/usr/local/bin/mdspell: Makefile -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - npm list -g markdown-spellcheck@1.3.1 > /dev/null || npm i -g markdown-spellcheck@1.3.1 -endif - -.PHONY: docs-spellcheck -docs-spellcheck: /usr/local/bin/mdspell - # check docs for spelling mistakes - mdspell --ignore-numbers --ignore-acronyms --en-us --no-suggestions --report $(shell find docs -name '*.md' -not -name upgrading.md -not -name README.md -not -name fields.md -not -name upgrading.md -not -name executor_swagger.md -not -path '*/cli/*') - # alphabetize spelling file -- ignore first line (comment), then sort the rest case-sensitive and remove duplicates - $(shell cat .spelling | awk 'NR<2{ print $0; next } { print $0 | "LC_COLLATE=C sort" }' | uniq | tee .spelling > /dev/null) - -/usr/local/bin/markdown-link-check: -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - npm list -g markdown-link-check@3.11.1 > /dev/null || npm i -g markdown-link-check@3.11.1 -endif - -.PHONY: docs-linkcheck -docs-linkcheck: /usr/local/bin/markdown-link-check - # check docs for broken links - markdown-link-check -q -c .mlc_config.json $(shell find docs -name '*.md' -not -name fields.md -not -name executor_swagger.md) - -/usr/local/bin/markdownlint: -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - npm list -g markdownlint-cli@0.33.0 > /dev/null || npm i -g markdownlint-cli@0.33.0 -endif - - -.PHONY: docs-lint -docs-lint: /usr/local/bin/markdownlint - # lint docs - markdownlint docs --fix --ignore docs/fields.md --ignore docs/executor_swagger.md --ignore docs/cli --ignore docs/walk-through/the-structure-of-workflow-specs.md - -/usr/local/bin/mkdocs: -# update this in Nix when upgrading it here -ifneq ($(USE_NIX), true) - python -m pip install --no-cache-dir -r docs/requirements.txt -endif - -.PHONY: docs -docs: /usr/local/bin/mkdocs \ - docs-spellcheck \ - docs-lint \ - # TODO: This is temporarily disabled to unblock merging PRs. - # docs-linkcheck - # copy README.md to docs/README.md - ./hack/docs/copy-readme.sh - # check environment-variables.md contains all variables mentioned in the code - ./hack/docs/check-env-doc.sh - # build the docs - TZ=UTC mkdocs build --strict - # tell the user the fastest way to edit docs - @echo "ℹ️ If you want to preview your docs, open site/index.html. If you want to edit them with hot-reload, run 'make docs-serve' to start mkdocs on port 8000" - -.PHONY: docs-serve -docs-serve: docs - mkdocs serve - -# pre-commit checks - -.git/hooks/%: hack/git/hooks/% - @mkdir -p .git/hooks - cp hack/git/hooks/$* .git/hooks/$* - -.PHONY: githooks -githooks: .git/hooks/pre-commit .git/hooks/commit-msg - -.PHONY: pre-commit -pre-commit: codegen lint docs - # marker file, based on it's modification time, we know how long ago this target was run - touch dist/pre-commit - -# release - -release-notes: /dev/null - version=$(VERSION) envsubst '$$version' < hack/release-notes.md > release-notes - -.PHONY: checksums -checksums: - sha256sum ./dist/argo-*.gz | awk -F './dist/' '{print $$1 $$2}' > ./dist/argo-workflows-cli-checksums.txt +# Field Reference + +## Workflow + +Workflow is the definition of a workflow resource + +
+Examples (click to open) + +- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) + +- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`daemoned-stateful-set-with-service.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemoned-stateful-set-with-service.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) + +- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) + +- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-jobs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-jobs.yaml) + +- [`k8s-orchestration.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-orchestration.yaml) + +- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) + +- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) + +- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) + +- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) + +- [`k8s-patch-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-pod.yaml) + +- [`k8s-resource-log-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-resource-log-selector.yaml) + +- [`k8s-set-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-set-owner-reference.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`resource-delete-with-flags.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resource-delete-with-flags.yaml) + +- [`resource-flags.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resource-flags.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) + +- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) + +- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) + +- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) + +- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) + +- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) + +- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) + +- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources| +|`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| +|`metadata`|[`ObjectMeta`](#objectmeta)|_No description available_| +|`spec`|[`WorkflowSpec`](#workflowspec)|_No description available_| +|`status`|[`WorkflowStatus`](#workflowstatus)|_No description available_| + +## CronWorkflow + +CronWorkflow is the definition of a scheduled workflow resource + +
+Examples (click to open) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) + +- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) + +- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) + +- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources| +|`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| +|`metadata`|[`ObjectMeta`](#objectmeta)|_No description available_| +|`spec`|[`CronWorkflowSpec`](#cronworkflowspec)|_No description available_| +|`status`|[`CronWorkflowStatus`](#cronworkflowstatus)|_No description available_| + +## WorkflowTemplate + +WorkflowTemplate is the definition of a workflow template resource + +
+Examples (click to open) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources| +|`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| +|`metadata`|[`ObjectMeta`](#objectmeta)|_No description available_| +|`spec`|[`WorkflowSpec`](#workflowspec)|_No description available_| + +## WorkflowSpec + +WorkflowSpec is the specification of a Workflow. + +
+Examples with this field (click to open) + +- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) + +- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) + +- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) + +- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) + +- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) + +- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) + +- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) + +- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) + +- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) + +- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) + +- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) + +- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) + +- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) + +- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) + +- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) + +- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) + +- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) + +- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) + +- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`activeDeadlineSeconds`|`integer`|Optional duration in seconds relative to the workflow start time which the workflow is allowed to run before the controller terminates the io.argoproj.workflow.v1alpha1. A value of zero is used to terminate a Running workflow| +|`affinity`|[`Affinity`](#affinity)|Affinity sets the scheduling constraints for all pods in the io.argoproj.workflow.v1alpha1. Can be overridden by an affinity specified in the template| +|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived| +|`arguments`|[`Arguments`](#arguments)|Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{io.argoproj.workflow.v1alpha1.parameters.myparam}}| +|`artifactGC`|[`WorkflowLevelArtifactGC`](#workflowlevelartifactgc)|ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)| +|`artifactRepositoryRef`|[`ArtifactRepositoryRef`](#artifactrepositoryref)|ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.| +|`automountServiceAccountToken`|`boolean`|AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.| +|`dnsConfig`|[`PodDNSConfig`](#poddnsconfig)|PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.| +|`dnsPolicy`|`string`|Set DNS policy for workflow pods. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.| +|`entrypoint`|`string`|Entrypoint is a template reference to the starting point of the io.argoproj.workflow.v1alpha1.| +|`executor`|[`ExecutorConfig`](#executorconfig)|Executor holds configurations of executor containers of the io.argoproj.workflow.v1alpha1.| +|`hooks`|[`LifecycleHook`](#lifecyclehook)|Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step| +|`hostAliases`|`Array<`[`HostAlias`](#hostalias)`>`|_No description available_| +|`hostNetwork`|`boolean`|Host networking requested for this workflow pod. Default to false.| +|`imagePullSecrets`|`Array<`[`LocalObjectReference`](#localobjectreference)`>`|ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod| +|`metrics`|[`Metrics`](#metrics)|Metrics are a list of metrics emitted from this Workflow| +|`nodeSelector`|`Map< string , string >`|NodeSelector is a selector which will result in all pods of the workflow to be scheduled on the selected node(s). This is able to be overridden by a nodeSelector specified in the template.| +|`onExit`|`string`|OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary io.argoproj.workflow.v1alpha1.| +|`parallelism`|`integer`|Parallelism limits the max total parallel pods that can execute at the same time in a workflow| +|`podDisruptionBudget`|[`PodDisruptionBudgetSpec`](#poddisruptionbudgetspec)|PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. Controller will automatically add the selector with workflow name, if selector is empty. Optional: Defaults to empty.| +|`podGC`|[`PodGC`](#podgc)|PodGC describes the strategy to use when deleting completed pods| +|`podMetadata`|[`Metadata`](#metadata)|PodMetadata defines additional metadata that should be applied to workflow pods| +|~~`podPriority`~~|~~`integer`~~|~~Priority to apply to workflow pods.~~ DEPRECATED: Use PodPriorityClassName instead.| +|`podPriorityClassName`|`string`|PriorityClassName to apply to workflow pods.| +|`podSpecPatch`|`string`|PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).| +|`priority`|`integer`|Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.| +|`retryStrategy`|[`RetryStrategy`](#retrystrategy)|RetryStrategy for all templates in the io.argoproj.workflow.v1alpha1.| +|`schedulerName`|`string`|Set scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. Default scheduler will be used if neither specified.| +|`securityContext`|[`PodSecurityContext`](#podsecuritycontext)|SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.| +|`serviceAccountName`|`string`|ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.| +|`shutdown`|`string`|Shutdown will shutdown the workflow according to its ShutdownStrategy| +|`suspend`|`boolean`|Suspend will suspend the workflow and prevent execution of any future steps in the workflow| +|`synchronization`|[`Synchronization`](#synchronization)|Synchronization holds synchronization lock configuration for this Workflow| +|`templateDefaults`|[`Template`](#template)|TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level| +|`templates`|`Array<`[`Template`](#template)`>`|Templates is a list of workflow templates used in a workflow| +|`tolerations`|`Array<`[`Toleration`](#toleration)`>`|Tolerations to apply to workflow pods.| +|`ttlStrategy`|[`TTLStrategy`](#ttlstrategy)|TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be deleted after the time to live expires. If this field is unset, the controller config map will hold the default values.| +|`volumeClaimGC`|[`VolumeClaimGC`](#volumeclaimgc)|VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows| +|`volumeClaimTemplates`|`Array<`[`PersistentVolumeClaim`](#persistentvolumeclaim)`>`|VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow| +|`volumes`|`Array<`[`Volume`](#volume)`>`|Volumes is a list of volumes that can be mounted by containers in a io.argoproj.workflow.v1alpha1.| +|`workflowMetadata`|[`WorkflowMetadata`](#workflowmetadata)|WorkflowMetadata contains some metadata of the workflow to refer to| +|`workflowTemplateRef`|[`WorkflowTemplateRef`](#workflowtemplateref)|WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution| + +## WorkflowStatus + +WorkflowStatus contains overall status information about a workflow + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`artifactGCStatus`|[`ArtGCStatus`](#artgcstatus)|ArtifactGCStatus maintains the status of Artifact Garbage Collection| +|`artifactRepositoryRef`|[`ArtifactRepositoryRefStatus`](#artifactrepositoryrefstatus)|ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.| +|`compressedNodes`|`string`|Compressed and base64 decoded Nodes map| +|`conditions`|`Array<`[`Condition`](#condition)`>`|Conditions is a list of conditions the Workflow may have| +|`estimatedDuration`|`integer`|EstimatedDuration in seconds.| +|`finishedAt`|[`Time`](#time)|Time at which this workflow completed| +|`message`|`string`|A human readable message indicating details about why the workflow is in this condition.| +|`nodes`|[`NodeStatus`](#nodestatus)|Nodes is a mapping between a node ID and the node's status.| +|`offloadNodeStatusVersion`|`string`|Whether on not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty. This will actually be populated with a hash of the offloaded data.| +|`outputs`|[`Outputs`](#outputs)|Outputs captures output values and artifact locations produced by the workflow via global outputs| +|`persistentVolumeClaims`|`Array<`[`Volume`](#volume)`>`|PersistentVolumeClaims tracks all PVCs that were created as part of the io.argoproj.workflow.v1alpha1. The contents of this list are drained at the end of the workflow.| +|`phase`|`string`|Phase a simple, high-level summary of where the workflow is in its lifecycle. Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded", "Failed" or "Error" once the workflow has completed.| +|`progress`|`string`|Progress to completion| +|`resourcesDuration`|`Map< integer , int64 >`|ResourcesDuration is the total for the workflow| +|`startedAt`|[`Time`](#time)|Time at which this workflow started| +|`storedTemplates`|[`Template`](#template)|StoredTemplates is a mapping between a template ref and the node's status.| +|`storedWorkflowTemplateSpec`|[`WorkflowSpec`](#workflowspec)|StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.| +|`synchronization`|[`SynchronizationStatus`](#synchronizationstatus)|Synchronization stores the status of synchronization locks| +|`taskResultsCompletionStatus`|`Map< boolean , string >`|TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection.| + +## CronWorkflowSpec + +CronWorkflowSpec is the specification of a CronWorkflow + +
+Examples with this field (click to open) + +- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) + +- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) + +- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) + +- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) + +- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) + +- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) + +- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) + +- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) + +- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) + +- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) + +- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) + +- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) + +- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) + +- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) + +- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) + +- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) + +- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) + +- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) + +- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`concurrencyPolicy`|`string`|ConcurrencyPolicy is the K8s-style concurrency policy that will be used| +|`failedJobsHistoryLimit`|`integer`|FailedJobsHistoryLimit is the number of failed jobs to be kept at a time| +|`schedule`|`string`|Schedule is a schedule to run the Workflow in Cron format. Deprecated, use Schedules| +|`schedules`|`Array< string >`|v3.6 and after: Schedules is a list of schedules to run the Workflow in Cron format| +|`startingDeadlineSeconds`|`integer`|StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed.| +|`stopStrategy`|[`StopStrategy`](#stopstrategy)|v3.6 and after: StopStrategy defines if the CronWorkflow should stop scheduling based on a condition| +|`successfulJobsHistoryLimit`|`integer`|SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time| +|`suspend`|`boolean`|Suspend is a flag that will stop new CronWorkflows from running if set to true| +|`timezone`|`string`|Timezone is the timezone against which the cron schedule will be calculated, e.g. "Asia/Tokyo". Default is machine's local time.| +|`when`|`string`|v3.6 and after: When is an expression that determines if a run should be scheduled.| +|`workflowMetadata`|[`ObjectMeta`](#objectmeta)|WorkflowMetadata contains some metadata of the workflow to be run| +|`workflowSpec`|[`WorkflowSpec`](#workflowspec)|WorkflowSpec is the spec of the workflow to be run| + +## CronWorkflowStatus + +CronWorkflowStatus is the status of a CronWorkflow + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`active`|`Array<`[`ObjectReference`](#objectreference)`>`|Active is a list of active workflows stemming from this CronWorkflow| +|`conditions`|`Array<`[`Condition`](#condition)`>`|Conditions is a list of conditions the CronWorkflow may have| +|`failed`|`integer`|v3.6 and after: Failed counts how many times child workflows failed| +|`lastScheduledTime`|[`Time`](#time)|LastScheduleTime is the last time the CronWorkflow was scheduled| +|`phase`|`string`|v3.6 and after: Phase is an enum of Active or Stopped. It changes to Stopped when stopStrategy.condition is true| +|`succeeded`|`integer`|v3.6 and after: Succeeded counts how many times child workflows succeeded| + +## Arguments + +Arguments to a template + +
+Examples with this field (click to open) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`artifacts`|`Array<`[`Artifact`](#artifact)`>`|Artifacts is the list of artifacts to pass to the template or workflow| +|`parameters`|`Array<`[`Parameter`](#parameter)`>`|Parameters is the list of parameters to pass to the template or workflow| + +## WorkflowLevelArtifactGC + +WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level + +
+Examples with this field (click to open) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`forceFinalizerRemoval`|`boolean`|ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails| +|`podMetadata`|[`Metadata`](#metadata)|PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion| +|`podSpecPatch`|`string`|PodSpecPatch holds strategic merge patch to apply against the artgc pod spec.| +|`serviceAccountName`|`string`|ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion| +|`strategy`|`string`|Strategy is the strategy to use.| + +## ArtifactRepositoryRef + +_No description available_ + +
+Examples with this field (click to open) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`configMap`|`string`|The name of the config map. Defaults to "artifact-repositories".| +|`key`|`string`|The config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation.| + +## ExecutorConfig + +ExecutorConfig holds configurations of an executor container. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`serviceAccountName`|`string`|ServiceAccountName specifies the service account name of the executor container.| + +## LifecycleHook + +_No description available_ + +
+Examples with this field (click to open) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`arguments`|[`Arguments`](#arguments)|Arguments hold arguments to the template| +|`expression`|`string`|Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored| +|`template`|`string`|Template is the name of the template to execute by the hook| +|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource to execute by the hook| + +## Metrics + +Metrics are a list of metrics emitted from a Workflow/Template + +
+Examples with this field (click to open) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`prometheus`|`Array<`[`Prometheus`](#prometheus)`>`|Prometheus is a list of prometheus metrics to be emitted| + +## PodGC + +PodGC describes how to delete completed pods as they complete + +
+Examples with this field (click to open) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`deleteDelayDuration`|`string`|DeleteDelayDuration specifies the duration before pods in the GC queue get deleted.| +|`labelSelector`|[`LabelSelector`](#labelselector)|LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.| +|`strategy`|`string`|Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods| + +## Metadata + +Pod metdata + +
+Examples with this field (click to open) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`annotations`|`Map< string , string >`|_No description available_| +|`labels`|`Map< string , string >`|_No description available_| + +## RetryStrategy + +RetryStrategy provides controls on how to retry a workflow step + +
+Examples with this field (click to open) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`affinity`|[`RetryAffinity`](#retryaffinity)|Affinity prevents running workflow's step on the same host| +|`backoff`|[`Backoff`](#backoff)|Backoff is a backoff strategy| +|`expression`|`string`|Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored| +|`limit`|[`IntOrString`](#intorstring)|Limit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be `limit + 1`.| +|`retryPolicy`|`string`|RetryPolicy is a policy of NodePhase statuses that will be retried| + +## Synchronization + +Synchronization holds synchronization lock configuration + +
+Examples with this field (click to open) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) + +- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`mutex`|[`Mutex`](#mutex)|Mutex holds the Mutex lock details - deprecated, use mutexes instead| +|`mutexes`|`Array<`[`Mutex`](#mutex)`>`|v3.6 and after: Mutexes holds the list of Mutex lock details| +|`semaphore`|[`SemaphoreRef`](#semaphoreref)|Semaphore holds the Semaphore configuration - deprecated, use semaphores instead| +|`semaphores`|`Array<`[`SemaphoreRef`](#semaphoreref)`>`|v3.6 and after: Semaphores holds the list of Semaphores configuration| + +## Template + +Template is a reusable and composable unit of execution in a workflow + +
+Examples with this field (click to open) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`activeDeadlineSeconds`|[`IntOrString`](#intorstring)|Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.| +|`affinity`|[`Affinity`](#affinity)|Affinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)| +|`archiveLocation`|[`ArtifactLocation`](#artifactlocation)|Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. If omitted, will use the default artifact repository location configured in the controller, appended with the / in the key.| +|`automountServiceAccountToken`|`boolean`|AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.| +|`container`|[`Container`](#container)|Container is the main container image to run in the pod| +|`containerSet`|[`ContainerSetTemplate`](#containersettemplate)|ContainerSet groups multiple containers within a single pod.| +|`daemon`|`boolean`|Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness| +|`dag`|[`DAGTemplate`](#dagtemplate)|DAG template subtype which runs a DAG| +|`data`|[`Data`](#data)|Data is a data template| +|`executor`|[`ExecutorConfig`](#executorconfig)|Executor holds configurations of the executor container.| +|`failFast`|`boolean`|FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this template is expanded with `withItems`, etc.| +|`hostAliases`|`Array<`[`HostAlias`](#hostalias)`>`|HostAliases is an optional list of hosts and IPs that will be injected into the pod spec| +|`http`|[`HTTP`](#http)|HTTP makes a HTTP request| +|`initContainers`|`Array<`[`UserContainer`](#usercontainer)`>`|InitContainers is a list of containers which run before the main container.| +|`inputs`|[`Inputs`](#inputs)|Inputs describe what inputs parameters and artifacts are supplied to this template| +|`memoize`|[`Memoize`](#memoize)|Memoize allows templates to use outputs generated from already executed templates| +|`metadata`|[`Metadata`](#metadata)|Metdata sets the pods's metadata, i.e. annotations and labels| +|`metrics`|[`Metrics`](#metrics)|Metrics are a list of metrics emitted from this template| +|`name`|`string`|Name is the name of the template| +|`nodeSelector`|`Map< string , string >`|NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.| +|`outputs`|[`Outputs`](#outputs)|Outputs describe the parameters and artifacts that this template produces| +|`parallelism`|`integer`|Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.| +|`plugin`|[`Plugin`](#plugin)|Plugin is a plugin template| +|`podSpecPatch`|`string`|PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).| +|`priority`|`integer`|Priority to apply to workflow pods.| +|`priorityClassName`|`string`|PriorityClassName to apply to workflow pods.| +|`resource`|[`ResourceTemplate`](#resourcetemplate)|Resource template subtype which can run k8s resources| +|`retryStrategy`|[`RetryStrategy`](#retrystrategy)|RetryStrategy describes how to retry a template when it fails| +|`schedulerName`|`string`|If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.| +|`script`|[`ScriptTemplate`](#scripttemplate)|Script runs a portion of code against an interpreter| +|`securityContext`|[`PodSecurityContext`](#podsecuritycontext)|SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.| +|`serviceAccountName`|`string`|ServiceAccountName to apply to workflow pods| +|`sidecars`|`Array<`[`UserContainer`](#usercontainer)`>`|Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes| +|`steps`|`Array>`|Steps define a series of sequential/parallel workflow steps| +|`suspend`|[`SuspendTemplate`](#suspendtemplate)|Suspend template subtype which can suspend a workflow when reaching the step| +|`synchronization`|[`Synchronization`](#synchronization)|Synchronization holds synchronization lock configuration for this template| +|`timeout`|`string`|Timeout allows to set the total node execution timeout duration counting from the node's start time. This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates.| +|`tolerations`|`Array<`[`Toleration`](#toleration)`>`|Tolerations to apply to workflow pods.| +|`volumes`|`Array<`[`Volume`](#volume)`>`|Volumes is a list of volumes that can be mounted by containers in a template.| + +## TTLStrategy + +TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed + +
+Examples with this field (click to open) + +- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`secondsAfterCompletion`|`integer`|SecondsAfterCompletion is the number of seconds to live after completion| +|`secondsAfterFailure`|`integer`|SecondsAfterFailure is the number of seconds to live after failure| +|`secondsAfterSuccess`|`integer`|SecondsAfterSuccess is the number of seconds to live after success| + +## VolumeClaimGC + +VolumeClaimGC describes how to delete volumes from completed Workflows + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`strategy`|`string`|Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess"| + +## WorkflowMetadata + +_No description available_ + +
+Examples with this field (click to open) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`annotations`|`Map< string , string >`|_No description available_| +|`labels`|`Map< string , string >`|_No description available_| +|`labelsFrom`|[`LabelValueFrom`](#labelvaluefrom)|_No description available_| + +## WorkflowTemplateRef + +WorkflowTemplateRef is a reference to a WorkflowTemplate resource. + +
+Examples with this field (click to open) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`clusterScope`|`boolean`|ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).| +|`name`|`string`|Name is the resource name of the workflow template.| + +## ArtGCStatus + +ArtGCStatus maintains state related to ArtifactGC + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`notSpecified`|`boolean`|if this is true, we already checked to see if we need to do it and we don't| +|`podsRecouped`|`Map< boolean , string >`|have completed Pods been processed? (mapped by Pod name) used to prevent re-processing the Status of a Pod more than once| +|`strategiesProcessed`|`Map< boolean , string >`|have Pods been started to perform this strategy? (enables us not to re-process what we've already done)| + +## ArtifactRepositoryRefStatus + +_No description available_ + +
+Examples with this field (click to open) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`artifactRepository`|[`ArtifactRepository`](#artifactrepository)|The repository the workflow will use. This maybe empty before v3.1.| +|`configMap`|`string`|The name of the config map. Defaults to "artifact-repositories".| +|`default`|`boolean`|If this ref represents the default artifact repository, rather than a config map.| +|`key`|`string`|The config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation.| +|`namespace`|`string`|The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found).| + +## Condition + +_No description available_ + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`message`|`string`|Message is the condition message| +|`status`|`string`|Status is the status of the condition| +|`type`|`string`|Type is the type of condition| + +## NodeStatus + +NodeStatus contains status information about an individual node in the workflow + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`boundaryID`|`string`|BoundaryID indicates the node ID of the associated template root node in which this node belongs to| +|`children`|`Array< string >`|Children is a list of child node IDs| +|`daemoned`|`boolean`|Daemoned tracks whether or not this node was daemoned and need to be terminated| +|`displayName`|`string`|DisplayName is a human readable representation of the node. Unique within a template boundary| +|`estimatedDuration`|`integer`|EstimatedDuration in seconds.| +|`finishedAt`|[`Time`](#time)|Time at which this node completed| +|`hostNodeName`|`string`|HostNodeName name of the Kubernetes node on which the Pod is running, if applicable| +|`id`|`string`|ID is a unique identifier of a node within the worklow It is implemented as a hash of the node name, which makes the ID deterministic| +|`inputs`|[`Inputs`](#inputs)|Inputs captures input parameter values and artifact locations supplied to this template invocation| +|`memoizationStatus`|[`MemoizationStatus`](#memoizationstatus)|MemoizationStatus holds information about cached nodes| +|`message`|`string`|A human readable message indicating details about why the node is in this condition.| +|`name`|`string`|Name is unique name in the node tree used to generate the node ID| +|`nodeFlag`|[`NodeFlag`](#nodeflag)|NodeFlag tracks some history of node. e.g.) hooked, retried, etc.| +|`outboundNodes`|`Array< string >`|OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation. For every invocation of a template, there are nodes which we considered as "outbound". Essentially, these are last nodes in the execution sequence to run, before the template is considered completed. These nodes are then connected as parents to a following step. In the case of single pod steps (i.e. container, script, resource templates), this list will be nil since the pod itself is already considered the "outbound" node. In the case of DAGs, outbound nodes are the "target" tasks (tasks with no children). In the case of steps, outbound nodes are all the containers involved in the last step group. NOTE: since templates are composable, the list of outbound nodes are carried upwards when a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of a template, will be a superset of the outbound nodes of its last children.| +|`outputs`|[`Outputs`](#outputs)|Outputs captures output parameter values and artifact locations produced by this template invocation| +|`phase`|`string`|Phase a simple, high-level summary of where the node is in its lifecycle. Can be used as a state machine. Will be one of these values "Pending", "Running" before the node is completed, or "Succeeded", "Skipped", "Failed", "Error", or "Omitted" as a final state.| +|`podIP`|`string`|PodIP captures the IP of the pod for daemoned steps| +|`progress`|`string`|Progress to completion| +|`resourcesDuration`|`Map< integer , int64 >`|ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the nodes completes.| +|`startedAt`|[`Time`](#time)|Time at which this node started| +|`synchronizationStatus`|[`NodeSynchronizationStatus`](#nodesynchronizationstatus)|SynchronizationStatus is the synchronization status of the node| +|`templateName`|`string`|TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)| +|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)| +|`templateScope`|`string`|TemplateScope is the template scope in which the template of this node was retrieved.| +|`type`|`string`|Type indicates type of node| + +## Outputs + +Outputs hold parameters, artifacts, and results from a step + +
+Examples with this field (click to open) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`artifacts`|`Array<`[`Artifact`](#artifact)`>`|Artifacts holds the list of output artifacts produced by a step| +|`exitCode`|`string`|ExitCode holds the exit code of a script template| +|`parameters`|`Array<`[`Parameter`](#parameter)`>`|Parameters holds the list of output parameters produced by a step| +|`result`|`string`|Result holds the result (stdout) of a script template| + +## SynchronizationStatus + +SynchronizationStatus stores the status of semaphore and mutex. + +
+Examples with this field (click to open) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) + +- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`mutex`|[`MutexStatus`](#mutexstatus)|Mutex stores this workflow's mutex holder details| +|`semaphore`|[`SemaphoreStatus`](#semaphorestatus)|Semaphore stores this workflow's Semaphore holder details| + +## StopStrategy + +v3.6 and after: StopStrategy defines if the CronWorkflow should stop scheduling based on a condition + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`condition`|`string`|v3.6 and after: Condition is an expression that stops scheduling workflows when true. Use the variables `failed` or `succeeded` to access the number of failed or successful child workflows.| + +## Artifact + +Artifact indicates an artifact to place at a specified path + +
+Examples with this field (click to open) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) + +- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`archive`|[`ArchiveStrategy`](#archivestrategy)|Archive controls how the artifact will be saved to the artifact repository.| +|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived| +|`artifactGC`|[`ArtifactGC`](#artifactgc)|ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows| +|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details| +|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details| +|`deleted`|`boolean`|Has this been deleted?| +|`from`|`string`|From allows an artifact to reference an artifact from a previous step| +|`fromExpression`|`string`|FromExpression, if defined, is evaluated to specify the value for the artifact| +|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details| +|`git`|[`GitArtifact`](#gitartifact)|Git contains git artifact location details| +|`globalName`|`string`|GlobalName exports an output artifact to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts| +|`hdfs`|[`HDFSArtifact`](#hdfsartifact)|HDFS contains HDFS artifact location details| +|`http`|[`HTTPArtifact`](#httpartifact)|HTTP contains HTTP artifact location details| +|`mode`|`integer`|mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.| +|`name`|`string`|name of the artifact. must be unique within a template's inputs/outputs.| +|`optional`|`boolean`|Make Artifacts optional, if Artifacts doesn't generate or exist| +|`oss`|[`OSSArtifact`](#ossartifact)|OSS contains OSS artifact location details| +|`path`|`string`|Path is the container path to the artifact| +|`raw`|[`RawArtifact`](#rawartifact)|Raw contains raw artifact location details| +|`recurseMode`|`boolean`|If mode is set, apply the permission recursively into the artifact if it is a folder| +|`s3`|[`S3Artifact`](#s3artifact)|S3 contains S3 artifact location details| +|`subPath`|`string`|SubPath allows an artifact to be sourced from a subpath within the specified source| + +## Parameter + +Parameter indicate a passed string parameter to a service template with an optional default value + +
+Examples with this field (click to open) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`default`|`string`|Default is the default value to use for an input parameter if a value was not supplied| +|`description`|`string`|Description is the parameter description| +|`enum`|`Array< string >`|Enum holds a list of string values to choose from, for the actual value of the parameter| +|`globalName`|`string`|GlobalName exports an output parameter to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters| +|`name`|`string`|Name is the parameter name| +|`value`|`string`|Value is the literal value to use for the parameter. If specified in the context of an input parameter, the value takes precedence over any passed values| +|`valueFrom`|[`ValueFrom`](#valuefrom)|ValueFrom is the source for the output parameter's value| + +## TemplateRef + +TemplateRef is a reference of template resource. + +
+Examples with this field (click to open) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`clusterScope`|`boolean`|ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).| +|`name`|`string`|Name is the resource name of the template.| +|`template`|`string`|Template is the name of referred template in the resource.| + +## Prometheus + +Prometheus is a prometheus metric to be emitted + +
+Examples with this field (click to open) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`counter`|[`Counter`](#counter)|Counter is a counter metric| +|`gauge`|[`Gauge`](#gauge)|Gauge is a gauge metric| +|`help`|`string`|Help is a string that describes the metric| +|`histogram`|[`Histogram`](#histogram)|Histogram is a histogram metric| +|`labels`|`Array<`[`MetricLabel`](#metriclabel)`>`|Labels is a list of metric labels| +|`name`|`string`|Name is the name of the metric| +|`when`|`string`|When is a conditional statement that decides when to emit the metric| + +## RetryAffinity + +RetryAffinity prevents running steps on the same host. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`nodeAntiAffinity`|[`RetryNodeAntiAffinity`](#retrynodeantiaffinity)|_No description available_| + +## Backoff + +Backoff is a backoff strategy to use within retryStrategy + +
+Examples with this field (click to open) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`duration`|`string`|Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h")| +|`factor`|[`IntOrString`](#intorstring)|Factor is a factor to multiply the base duration after each failed retry| +|`maxDuration`|`string`|MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. However, when the workflow fails, the pod's deadline is then overridden by maxDuration. This ensures that the workflow does not exceed the specified maximum duration when retries are involved.| + +## Mutex + +Mutex holds Mutex configuration + +
+Examples with this field (click to open) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|name of the mutex| +|`namespace`|`string`|Namespace is the namespace of the mutex, default: [namespace of workflow]| + +## SemaphoreRef + +SemaphoreRef is a reference of Semaphore + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`configMapKeyRef`|[`ConfigMapKeySelector`](#configmapkeyselector)|ConfigMapKeyRef is configmap selector for Semaphore configuration| +|`namespace`|`string`|Namespace is the namespace of the configmap, default: [namespace of workflow]| + +## ArtifactLocation + +ArtifactLocation describes a location for a single or multiple artifacts. It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). It is also used to describe the location of multiple artifacts such as the archive location of a single workflow step, which the executor will use as a default location to store its files. + +
+Examples with this field (click to open) + +- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived| +|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details| +|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details| +|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details| +|`git`|[`GitArtifact`](#gitartifact)|Git contains git artifact location details| +|`hdfs`|[`HDFSArtifact`](#hdfsartifact)|HDFS contains HDFS artifact location details| +|`http`|[`HTTPArtifact`](#httpartifact)|HTTP contains HTTP artifact location details| +|`oss`|[`OSSArtifact`](#ossartifact)|OSS contains OSS artifact location details| +|`raw`|[`RawArtifact`](#rawartifact)|Raw contains raw artifact location details| +|`s3`|[`S3Artifact`](#s3artifact)|S3 contains S3 artifact location details| + +## ContainerSetTemplate + +_No description available_ + +
+Examples with this field (click to open) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) + +- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`containers`|`Array<`[`ContainerNode`](#containernode)`>`|_No description available_| +|`retryStrategy`|[`ContainerSetRetryStrategy`](#containersetretrystrategy)|RetryStrategy describes how to retry container nodes if the container set fails. Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers.| +|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|_No description available_| + +## DAGTemplate + +DAGTemplate is a template subtype for directed acyclic graph templates + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) + +- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`failFast`|`boolean`|This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps, as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed before failing the DAG itself. The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to completion (either success or failure), regardless of the failed outcomes of branches in the DAG. More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442| +|`target`|`string`|Target are one or more names of targets to execute in a DAG| +|`tasks`|`Array<`[`DAGTask`](#dagtask)`>`|Tasks are a list of DAG tasks| + +## Data + +Data is a data template + +
+Examples with this field (click to open) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`source`|[`DataSource`](#datasource)|Source sources external data into a data template| +|`transformation`|`Array<`[`TransformationStep`](#transformationstep)`>`|Transformation applies a set of transformations| + +## HTTP + +_No description available_ + +
+Examples with this field (click to open) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) + +- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`body`|`string`|Body is content of the HTTP Request| +|`bodyFrom`|[`HTTPBodySource`](#httpbodysource)|BodyFrom is content of the HTTP Request as Bytes| +|`headers`|`Array<`[`HTTPHeader`](#httpheader)`>`|Headers are an optional list of headers to send with HTTP requests| +|`insecureSkipVerify`|`boolean`|InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client| +|`method`|`string`|Method is HTTP methods for HTTP Request| +|`successCondition`|`string`|SuccessCondition is an expression if evaluated to true is considered successful| +|`timeoutSeconds`|`integer`|TimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds| +|`url`|`string`|URL of the HTTP Request| + +## UserContainer + +UserContainer is a container specified by a user. + +
+Examples with this field (click to open) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| +|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| +|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| +|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| +|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| +|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`mirrorVolumeMounts`|`boolean`|MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding| +|`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| +|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| +|`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| +|`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| +|`restartPolicy`|`string`|RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.| +|`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| +|`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| +|`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| +|`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| +|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| +|`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| +|`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| +|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| +|`workingDir`|`string`|Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.| + +## Inputs + +Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another + +
+Examples with this field (click to open) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) + +- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`artifacts`|`Array<`[`Artifact`](#artifact)`>`|Artifact are a list of artifacts passed as inputs| +|`parameters`|`Array<`[`Parameter`](#parameter)`>`|Parameters are a list of parameters passed as inputs| + +## Memoize + +Memoization enables caching for the Outputs of the template + +
+Examples with this field (click to open) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`cache`|[`Cache`](#cache)|Cache sets and configures the kind of cache| +|`key`|`string`|Key is the key to use as the caching key| +|`maxAge`|`string`|MaxAge is the maximum age (e.g. "180s", "24h") of an entry that is still considered valid. If an entry is older than the MaxAge, it will be ignored.| + +## Plugin + +Plugin is an Object with exactly one key + +## ResourceTemplate + +ResourceTemplate is a template subtype to manipulate kubernetes resources + +
+Examples with this field (click to open) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) + +- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) + +- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) + +- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`action`|`string`|Action is the action to perform to the resource. Must be one of: get, create, apply, delete, replace, patch| +|`failureCondition`|`string`|FailureCondition is a label selector expression which describes the conditions of the k8s resource in which the step was considered failed| +|`flags`|`Array< string >`|Flags is a set of additional options passed to kubectl before submitting a resource I.e. to disable resource validation: flags: [ "--validate=false" # disable resource validation ]| +|`manifest`|`string`|Manifest contains the kubernetes manifest| +|`manifestFrom`|[`ManifestFrom`](#manifestfrom)|ManifestFrom is the source for a single kubernetes manifest| +|`mergeStrategy`|`string`|MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" Must be one of: strategic, merge, json| +|`setOwnerReference`|`boolean`|SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.| +|`successCondition`|`string`|SuccessCondition is a label selector expression which describes the conditions of the k8s resource in which it is acceptable to proceed to the following step| + +## ScriptTemplate + +ScriptTemplate is a template subtype to enable scripting through code steps + +
+Examples with this field (click to open) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| +|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| +|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| +|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| +|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| +|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| +|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| +|`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| +|`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| +|`restartPolicy`|`string`|RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.| +|`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| +|`source`|`string`|Source contains the source code of the script to execute| +|`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| +|`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| +|`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| +|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| +|`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| +|`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| +|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| +|`workingDir`|`string`|Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.| + +## WorkflowStep + +WorkflowStep is a reference to a template to execute in a series of step + +
+Examples with this field (click to open) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) + +- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`arguments`|[`Arguments`](#arguments)|Arguments hold arguments to the template| +|`continueOn`|[`ContinueOn`](#continueon)|ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified| +|`hooks`|[`LifecycleHook`](#lifecyclehook)|Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step| +|`inline`|[`Template`](#template)|Inline is the template. Template must be empty if this is declared (and vice-versa).| +|`name`|`string`|Name of the step| +|~~`onExit`~~|~~`string`~~|~~OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template.~~ DEPRECATED: Use Hooks[exit].Template instead.| +|`template`|`string`|Template is the name of the template to execute as the step| +|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource to execute as the step.| +|`when`|`string`|When is an expression in which the step should conditionally execute| +|`withItems`|`Array<`[`Item`](#item)`>`|WithItems expands a step into multiple parallel steps from the items in the list| +|`withParam`|`string`|WithParam expands a step into multiple parallel steps from the value in the parameter, which is expected to be a JSON list.| +|`withSequence`|[`Sequence`](#sequence)|WithSequence expands a step into a numeric sequence| + +## SuspendTemplate + +SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time + +
+Examples with this field (click to open) + +- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) + +- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`duration`|`string`|Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h"| + +## LabelValueFrom + +_No description available_ + +
+Examples with this field (click to open) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`expression`|`string`|_No description available_| + +## ArtifactRepository + +ArtifactRepository represents an artifact repository in which a controller will store its artifacts + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`archiveLogs`|`boolean`|ArchiveLogs enables log archiving| +|`artifactory`|[`ArtifactoryArtifactRepository`](#artifactoryartifactrepository)|Artifactory stores artifacts to JFrog Artifactory| +|`azure`|[`AzureArtifactRepository`](#azureartifactrepository)|Azure stores artifact in an Azure Storage account| +|`gcs`|[`GCSArtifactRepository`](#gcsartifactrepository)|GCS stores artifact in a GCS object store| +|`hdfs`|[`HDFSArtifactRepository`](#hdfsartifactrepository)|HDFS stores artifacts in HDFS| +|`oss`|[`OSSArtifactRepository`](#ossartifactrepository)|OSS stores artifact in a OSS-compliant object store| +|`s3`|[`S3ArtifactRepository`](#s3artifactrepository)|S3 stores artifact in a S3-compliant object store| + +## MemoizationStatus + +MemoizationStatus is the status of this memoized node + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`cacheName`|`string`|Cache is the name of the cache that was used| +|`hit`|`boolean`|Hit indicates whether this node was created from a cache entry| +|`key`|`string`|Key is the name of the key used for this node's cache| + +## NodeFlag + +_No description available_ + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`hooked`|`boolean`|Hooked tracks whether or not this node was triggered by hook or onExit| +|`retried`|`boolean`|Retried tracks whether or not this node was retried by retryStrategy| + +## NodeSynchronizationStatus + +NodeSynchronizationStatus stores the status of a node + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`waiting`|`string`|Waiting is the name of the lock that this node is waiting for| + +## MutexStatus + +MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks. + +
+Examples with this field (click to open) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`holding`|`Array<`[`MutexHolding`](#mutexholding)`>`|Holding is a list of mutexes and their respective objects that are held by mutex lock for this io.argoproj.workflow.v1alpha1.| +|`waiting`|`Array<`[`MutexHolding`](#mutexholding)`>`|Waiting is a list of mutexes and their respective objects this workflow is waiting for.| + +## SemaphoreStatus + +_No description available_ + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`holding`|`Array<`[`SemaphoreHolding`](#semaphoreholding)`>`|Holding stores the list of resource acquired synchronization lock for workflows.| +|`waiting`|`Array<`[`SemaphoreHolding`](#semaphoreholding)`>`|Waiting indicates the list of current synchronization lock holders.| + +## ArchiveStrategy + +ArchiveStrategy describes how to archive files/directory when saving artifacts + +
+Examples with this field (click to open) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`none`|[`NoneStrategy`](#nonestrategy)|_No description available_| +|`tar`|[`TarStrategy`](#tarstrategy)|_No description available_| +|`zip`|[`ZipStrategy`](#zipstrategy)|_No description available_| + +## ArtifactGC + +ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed + +
+Examples with this field (click to open) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`podMetadata`|[`Metadata`](#metadata)|PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion| +|`serviceAccountName`|`string`|ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion| +|`strategy`|`string`|Strategy is the strategy to use.| + +## ArtifactoryArtifact + +ArtifactoryArtifact is the location of an artifactory artifact + +
+Examples with this field (click to open) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password| +|`url`|`string`|URL of the artifact| +|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username| + +## AzureArtifact + +AzureArtifact is the location of a an Azure Storage artifact + +
+Examples with this field (click to open) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`accountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccountKeySecret is the secret selector to the Azure Blob Storage account access key| +|`blob`|`string`|Blob is the blob name (i.e., path) in the container where the artifact resides| +|`container`|`string`|Container is the container where resources will be stored| +|`endpoint`|`string`|Endpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net"| +|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| + +## GCSArtifact + +GCSArtifact is the location of a GCS artifact + +
+Examples with this field (click to open) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`bucket`|`string`|Bucket is the name of the bucket| +|`key`|`string`|Key is the path in the bucket where the artifact resides| +|`serviceAccountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|ServiceAccountKeySecret is the secret selector to the bucket's service account key| + +## GitArtifact + +GitArtifact is the location of an git artifact + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`branch`|`string`|Branch is the branch to fetch when `SingleBranch` is enabled| +|`depth`|`integer`|Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip| +|`disableSubmodules`|`boolean`|DisableSubmodules disables submodules during git clone| +|`fetch`|`Array< string >`|Fetch specifies a number of refs that should be fetched before checkout| +|`insecureIgnoreHostKey`|`boolean`|InsecureIgnoreHostKey disables SSH strict host key checking during git clone| +|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password| +|`repo`|`string`|Repo is the git repository| +|`revision`|`string`|Revision is the git commit, tag, branch to checkout| +|`singleBranch`|`boolean`|SingleBranch enables single branch clone, using the `branch` parameter| +|`sshPrivateKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SSHPrivateKeySecret is the secret selector to the repository ssh private key| +|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username| + +## HDFSArtifact + +HDFSArtifact is the location of an HDFS artifact + +
+Examples with this field (click to open) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`addresses`|`Array< string >`|Addresses is accessible addresses of HDFS name nodes| +|`dataTransferProtection`|`string`|DataTransferProtection is the protection level for HDFS data transfer. It corresponds to the dfs.data.transfer.protection configuration in HDFS.| +|`force`|`boolean`|Force copies a file forcibly even if it exists| +|`hdfsUser`|`string`|HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.| +|`krbCCacheSecret`|[`SecretKeySelector`](#secretkeyselector)|KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.| +|`krbConfigConfigMap`|[`ConfigMapKeySelector`](#configmapkeyselector)|KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.| +|`krbKeytabSecret`|[`SecretKeySelector`](#secretkeyselector)|KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.| +|`krbRealm`|`string`|KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.| +|`krbServicePrincipalName`|`string`|KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.| +|`krbUsername`|`string`|KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.| +|`path`|`string`|Path is a file path in HDFS| + +## HTTPArtifact + +HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container + +
+Examples with this field (click to open) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) + +- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`auth`|[`HTTPAuth`](#httpauth)|Auth contains information for client authentication| +|`headers`|`Array<`[`Header`](#header)`>`|Headers are an optional list of headers to send with HTTP requests for artifacts| +|`url`|`string`|URL of the artifact| + +## OSSArtifact + +OSSArtifact is the location of an Alibaba Cloud OSS artifact + +
+Examples with this field (click to open) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`accessKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccessKeySecret is the secret selector to the bucket's access key| +|`bucket`|`string`|Bucket is the name of the bucket| +|`createBucketIfNotPresent`|`boolean`|CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist| +|`endpoint`|`string`|Endpoint is the hostname of the bucket endpoint| +|`key`|`string`|Key is the path in the bucket where the artifact resides| +|`lifecycleRule`|[`OSSLifecycleRule`](#osslifecyclerule)|LifecycleRule specifies how to manage bucket's lifecycle| +|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key| +|`securityToken`|`string`|SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm| +|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| + +## RawArtifact + +RawArtifact allows raw string content to be placed as an artifact in a container + +
+Examples with this field (click to open) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`data`|`string`|Data is the string contents of the artifact| + +## S3Artifact + +S3Artifact is the location of an S3 artifact + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`accessKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccessKeySecret is the secret selector to the bucket's access key| +|`bucket`|`string`|Bucket is the name of the bucket| +|`caSecret`|[`SecretKeySelector`](#secretkeyselector)|CASecret specifies the secret that contains the CA, used to verify the TLS connection| +|`createBucketIfNotPresent`|[`CreateS3BucketOptions`](#creates3bucketoptions)|CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.| +|`encryptionOptions`|[`S3EncryptionOptions`](#s3encryptionoptions)|_No description available_| +|`endpoint`|`string`|Endpoint is the hostname of the bucket endpoint| +|`insecure`|`boolean`|Insecure will connect to the service with TLS| +|`key`|`string`|Key is the key in the bucket where the artifact resides| +|`region`|`string`|Region contains the optional bucket region| +|`roleARN`|`string`|RoleARN is the Amazon Resource Name (ARN) of the role to assume.| +|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key| +|`sessionTokenSecret`|[`SecretKeySelector`](#secretkeyselector)|SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant| +|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| + +## ValueFrom + +ValueFrom describes a location in which to obtain the value to a parameter + +
+Examples with this field (click to open) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`configMapKeyRef`|[`ConfigMapKeySelector`](#configmapkeyselector)|ConfigMapKeyRef is configmap selector for input parameter configuration| +|`default`|`string`|Default specifies a value to be used if retrieving the value from the specified source fails| +|`event`|`string`|Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message`| +|`expression`|`string`|Expression, if defined, is evaluated to specify the value for the parameter| +|`jqFilter`|`string`|JQFilter expression against the resource object in resource templates| +|`jsonPath`|`string`|JSONPath of a resource to retrieve an output parameter value from in resource templates| +|`parameter`|`string`|Parameter reference to a step or dag task in which to retrieve an output parameter value from (e.g. '{{steps.mystep.outputs.myparam}}')| +|`path`|`string`|Path in the container to retrieve an output parameter value from in container templates| +|`supplied`|[`SuppliedValueFrom`](#suppliedvaluefrom)|Supplied value to be filled in directly, either through the CLI, API, etc.| + +## Counter + +Counter is a Counter prometheus metric + +
+Examples with this field (click to open) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`value`|`string`|Value is the value of the metric| + +## Gauge + +Gauge is a Gauge prometheus metric + +
+Examples with this field (click to open) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`operation`|`string`|Operation defines the operation to apply with value and the metrics' current value| +|`realtime`|`boolean`|Realtime emits this metric in real time if applicable| +|`value`|`string`|Value is the value to be used in the operation with the metric's current value. If no operation is set, value is the value of the metric| + +## Histogram + +Histogram is a Histogram prometheus metric + +
+Examples with this field (click to open) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`buckets`|`Array<`[`Amount`](#amount)`>`|Buckets is a list of bucket divisors for the histogram| +|`value`|`string`|Value is the value of the metric| + +## MetricLabel + +MetricLabel is a single label for a prometheus metric + +
+Examples with this field (click to open) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) + +- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) + +- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) + +- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) + +- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`key`|`string`|_No description available_| +|`value`|`string`|_No description available_| + +## RetryNodeAntiAffinity + +RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. In order to prevent running steps on the same host, it uses "kubernetes.io/hostname". + +## ContainerNode + +_No description available_ + +
+Examples with this field (click to open) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) + +- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`dependencies`|`Array< string >`|_No description available_| +|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| +|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| +|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| +|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| +|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| +|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| +|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| +|`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| +|`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| +|`restartPolicy`|`string`|RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.| +|`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| +|`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| +|`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| +|`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| +|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| +|`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| +|`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| +|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| +|`workingDir`|`string`|Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.| + +## ContainerSetRetryStrategy + +ContainerSetRetryStrategy provides controls on how to retry a container set + +
+Examples with this field (click to open) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`duration`|`string`|Duration is the time between each retry, examples values are "300ms", "1s" or "5m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".| +|`retries`|[`IntOrString`](#intorstring)|Retries is the maximum number of retry attempts for each container. It does not include the first, original attempt; the maximum number of total attempts will be `retries + 1`.| + +## DAGTask + +DAGTask represents a node in the graph during DAG execution + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) + +- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`arguments`|[`Arguments`](#arguments)|Arguments are the parameter and artifact arguments to the template| +|`continueOn`|[`ContinueOn`](#continueon)|ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified| +|`dependencies`|`Array< string >`|Dependencies are name of other targets which this depends on| +|`depends`|`string`|Depends are name of other targets which this depends on| +|`hooks`|[`LifecycleHook`](#lifecyclehook)|Hooks hold the lifecycle hook which is invoked at lifecycle of task, irrespective of the success, failure, or error status of the primary task| +|`inline`|[`Template`](#template)|Inline is the template. Template must be empty if this is declared (and vice-versa).| +|`name`|`string`|Name is the name of the target| +|~~`onExit`~~|~~`string`~~|~~OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template.~~ DEPRECATED: Use Hooks[exit].Template instead.| +|`template`|`string`|Name of template to execute| +|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource to execute.| +|`when`|`string`|When is an expression in which the task should conditionally execute| +|`withItems`|`Array<`[`Item`](#item)`>`|WithItems expands a task into multiple parallel tasks from the items in the list| +|`withParam`|`string`|WithParam expands a task into multiple parallel tasks from the value in the parameter, which is expected to be a JSON list.| +|`withSequence`|[`Sequence`](#sequence)|WithSequence expands a task into a numeric sequence| + +## DataSource + +DataSource sources external data into a data template + +
+Examples with this field (click to open) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`artifactPaths`|[`ArtifactPaths`](#artifactpaths)|ArtifactPaths is a data transformation that collects a list of artifact paths| + +## TransformationStep + +_No description available_ + +
+Examples with this field (click to open) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`expression`|`string`|Expression defines an expr expression to apply| + +## HTTPBodySource + +HTTPBodySource contains the source of the HTTP body. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`bytes`|`byte`|_No description available_| + +## HTTPHeader + +_No description available_ + +
+Examples with this field (click to open) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|_No description available_| +|`value`|`string`|_No description available_| +|`valueFrom`|[`HTTPHeaderSource`](#httpheadersource)|_No description available_| + +## Cache + +Cache is the configuration for the type of cache to be used + +
+Examples with this field (click to open) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`configMap`|[`ConfigMapKeySelector`](#configmapkeyselector)|ConfigMap sets a ConfigMap-based cache| + +## ManifestFrom + +_No description available_ + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`artifact`|[`Artifact`](#artifact)|Artifact contains the artifact to use| + +## ContinueOn + +ContinueOn defines if a workflow should continue even if a task or step fails/errors. It can be specified if the workflow should continue when the pod errors, fails or both. + +
+Examples with this field (click to open) + +- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`error`|`boolean`|_No description available_| +|`failed`|`boolean`|_No description available_| + +## Item + +Item expands a single workflow step into multiple parallel steps The value of Item can be a map, string, bool, or number + +
+Examples with this field (click to open) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) + +- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) +
+ +## Sequence + +Sequence expands a workflow step into numeric range + +
+Examples with this field (click to open) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`count`|[`IntOrString`](#intorstring)|Count is number of elements in the sequence (default: 0). Not to be used with end| +|`end`|[`IntOrString`](#intorstring)|Number at which to end the sequence (default: 0). Not to be used with Count| +|`format`|`string`|Format is a printf format string to format the value in the sequence| +|`start`|[`IntOrString`](#intorstring)|Number at which to start the sequence (default: 0)| + +## ArtifactoryArtifactRepository + +ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository + +
+Examples with this field (click to open) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`keyFormat`|`string`|KeyFormat defines the format of how to store keys and can reference workflow variables.| +|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password| +|`repoURL`|`string`|RepoURL is the url for artifactory repo.| +|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username| + +## AzureArtifactRepository + +AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository + +
+Examples with this field (click to open) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`accountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccountKeySecret is the secret selector to the Azure Blob Storage account access key| +|`blobNameFormat`|`string`|BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables| +|`container`|`string`|Container is the container where resources will be stored| +|`endpoint`|`string`|Endpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net"| +|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| + +## GCSArtifactRepository + +GCSArtifactRepository defines the controller configuration for a GCS artifact repository + +
+Examples with this field (click to open) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`bucket`|`string`|Bucket is the name of the bucket| +|`keyFormat`|`string`|KeyFormat defines the format of how to store keys and can reference workflow variables.| +|`serviceAccountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|ServiceAccountKeySecret is the secret selector to the bucket's service account key| + +## HDFSArtifactRepository + +HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository + +
+Examples with this field (click to open) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`addresses`|`Array< string >`|Addresses is accessible addresses of HDFS name nodes| +|`dataTransferProtection`|`string`|DataTransferProtection is the protection level for HDFS data transfer. It corresponds to the dfs.data.transfer.protection configuration in HDFS.| +|`force`|`boolean`|Force copies a file forcibly even if it exists| +|`hdfsUser`|`string`|HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.| +|`krbCCacheSecret`|[`SecretKeySelector`](#secretkeyselector)|KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.| +|`krbConfigConfigMap`|[`ConfigMapKeySelector`](#configmapkeyselector)|KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.| +|`krbKeytabSecret`|[`SecretKeySelector`](#secretkeyselector)|KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.| +|`krbRealm`|`string`|KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.| +|`krbServicePrincipalName`|`string`|KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.| +|`krbUsername`|`string`|KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.| +|`pathFormat`|`string`|PathFormat is defines the format of path to store a file. Can reference workflow variables| + +## OSSArtifactRepository + +OSSArtifactRepository defines the controller configuration for an OSS artifact repository + +
+Examples with this field (click to open) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`accessKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccessKeySecret is the secret selector to the bucket's access key| +|`bucket`|`string`|Bucket is the name of the bucket| +|`createBucketIfNotPresent`|`boolean`|CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist| +|`endpoint`|`string`|Endpoint is the hostname of the bucket endpoint| +|`keyFormat`|`string`|KeyFormat defines the format of how to store keys and can reference workflow variables.| +|`lifecycleRule`|[`OSSLifecycleRule`](#osslifecyclerule)|LifecycleRule specifies how to manage bucket's lifecycle| +|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key| +|`securityToken`|`string`|SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm| +|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| + +## S3ArtifactRepository + +S3ArtifactRepository defines the controller configuration for an S3 artifact repository + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`accessKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccessKeySecret is the secret selector to the bucket's access key| +|`bucket`|`string`|Bucket is the name of the bucket| +|`caSecret`|[`SecretKeySelector`](#secretkeyselector)|CASecret specifies the secret that contains the CA, used to verify the TLS connection| +|`createBucketIfNotPresent`|[`CreateS3BucketOptions`](#creates3bucketoptions)|CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.| +|`encryptionOptions`|[`S3EncryptionOptions`](#s3encryptionoptions)|_No description available_| +|`endpoint`|`string`|Endpoint is the hostname of the bucket endpoint| +|`insecure`|`boolean`|Insecure will connect to the service with TLS| +|`keyFormat`|`string`|KeyFormat defines the format of how to store keys and can reference workflow variables.| +|~~`keyPrefix`~~|~~`string`~~|~~KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts.~~ DEPRECATED. Use KeyFormat instead| +|`region`|`string`|Region contains the optional bucket region| +|`roleARN`|`string`|RoleARN is the Amazon Resource Name (ARN) of the role to assume.| +|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key| +|`sessionTokenSecret`|[`SecretKeySelector`](#secretkeyselector)|SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant| +|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| + +## MutexHolding + +MutexHolding describes the mutex and the object which is holding it. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`holder`|`string`|Holder is a reference to the object which holds the Mutex. Holding Scenario: 1. Current workflow's NodeID which is holding the lock. e.g: ${NodeID} Waiting Scenario: 1. Current workflow or other workflow NodeID which is holding the lock. e.g: ${WorkflowName}/${NodeID}| +|`mutex`|`string`|Reference for the mutex e.g: ${namespace}/mutex/${mutexName}| + +## SemaphoreHolding + +_No description available_ + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`holders`|`Array< string >`|Holders stores the list of current holder names in the io.argoproj.workflow.v1alpha1.| +|`semaphore`|`string`|Semaphore stores the semaphore name.| + +## NoneStrategy + +NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately. + +
+Examples with this field (click to open) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) +
+ +## TarStrategy + +TarStrategy will tar and gzip the file or directory when saving + +
+Examples with this field (click to open) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`compressionLevel`|`integer`|CompressionLevel specifies the gzip compression level to use for the artifact. Defaults to gzip.DefaultCompression.| + +## ZipStrategy + +ZipStrategy will unzip zipped input artifacts + +## HTTPAuth + +_No description available_ + +
+Examples with this field (click to open) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`basicAuth`|[`BasicAuth`](#basicauth)|_No description available_| +|`clientCert`|[`ClientCertAuth`](#clientcertauth)|_No description available_| +|`oauth2`|[`OAuth2Auth`](#oauth2auth)|_No description available_| + +## Header + +Header indicate a key-value request header to be used when fetching artifacts over HTTP + +
+Examples with this field (click to open) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Name is the header name| +|`value`|`string`|Value is the literal value to use for the header| + +## OSSLifecycleRule + +OSSLifecycleRule specifies how to manage bucket's lifecycle + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`markDeletionAfterDays`|`integer`|MarkDeletionAfterDays is the number of days before we delete objects in the bucket| +|`markInfrequentAccessAfterDays`|`integer`|MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type| + +## CreateS3BucketOptions + +CreateS3BucketOptions options used to determine automatic automatic bucket-creation process + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`objectLocking`|`boolean`|ObjectLocking Enable object locking| + +## S3EncryptionOptions + +S3EncryptionOptions used to determine encryption options during s3 operations + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`enableEncryption`|`boolean`|EnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used| +|`kmsEncryptionContext`|`string`|KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information| +|`kmsKeyId`|`string`|KMSKeyId tells the driver to encrypt the object using the specified KMS Key.| +|`serverSideCustomerKeySecret`|[`SecretKeySelector`](#secretkeyselector)|ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret.| + +## SuppliedValueFrom + +SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc. + +
+Examples with this field (click to open) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) +
+ +## Amount + +Amount represent a numeric amount. + +
+Examples with this field (click to open) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) +
+ +## ArtifactPaths + +ArtifactPaths expands a step from a collection of artifacts + +
+Examples with this field (click to open) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`archive`|[`ArchiveStrategy`](#archivestrategy)|Archive controls how the artifact will be saved to the artifact repository.| +|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived| +|`artifactGC`|[`ArtifactGC`](#artifactgc)|ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows| +|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details| +|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details| +|`deleted`|`boolean`|Has this been deleted?| +|`from`|`string`|From allows an artifact to reference an artifact from a previous step| +|`fromExpression`|`string`|FromExpression, if defined, is evaluated to specify the value for the artifact| +|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details| +|`git`|[`GitArtifact`](#gitartifact)|Git contains git artifact location details| +|`globalName`|`string`|GlobalName exports an output artifact to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts| +|`hdfs`|[`HDFSArtifact`](#hdfsartifact)|HDFS contains HDFS artifact location details| +|`http`|[`HTTPArtifact`](#httpartifact)|HTTP contains HTTP artifact location details| +|`mode`|`integer`|mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.| +|`name`|`string`|name of the artifact. must be unique within a template's inputs/outputs.| +|`optional`|`boolean`|Make Artifacts optional, if Artifacts doesn't generate or exist| +|`oss`|[`OSSArtifact`](#ossartifact)|OSS contains OSS artifact location details| +|`path`|`string`|Path is the container path to the artifact| +|`raw`|[`RawArtifact`](#rawartifact)|Raw contains raw artifact location details| +|`recurseMode`|`boolean`|If mode is set, apply the permission recursively into the artifact if it is a folder| +|`s3`|[`S3Artifact`](#s3artifact)|S3 contains S3 artifact location details| +|`subPath`|`string`|SubPath allows an artifact to be sourced from a subpath within the specified source| + +## HTTPHeaderSource + +_No description available_ + +
+Examples with this field (click to open) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`secretKeyRef`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| + +## BasicAuth + +BasicAuth describes the secret selectors required for basic authentication + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password| +|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username| + +## ClientCertAuth + +ClientCertAuth holds necessary information for client authentication via certificates + +
+Examples with this field (click to open) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`clientCertSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| +|`clientKeySecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| + +## OAuth2Auth + +OAuth2Auth holds all information for client authentication via OAuth2 tokens + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`clientIDSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| +|`clientSecretSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| +|`endpointParams`|`Array<`[`OAuth2EndpointParam`](#oauth2endpointparam)`>`|_No description available_| +|`scopes`|`Array< string >`|_No description available_| +|`tokenURLSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| + +## OAuth2EndpointParam + +EndpointParam is for requesting optional fields that should be sent in the oauth request + +
+Examples with this field (click to open) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`key`|`string`|Name is the header name| +|`value`|`string`|Value is the literal value to use for the header| + +# External Fields + + +## ObjectMeta + +ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. + +
+Examples with this field (click to open) + +- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) + +- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) + +- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) + +- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) + +- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) + +- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) + +- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) + +- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) + +- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) + +- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) + +- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) + +- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) + +- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) + +- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) + +- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) + +- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) + +- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) + +- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) + +- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`annotations`|`Map< string , string >`|Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations| +|`creationTimestamp`|[`Time`](#time)|CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| +|`deletionGracePeriodSeconds`|`integer`|Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.| +|`deletionTimestamp`|[`Time`](#time)|DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| +|`finalizers`|`Array< string >`|Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.| +|`generateName`|`string`|GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency| +|`generation`|`integer`|A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.| +|`labels`|`Map< string , string >`|Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels| +|`managedFields`|`Array<`[`ManagedFieldsEntry`](#managedfieldsentry)`>`|ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object.| +|`name`|`string`|Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names| +|`namespace`|`string`|Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces| +|`ownerReferences`|`Array<`[`OwnerReference`](#ownerreference)`>`|List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.| +|`resourceVersion`|`string`|An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency| +|`selfLink`|`string`|Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.| +|`uid`|`string`|UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids| + +## Affinity + +Affinity is a group of affinity scheduling rules. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`nodeAffinity`|[`NodeAffinity`](#nodeaffinity)|Describes node affinity scheduling rules for the pod.| +|`podAffinity`|[`PodAffinity`](#podaffinity)|Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).| +|`podAntiAffinity`|[`PodAntiAffinity`](#podantiaffinity)|Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).| + +## PodDNSConfig + +PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy. + +
+Examples with this field (click to open) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`nameservers`|`Array< string >`|A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.| +|`options`|`Array<`[`PodDNSConfigOption`](#poddnsconfigoption)`>`|A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.| +|`searches`|`Array< string >`|A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.| + +## HostAlias + +HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`hostnames`|`Array< string >`|Hostnames for the above IP address.| +|`ip`|`string`|IP address of the host file entry.| + +## LocalObjectReference + +LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + +
+Examples with this field (click to open) + +- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| + +## PodDisruptionBudgetSpec + +PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. + +
+Examples with this field (click to open) + +- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`maxUnavailable`|[`IntOrString`](#intorstring)|An eviction is allowed if at most "maxUnavailable" pods selected by "selector" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable".| +|`minAvailable`|[`IntOrString`](#intorstring)|An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".| +|`selector`|[`LabelSelector`](#labelselector)|Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.| +|`unhealthyPodEvictionPolicy`|`string`|UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type="Ready",status="True". Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. IfHealthyBudget policy means that running pods (status.phase="Running"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. AlwaysAllow policy means that all running pods (status.phase="Running"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).| + +## PodSecurityContext + +PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext. + +
+Examples with this field (click to open) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`appArmorProfile`|[`AppArmorProfile`](#apparmorprofile)|appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.| +|`fsGroup`|`integer`|A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.| +|`fsGroupChangePolicy`|`string`|fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.| +|`runAsGroup`|`integer`|The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.| +|`runAsNonRoot`|`boolean`|Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.| +|`runAsUser`|`integer`|The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.| +|`seLinuxOptions`|[`SELinuxOptions`](#selinuxoptions)|The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.| +|`seccompProfile`|[`SeccompProfile`](#seccompprofile)|The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.| +|`supplementalGroups`|`Array< integer >`|A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.| +|`sysctls`|`Array<`[`Sysctl`](#sysctl)`>`|Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.| +|`windowsOptions`|[`WindowsSecurityContextOptions`](#windowssecuritycontextoptions)|The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.| + +## Toleration + +The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`effect`|`string`|Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.| +|`key`|`string`|Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.| +|`operator`|`string`|Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.| +|`tolerationSeconds`|`integer`|TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.| +|`value`|`string`|Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.| + +## PersistentVolumeClaim + +PersistentVolumeClaim is a user's request for and claim to a persistent volume + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources| +|`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| +|`metadata`|[`ObjectMeta`](#objectmeta)|Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| +|`spec`|[`PersistentVolumeClaimSpec`](#persistentvolumeclaimspec)|spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| +|`status`|[`PersistentVolumeClaimStatus`](#persistentvolumeclaimstatus)|status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| + +## Volume + +Volume represents a named volume in a pod that may be accessed by any container in the pod. + +
+Examples with this field (click to open) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`awsElasticBlockStore`|[`AWSElasticBlockStoreVolumeSource`](#awselasticblockstorevolumesource)|awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| +|`azureDisk`|[`AzureDiskVolumeSource`](#azurediskvolumesource)|azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.| +|`azureFile`|[`AzureFileVolumeSource`](#azurefilevolumesource)|azureFile represents an Azure File Service mount on the host and bind mount to the pod.| +|`cephfs`|[`CephFSVolumeSource`](#cephfsvolumesource)|cephFS represents a Ceph FS mount on the host that shares a pod's lifetime| +|`cinder`|[`CinderVolumeSource`](#cindervolumesource)|cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| +|`configMap`|[`ConfigMapVolumeSource`](#configmapvolumesource)|configMap represents a configMap that should populate this volume| +|`csi`|[`CSIVolumeSource`](#csivolumesource)|csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).| +|`downwardAPI`|[`DownwardAPIVolumeSource`](#downwardapivolumesource)|downwardAPI represents downward API about the pod that should populate this volume| +|`emptyDir`|[`EmptyDirVolumeSource`](#emptydirvolumesource)|emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| +|`ephemeral`|[`EphemeralVolumeSource`](#ephemeralvolumesource)|ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. A pod can use both types of ephemeral volumes and persistent volumes at the same time.| +|`fc`|[`FCVolumeSource`](#fcvolumesource)|fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.| +|`flexVolume`|[`FlexVolumeSource`](#flexvolumesource)|flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.| +|`flocker`|[`FlockerVolumeSource`](#flockervolumesource)|flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running| +|`gcePersistentDisk`|[`GCEPersistentDiskVolumeSource`](#gcepersistentdiskvolumesource)|gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|~~`gitRepo`~~|~~[`GitRepoVolumeSource`](#gitrepovolumesource)~~|~~gitRepo represents a git repository at a particular revision.~~ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.| +|`glusterfs`|[`GlusterfsVolumeSource`](#glusterfsvolumesource)|glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md| +|`hostPath`|[`HostPathVolumeSource`](#hostpathvolumesource)|hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| +|`iscsi`|[`ISCSIVolumeSource`](#iscsivolumesource)|iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md| +|`name`|`string`|name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`nfs`|[`NFSVolumeSource`](#nfsvolumesource)|nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| +|`persistentVolumeClaim`|[`PersistentVolumeClaimVolumeSource`](#persistentvolumeclaimvolumesource)|persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| +|`photonPersistentDisk`|[`PhotonPersistentDiskVolumeSource`](#photonpersistentdiskvolumesource)|photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine| +|`portworxVolume`|[`PortworxVolumeSource`](#portworxvolumesource)|portworxVolume represents a portworx volume attached and mounted on kubelets host machine| +|`projected`|[`ProjectedVolumeSource`](#projectedvolumesource)|projected items for all in one resources secrets, configmaps, and downward API| +|`quobyte`|[`QuobyteVolumeSource`](#quobytevolumesource)|quobyte represents a Quobyte mount on the host that shares a pod's lifetime| +|`rbd`|[`RBDVolumeSource`](#rbdvolumesource)|rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md| +|`scaleIO`|[`ScaleIOVolumeSource`](#scaleiovolumesource)|scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.| +|`secret`|[`SecretVolumeSource`](#secretvolumesource)|secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret| +|`storageos`|[`StorageOSVolumeSource`](#storageosvolumesource)|storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.| +|`vsphereVolume`|[`VsphereVirtualDiskVolumeSource`](#vspherevirtualdiskvolumesource)|vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine| + +## Time + +Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. + +## ObjectReference + +ObjectReference contains enough information to let you inspect or modify the referred object. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiVersion`|`string`|API version of the referent.| +|`fieldPath`|`string`|If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.| +|`kind`|`string`|Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| +|`name`|`string`|Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`namespace`|`string`|Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/| +|`resourceVersion`|`string`|Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency| +|`uid`|`string`|UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids| + +## LabelSelector + +A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + +
+Examples with this field (click to open) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`matchExpressions`|`Array<`[`LabelSelectorRequirement`](#labelselectorrequirement)`>`|matchExpressions is a list of label selector requirements. The requirements are ANDed.| +|`matchLabels`|`Map< string , string >`|matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.| + +## IntOrString + +_No description available_ + +
+Examples with this field (click to open) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) +
+ +## Container + +A single application container that you want to run within a pod. + +
+Examples with this field (click to open) + +- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) + +- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) + +- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) + +- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) + +- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) + +- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) + +- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) + +- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) + +- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) + +- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) + +- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) + +- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) + +- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) + +- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) + +- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) + +- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| +|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| +|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| +|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| +|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| +|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| +|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| +|`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| +|`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| +|`restartPolicy`|`string`|RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.| +|`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| +|`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| +|`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| +|`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| +|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| +|`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| +|`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| +|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| +|`workingDir`|`string`|Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.| + +## ConfigMapKeySelector + +Selects a key from a ConfigMap. + +
+Examples with this field (click to open) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`key`|`string`|The key to select.| +|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`optional`|`boolean`|Specify whether the ConfigMap or its key must be defined| + +## VolumeMount + +VolumeMount describes a mounting of a Volume within a container. + +
+Examples with this field (click to open) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`mountPath`|`string`|Path within the container at which the volume should be mounted. Must not contain ':'.| +|`mountPropagation`|`string`|mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified (which defaults to None).| +|`name`|`string`|This must match the Name of a Volume.| +|`readOnly`|`boolean`|Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.| +|`recursiveReadOnly`|`string`|RecursiveReadOnly specifies whether read-only mounts should be handled recursively. If ReadOnly is false, this field has no meaning and must be unspecified. If ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this field is set to Enabled, the mount is made recursively read-only if it is supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason. If this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None). If this field is not specified, it is treated as an equivalent of Disabled.| +|`subPath`|`string`|Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).| +|`subPathExpr`|`string`|Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.| + +## EnvVar + +EnvVar represents an environment variable present in a Container. + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Name of the environment variable. Must be a C_IDENTIFIER.| +|`value`|`string`|Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".| +|`valueFrom`|[`EnvVarSource`](#envvarsource)|Source for the environment variable's value. Cannot be used if value is not empty.| + +## EnvFromSource + +EnvFromSource represents the source of a set of ConfigMaps + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`configMapRef`|[`ConfigMapEnvSource`](#configmapenvsource)|The ConfigMap to select from| +|`prefix`|`string`|An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.| +|`secretRef`|[`SecretEnvSource`](#secretenvsource)|The Secret to select from| + +## Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`postStart`|[`LifecycleHandler`](#lifecyclehandler)|PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks| +|`preStop`|[`LifecycleHandler`](#lifecyclehandler)|PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks| + +## Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`exec`|[`ExecAction`](#execaction)|Exec specifies the action to take.| +|`failureThreshold`|`integer`|Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.| +|`grpc`|[`GRPCAction`](#grpcaction)|GRPC specifies an action involving a GRPC port.| +|`httpGet`|[`HTTPGetAction`](#httpgetaction)|HTTPGet specifies the http request to perform.| +|`initialDelaySeconds`|`integer`|Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`periodSeconds`|`integer`|How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.| +|`successThreshold`|`integer`|Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.| +|`tcpSocket`|[`TCPSocketAction`](#tcpsocketaction)|TCPSocket specifies an action involving a TCP port.| +|`terminationGracePeriodSeconds`|`integer`|Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.| +|`timeoutSeconds`|`integer`|Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| + +## ContainerPort + +ContainerPort represents a network port in a single container. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`containerPort`|`integer`|Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.| +|`hostIP`|`string`|What host IP to bind the external port to.| +|`hostPort`|`integer`|Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.| +|`name`|`string`|If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.| +|`protocol`|`string`|Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".| + +## ContainerResizePolicy + +ContainerResizePolicy represents resource resize policy for the container. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`resourceName`|`string`|Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.| +|`restartPolicy`|`string`|Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.| + +## ResourceRequirements + +ResourceRequirements describes the compute resource requirements. + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`claims`|`Array<`[`ResourceClaim`](#resourceclaim)`>`|Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers.| +|`limits`|[`Quantity`](#quantity)|Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| +|`requests`|[`Quantity`](#quantity)|Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| + +## SecurityContext + +SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. + +
+Examples with this field (click to open) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`allowPrivilegeEscalation`|`boolean`|AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.| +|`appArmorProfile`|[`AppArmorProfile`](#apparmorprofile)|appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.| +|`capabilities`|[`Capabilities`](#capabilities)|The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.| +|`privileged`|`boolean`|Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.| +|`procMount`|`string`|procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.| +|`readOnlyRootFilesystem`|`boolean`|Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.| +|`runAsGroup`|`integer`|The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.| +|`runAsNonRoot`|`boolean`|Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.| +|`runAsUser`|`integer`|The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.| +|`seLinuxOptions`|[`SELinuxOptions`](#selinuxoptions)|The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.| +|`seccompProfile`|[`SeccompProfile`](#seccompprofile)|The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.| +|`windowsOptions`|[`WindowsSecurityContextOptions`](#windowssecuritycontextoptions)|The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.| + +## VolumeDevice + +volumeDevice describes a mapping of a raw block device within a container. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`devicePath`|`string`|devicePath is the path inside of the container that the device will be mapped to.| +|`name`|`string`|name must match the name of a persistentVolumeClaim in the pod| + +## SecretKeySelector + +SecretKeySelector selects a key of a Secret. + +
+Examples with this field (click to open) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`key`|`string`|The key of the secret to select from. Must be a valid secret key.| +|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`optional`|`boolean`|Specify whether the Secret or its key must be defined| + +## ManagedFieldsEntry + +ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiVersion`|`string`|APIVersion defines the version of this resource that this field set applies to. The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.| +|`fieldsType`|`string`|FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1"| +|`fieldsV1`|[`FieldsV1`](#fieldsv1)|FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.| +|`manager`|`string`|Manager is an identifier of the workflow managing these fields.| +|`operation`|`string`|Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.| +|`subresource`|`string`|Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.| +|`time`|[`Time`](#time)|Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.| + +## OwnerReference + +OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiVersion`|`string`|API version of the referent.| +|`blockOwnerDeletion`|`boolean`|If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.| +|`controller`|`boolean`|If true, this reference points to the managing controller.| +|`kind`|`string`|Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| +|`name`|`string`|Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names| +|`uid`|`string`|UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids| + +## NodeAffinity + +Node affinity is a group of node affinity scheduling rules. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`preferredDuringSchedulingIgnoredDuringExecution`|`Array<`[`PreferredSchedulingTerm`](#preferredschedulingterm)`>`|The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.| +|`requiredDuringSchedulingIgnoredDuringExecution`|[`NodeSelector`](#nodeselector)|If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.| + +## PodAffinity + +Pod affinity is a group of inter pod affinity scheduling rules. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`preferredDuringSchedulingIgnoredDuringExecution`|`Array<`[`WeightedPodAffinityTerm`](#weightedpodaffinityterm)`>`|The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.| +|`requiredDuringSchedulingIgnoredDuringExecution`|`Array<`[`PodAffinityTerm`](#podaffinityterm)`>`|If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.| + +## PodAntiAffinity + +Pod anti affinity is a group of inter pod anti affinity scheduling rules. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`preferredDuringSchedulingIgnoredDuringExecution`|`Array<`[`WeightedPodAffinityTerm`](#weightedpodaffinityterm)`>`|The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.| +|`requiredDuringSchedulingIgnoredDuringExecution`|`Array<`[`PodAffinityTerm`](#podaffinityterm)`>`|If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.| + +## PodDNSConfigOption + +PodDNSConfigOption defines DNS resolver options of a pod. + +
+Examples with this field (click to open) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Required.| +|`value`|`string`|_No description available_| + +## AppArmorProfile + +AppArmorProfile defines a pod or container's AppArmor settings. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`localhostProfile`|`string`|localhostProfile indicates a profile loaded on the node that should be used. The profile must be preconfigured on the node to work. Must match the loaded name of the profile. Must be set if and only if type is "Localhost".| +|`type`|`string`|type indicates which kind of AppArmor profile will be applied. Valid options are: Localhost - a profile pre-loaded on the node. RuntimeDefault - the container runtime's default profile. Unconfined - no AppArmor enforcement.| + +## SELinuxOptions + +SELinuxOptions are the labels to be applied to the container + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`level`|`string`|Level is SELinux level label that applies to the container.| +|`role`|`string`|Role is a SELinux role label that applies to the container.| +|`type`|`string`|Type is a SELinux type label that applies to the container.| +|`user`|`string`|User is a SELinux user label that applies to the container.| + +## SeccompProfile + +SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`localhostProfile`|`string`|localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type.| +|`type`|`string`|type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.| + +## Sysctl + +Sysctl defines a kernel parameter to be set + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Name of a property to set| +|`value`|`string`|Value of a property to set| + +## WindowsSecurityContextOptions + +WindowsSecurityContextOptions contain Windows-specific options and credentials. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`gmsaCredentialSpec`|`string`|GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.| +|`gmsaCredentialSpecName`|`string`|GMSACredentialSpecName is the name of the GMSA credential spec to use.| +|`hostProcess`|`boolean`|HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.| +|`runAsUserName`|`string`|The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.| + +## PersistentVolumeClaimSpec + +PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + +
+Examples with this field (click to open) + +- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) + +- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) + +- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) + +- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) + +- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) + +- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) + +- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) + +- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) + +- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) + +- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) + +- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) + +- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) + +- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) + +- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) + +- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) + +- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) + +- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) + +- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) + +- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) + +- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) + +- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) + +- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) + +- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) + +- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) + +- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) + +- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) + +- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) + +- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) + +- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) + +- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) + +- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) + +- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) + +- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) + +- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) + +- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) + +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) + +- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) + +- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) + +- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) + +- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) + +- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) + +- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) + +- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) + +- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) + +- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) + +- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) + +- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) + +- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) + +- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) + +- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) + +- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) + +- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) + +- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) + +- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) + +- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) + +- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) + +- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) + +- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) + +- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) + +- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) + +- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) + +- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) + +- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) + +- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) + +- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) + +- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) + +- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) + +- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) + +- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) + +- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) + +- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) + +- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) + +- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) + +- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) + +- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) + +- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) + +- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) + +- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) + +- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) + +- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) + +- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) + +- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) + +- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) + +- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) + +- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) + +- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) + +- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) + +- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) + +- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) + +- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) + +- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) + +- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) + +- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + +- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) + +- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) + +- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) + +- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) + +- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) + +- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) + +- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) + +- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) + +- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) + +- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) + +- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) + +- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) + +- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) + +- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) + +- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) + +- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) + +- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) + +- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) + +- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) + +- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) + +- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`accessModes`|`Array< string >`|accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1| +|`dataSource`|[`TypedLocalObjectReference`](#typedlocalobjectreference)|dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.| +|`dataSourceRef`|[`TypedObjectReference`](#typedobjectreference)|dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.| +|`resources`|[`VolumeResourceRequirements`](#volumeresourcerequirements)|resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources| +|`selector`|[`LabelSelector`](#labelselector)|selector is a label query over volumes to consider for binding.| +|`storageClassName`|`string`|storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1| +|`volumeAttributesClassName`|`string`|volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.| +|`volumeMode`|`string`|volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.| +|`volumeName`|`string`|volumeName is the binding reference to the PersistentVolume backing this claim.| + +## PersistentVolumeClaimStatus + +PersistentVolumeClaimStatus is the current status of a persistent volume claim. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`accessModes`|`Array< string >`|accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1| +|`allocatedResourceStatuses`|`Map< string , string >`|allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either: * Un-prefixed keys: - storage - the capacity of the volume. * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used. ClaimResourceStatus can be in any of following states: - ControllerResizeInProgress: State set when resize controller starts resizing the volume in control-plane. - ControllerResizeFailed: State set when resize has failed in resize controller with a terminal error. - NodeResizePending: State set when resize controller has finished resizing the volume but further resizing of volume is needed on the node. - NodeResizeInProgress: State set when kubelet starts resizing the volume. - NodeResizeFailed: State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed. For example: if expanding a PVC for more capacity - this field can be one of the following states: - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress" - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed" - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending" - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress" - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed" When this field is not set, it means that no resize operation is in progress for the given PVC. A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.| +|`allocatedResources`|[`Quantity`](#quantity)|allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either: * Un-prefixed keys: - storage - the capacity of the volume. * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used. Capacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. A controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.| +|`capacity`|[`Quantity`](#quantity)|capacity represents the actual resources of the underlying volume.| +|`conditions`|`Array<`[`PersistentVolumeClaimCondition`](#persistentvolumeclaimcondition)`>`|conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.| +|`currentVolumeAttributesClassName`|`string`|currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.| +|`modifyVolumeStatus`|[`ModifyVolumeStatus`](#modifyvolumestatus)|ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.| +|`phase`|`string`|phase represents the current phase of PersistentVolumeClaim.| + +## AWSElasticBlockStoreVolumeSource + +Represents a Persistent Disk resource in AWS. An AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| +|`partition`|`integer`|partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).| +|`readOnly`|`boolean`|readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| +|`volumeID`|`string`|volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| + +## AzureDiskVolumeSource + +AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`cachingMode`|`string`|cachingMode is the Host Caching mode: None, Read Only, Read Write.| +|`diskName`|`string`|diskName is the Name of the data disk in the blob storage| +|`diskURI`|`string`|diskURI is the URI of data disk in the blob storage| +|`fsType`|`string`|fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`kind`|`string`|kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared| +|`readOnly`|`boolean`|readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| + +## AzureFileVolumeSource + +AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`secretName`|`string`|secretName is the name of secret that contains Azure Storage Account Name and Key| +|`shareName`|`string`|shareName is the azure share Name| + +## CephFSVolumeSource + +Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`monitors`|`Array< string >`|monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`path`|`string`|path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /| +|`readOnly`|`boolean`|readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`secretFile`|`string`|secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`user`|`string`|user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| + +## CinderVolumeSource + +Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| +|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.| +|`volumeID`|`string`|volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| + +## ConfigMapVolumeSource + +Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling. + +
+Examples with this field (click to open) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`defaultMode`|`integer`|defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| +|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`optional`|`boolean`|optional specify whether the ConfigMap or its keys must be defined| + +## CSIVolumeSource + +Represents a source location of a volume to mount, managed by an external CSI driver + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`driver`|`string`|driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.| +|`fsType`|`string`|fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.| +|`nodePublishSecretRef`|[`LocalObjectReference`](#localobjectreference)|nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.| +|`readOnly`|`boolean`|readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).| +|`volumeAttributes`|`Map< string , string >`|volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.| + +## DownwardAPIVolumeSource + +DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`defaultMode`|`integer`|Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`items`|`Array<`[`DownwardAPIVolumeFile`](#downwardapivolumefile)`>`|Items is a list of downward API volume file| + +## EmptyDirVolumeSource + +Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling. + +
+Examples with this field (click to open) + +- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) + +- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`medium`|`string`|medium represents what type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| +|`sizeLimit`|[`Quantity`](#quantity)|sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| + +## EphemeralVolumeSource + +Represents an ephemeral volume that is handled by a normal storage driver. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`volumeClaimTemplate`|[`PersistentVolumeClaimTemplate`](#persistentvolumeclaimtemplate)|Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. Required, must not be nil.| + +## FCVolumeSource + +Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`lun`|`integer`|lun is Optional: FC target lun number| +|`readOnly`|`boolean`|readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`targetWWNs`|`Array< string >`|targetWWNs is Optional: FC target worldwide names (WWNs)| +|`wwids`|`Array< string >`|wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.| + +## FlexVolumeSource + +FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`driver`|`string`|driver is the name of the driver to use for this volume.| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.| +|`options`|`Map< string , string >`|options is Optional: this field holds extra command options if any.| +|`readOnly`|`boolean`|readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.| + +## FlockerVolumeSource + +Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`datasetName`|`string`|datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated| +|`datasetUUID`|`string`|datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset| + +## GCEPersistentDiskVolumeSource + +Represents a Persistent Disk resource in Google Compute Engine. A GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|`partition`|`integer`|partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|`pdName`|`string`|pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| + +## GitRepoVolumeSource + +Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`directory`|`string`|directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.| +|`repository`|`string`|repository is the URL| +|`revision`|`string`|revision is the commit hash for the specified revision.| + +## GlusterfsVolumeSource + +Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`endpoints`|`string`|endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| +|`path`|`string`|path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| +|`readOnly`|`boolean`|readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| + +## HostPathVolumeSource + +Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`path`|`string`|path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| +|`type`|`string`|type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| + +## ISCSIVolumeSource + +Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`chapAuthDiscovery`|`boolean`|chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication| +|`chapAuthSession`|`boolean`|chapAuthSession defines whether support iSCSI Session CHAP authentication| +|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi| +|`initiatorName`|`string`|initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.| +|`iqn`|`string`|iqn is the target iSCSI Qualified Name.| +|`iscsiInterface`|`string`|iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).| +|`lun`|`integer`|lun represents iSCSI Target Lun number.| +|`portals`|`Array< string >`|portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).| +|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is the CHAP Secret for iSCSI target and initiator authentication| +|`targetPortal`|`string`|targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).| + +## NFSVolumeSource + +Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`path`|`string`|path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| +|`readOnly`|`boolean`|readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| +|`server`|`string`|server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| + +## PersistentVolumeClaimVolumeSource + +PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system). + +
+Examples with this field (click to open) + +- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`claimName`|`string`|claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| +|`readOnly`|`boolean`|readOnly Will force the ReadOnly setting in VolumeMounts. Default false.| + +## PhotonPersistentDiskVolumeSource + +Represents a Photon Controller persistent disk resource. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`pdID`|`string`|pdID is the ID that identifies Photon Controller persistent disk| + +## PortworxVolumeSource + +PortworxVolumeSource represents a Portworx volume resource. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.| +|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`volumeID`|`string`|volumeID uniquely identifies a Portworx volume| + +## ProjectedVolumeSource + +Represents a projected volume source + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`defaultMode`|`integer`|defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`sources`|`Array<`[`VolumeProjection`](#volumeprojection)`>`|sources is the list of volume projections| + +## QuobyteVolumeSource + +Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`group`|`string`|group to map volume access to Default is no group| +|`readOnly`|`boolean`|readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.| +|`registry`|`string`|registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes| +|`tenant`|`string`|tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin| +|`user`|`string`|user to map volume access to Defaults to serivceaccount user| +|`volume`|`string`|volume is a string that references an already created Quobyte volume by name.| + +## RBDVolumeSource + +Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd| +|`image`|`string`|image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`keyring`|`string`|keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`monitors`|`Array< string >`|monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`pool`|`string`|pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`user`|`string`|user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| + +## ScaleIOVolumeSource + +ScaleIOVolumeSource represents a persistent ScaleIO volume + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs".| +|`gateway`|`string`|gateway is the host address of the ScaleIO API Gateway.| +|`protectionDomain`|`string`|protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.| +|`readOnly`|`boolean`|readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.| +|`sslEnabled`|`boolean`|sslEnabled Flag enable/disable SSL communication with Gateway, default false| +|`storageMode`|`string`|storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.| +|`storagePool`|`string`|storagePool is the ScaleIO Storage Pool associated with the protection domain.| +|`system`|`string`|system is the name of the storage system as configured in ScaleIO.| +|`volumeName`|`string`|volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.| + +## SecretVolumeSource + +Adapts a Secret into a volume. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling. + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`defaultMode`|`integer`|defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| +|`optional`|`boolean`|optional field specify whether the Secret or its keys must be defined| +|`secretName`|`string`|secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret| + +## StorageOSVolumeSource + +Represents a StorageOS persistent volume resource. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.| +|`volumeName`|`string`|volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.| +|`volumeNamespace`|`string`|volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.| + +## VsphereVirtualDiskVolumeSource + +Represents a vSphere volume resource. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fsType`|`string`|fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`storagePolicyID`|`string`|storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.| +|`storagePolicyName`|`string`|storagePolicyName is the storage Policy Based Management (SPBM) profile name.| +|`volumePath`|`string`|volumePath is the path that identifies vSphere volume vmdk| + +## LabelSelectorRequirement + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`key`|`string`|key is the label key that the selector applies to.| +|`operator`|`string`|operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.| +|`values`|`Array< string >`|values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.| + +## EnvVarSource + +EnvVarSource represents a source for the value of an EnvVar. + +
+Examples with this field (click to open) + +- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) + +- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) + +- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) + +- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) + +- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) + +- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) + +- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) + +- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) + +- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) + +- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) + +- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) + +- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) + +- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) + +- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) + +- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) + +- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) + +- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) + +- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) + +- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) + +- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`configMapKeyRef`|[`ConfigMapKeySelector`](#configmapkeyselector)|Selects a key of a ConfigMap.| +|`fieldRef`|[`ObjectFieldSelector`](#objectfieldselector)|Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.| +|`resourceFieldRef`|[`ResourceFieldSelector`](#resourcefieldselector)|Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.| +|`secretKeyRef`|[`SecretKeySelector`](#secretkeyselector)|Selects a key of a secret in the pod's namespace| + +## ConfigMapEnvSource + +ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`optional`|`boolean`|Specify whether the ConfigMap must be defined| + +## SecretEnvSource + +SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`optional`|`boolean`|Specify whether the Secret must be defined| + +## LifecycleHandler + +LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`exec`|[`ExecAction`](#execaction)|Exec specifies the action to take.| +|`httpGet`|[`HTTPGetAction`](#httpgetaction)|HTTPGet specifies the http request to perform.| +|`sleep`|[`SleepAction`](#sleepaction)|Sleep represents the duration that the container should sleep before being terminated.| +|`tcpSocket`|[`TCPSocketAction`](#tcpsocketaction)|Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.| + +## ExecAction + +ExecAction describes a "run in container" action. + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`command`|`Array< string >`|Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.| + +## GRPCAction + +_No description available_ + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`port`|`integer`|Port number of the gRPC service. Number must be in the range 1 to 65535.| +|`service`|`string`|Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC.| + +## HTTPGetAction + +HTTPGetAction describes an action based on HTTP Get requests. + +
+Examples with this field (click to open) + +- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) + +- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) + +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`host`|`string`|Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.| +|`httpHeaders`|`Array<`[`HTTPHeader`](#httpheader)`>`|Custom headers to set in the request. HTTP allows repeated headers.| +|`path`|`string`|Path to access on the HTTP server.| +|`port`|[`IntOrString`](#intorstring)|Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.| +|`scheme`|`string`|Scheme to use for connecting to the host. Defaults to HTTP.| + +## TCPSocketAction + +TCPSocketAction describes an action based on opening a socket + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`host`|`string`|Optional: Host name to connect to, defaults to the pod IP.| +|`port`|[`IntOrString`](#intorstring)|Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.| + +## ResourceClaim + +ResourceClaim references one entry in PodSpec.ResourceClaims. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.| + +## Quantity + +Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` ::= (Note that may be empty, from the "" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= "+" | "-" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | "" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= "e" | "E" ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in "canonical form". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as "1500m" - 1.5Gi will be serialized as "1536Mi" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. + +
+Examples with this field (click to open) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) +
+ +## Capabilities + +Adds and removes POSIX capabilities from running containers. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`add`|`Array< string >`|Added capabilities| +|`drop`|`Array< string >`|Removed capabilities| + +## FieldsV1 + +FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. Each key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set. The exact format is defined in sigs.k8s.io/structured-merge-diff + +## PreferredSchedulingTerm + +An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`preference`|[`NodeSelectorTerm`](#nodeselectorterm)|A node selector term, associated with the corresponding weight.| +|`weight`|`integer`|Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.| + +## NodeSelector + +A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`nodeSelectorTerms`|`Array<`[`NodeSelectorTerm`](#nodeselectorterm)`>`|Required. A list of node selector terms. The terms are ORed.| + +## WeightedPodAffinityTerm + +The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`podAffinityTerm`|[`PodAffinityTerm`](#podaffinityterm)|Required. A pod affinity term, associated with the corresponding weight.| +|`weight`|`integer`|weight associated with matching the corresponding podAffinityTerm, in the range 1-100.| + +## PodAffinityTerm + +Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`labelSelector`|[`LabelSelector`](#labelselector)|A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.| +|`matchLabelKeys`|`Array< string >`|MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.| +|`mismatchLabelKeys`|`Array< string >`|MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.| +|`namespaceSelector`|[`LabelSelector`](#labelselector)|A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.| +|`namespaces`|`Array< string >`|namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".| +|`topologyKey`|`string`|This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.| + +## TypedLocalObjectReference + +TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiGroup`|`string`|APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.| +|`kind`|`string`|Kind is the type of resource being referenced| +|`name`|`string`|Name is the name of resource being referenced| + +## TypedObjectReference + +_No description available_ + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiGroup`|`string`|APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.| +|`kind`|`string`|Kind is the type of resource being referenced| +|`name`|`string`|Name is the name of resource being referenced| +|`namespace`|`string`|Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.| + +## VolumeResourceRequirements + +VolumeResourceRequirements describes the storage resource requirements for a volume. + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) + +- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) + +- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) + +- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) + +- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) + +- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) + +- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) + +- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) + +- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`limits`|[`Quantity`](#quantity)|Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| +|`requests`|[`Quantity`](#quantity)|Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| + +## PersistentVolumeClaimCondition + +PersistentVolumeClaimCondition contains details about state of pvc + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`lastProbeTime`|[`Time`](#time)|lastProbeTime is the time we probed the condition.| +|`lastTransitionTime`|[`Time`](#time)|lastTransitionTime is the time the condition transitioned from one status to another.| +|`message`|`string`|message is the human-readable message indicating details about last transition.| +|`reason`|`string`|reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "Resizing" that means the underlying persistent volume is being resized.| +|`status`|`string`|_No description available_| +|`type`|`string`|_No description available_| + +## ModifyVolumeStatus + +ModifyVolumeStatus represents the status object of ControllerModifyVolume operation + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`status`|`string`|status is the status of the ControllerModifyVolume operation. It can be in any of following states: - Pending Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as the specified VolumeAttributesClass not existing. - InProgress InProgress indicates that the volume is being modified. - Infeasible Infeasible indicates that the request has been rejected as invalid by the CSI driver. To resolve the error, a valid VolumeAttributesClass needs to be specified. Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.| +|`targetVolumeAttributesClassName`|`string`|targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled| + +## KeyToPath + +Maps a string key to a path within a volume. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`key`|`string`|key is the key to project.| +|`mode`|`integer`|mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`path`|`string`|path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.| + +## DownwardAPIVolumeFile + +DownwardAPIVolumeFile represents information to create the file containing the pod field + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`fieldRef`|[`ObjectFieldSelector`](#objectfieldselector)|Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.| +|`mode`|`integer`|Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`path`|`string`|Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'| +|`resourceFieldRef`|[`ResourceFieldSelector`](#resourcefieldselector)|Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.| + +## PersistentVolumeClaimTemplate + +PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`metadata`|[`ObjectMeta`](#objectmeta)|May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.| +|`spec`|[`PersistentVolumeClaimSpec`](#persistentvolumeclaimspec)|The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.| + +## VolumeProjection + +Projection that may be projected along with other supported volume types + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`clusterTrustBundle`|[`ClusterTrustBundleProjection`](#clustertrustbundleprojection)|ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. Alpha, gated by the ClusterTrustBundleProjection feature gate. ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.| +|`configMap`|[`ConfigMapProjection`](#configmapprojection)|configMap information about the configMap data to project| +|`downwardAPI`|[`DownwardAPIProjection`](#downwardapiprojection)|downwardAPI information about the downwardAPI data to project| +|`secret`|[`SecretProjection`](#secretprojection)|secret information about the secret data to project| +|`serviceAccountToken`|[`ServiceAccountTokenProjection`](#serviceaccounttokenprojection)|serviceAccountToken is information about the serviceAccountToken data to project| + +## ObjectFieldSelector + +ObjectFieldSelector selects an APIVersioned field of an object. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiVersion`|`string`|Version of the schema the FieldPath is written in terms of, defaults to "v1".| +|`fieldPath`|`string`|Path of the field to select in the specified API version.| + +## ResourceFieldSelector + +ResourceFieldSelector represents container resources (cpu, memory) and their output format + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`containerName`|`string`|Container name: required for volumes, optional for env vars| +|`divisor`|[`Quantity`](#quantity)|Specifies the output format of the exposed resources, defaults to "1"| +|`resource`|`string`|Required: resource to select| + +## SleepAction + +SleepAction describes a "sleep" action. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`seconds`|`integer`|Seconds is the number of seconds to sleep.| + +## HTTPHeader + +HTTPHeader describes a custom header to be used in HTTP probes + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.| +|`value`|`string`|The header field value| + +## NodeSelectorTerm + +A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`matchExpressions`|`Array<`[`NodeSelectorRequirement`](#nodeselectorrequirement)`>`|A list of node selector requirements by node's labels.| +|`matchFields`|`Array<`[`NodeSelectorRequirement`](#nodeselectorrequirement)`>`|A list of node selector requirements by node's fields.| + +## ClusterTrustBundleProjection + +ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`labelSelector`|[`LabelSelector`](#labelselector)|Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything".| +|`name`|`string`|Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.| +|`optional`|`boolean`|If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.| +|`path`|`string`|Relative path from the volume root to write the bundle.| +|`signerName`|`string`|Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.| + +## ConfigMapProjection + +Adapts a ConfigMap into a projected volume. The contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode. + +
+Examples with this field (click to open) + +- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| +|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`optional`|`boolean`|optional specify whether the ConfigMap or its keys must be defined| + +## DownwardAPIProjection + +Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`items`|`Array<`[`DownwardAPIVolumeFile`](#downwardapivolumefile)`>`|Items is a list of DownwardAPIVolume file| + +## SecretProjection + +Adapts a secret into a projected volume. The contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode. + +
+Examples with this field (click to open) + +- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) + +- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) +
+ +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| +|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`optional`|`boolean`|optional field specify whether the Secret or its key must be defined| + +## ServiceAccountTokenProjection + +ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise). + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`audience`|`string`|audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.| +|`expirationSeconds`|`integer`|expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.| +|`path`|`string`|path is the path relative to the mount point of the file to project the token into.| + +## NodeSelectorRequirement + +A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`key`|`string`|The label key that the selector applies to.| +|`operator`|`string`|Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.| +|`values`|`Array< string >`|An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.| From 2ef248d7b97de30fafbb6cc339e4123d0306475a Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 13 Oct 2024 17:41:45 +0300 Subject: [PATCH 13/50] fix(docs): codegen Signed-off-by: MenD32 --- Makefile | 8115 +++++------------------------------------------- docs/fields.md | 86 + 2 files changed, 861 insertions(+), 7340 deletions(-) diff --git a/Makefile b/Makefile index 91abfa490597..14be3b0849c7 100644 --- a/Makefile +++ b/Makefile @@ -1,7340 +1,775 @@ -# Field Reference - -## Workflow - -Workflow is the definition of a workflow resource - -
-Examples (click to open) - -- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) - -- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`daemoned-stateful-set-with-service.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemoned-stateful-set-with-service.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) - -- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) - -- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-jobs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-jobs.yaml) - -- [`k8s-orchestration.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-orchestration.yaml) - -- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) - -- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) - -- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) - -- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) - -- [`k8s-patch-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-pod.yaml) - -- [`k8s-resource-log-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-resource-log-selector.yaml) - -- [`k8s-set-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-set-owner-reference.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`resource-delete-with-flags.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resource-delete-with-flags.yaml) - -- [`resource-flags.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resource-flags.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) - -- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) - -- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) - -- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) - -- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) - -- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) - -- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) - -- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources| -|`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| -|`metadata`|[`ObjectMeta`](#objectmeta)|_No description available_| -|`spec`|[`WorkflowSpec`](#workflowspec)|_No description available_| -|`status`|[`WorkflowStatus`](#workflowstatus)|_No description available_| - -## CronWorkflow - -CronWorkflow is the definition of a scheduled workflow resource - -
-Examples (click to open) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) - -- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) - -- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) - -- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources| -|`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| -|`metadata`|[`ObjectMeta`](#objectmeta)|_No description available_| -|`spec`|[`CronWorkflowSpec`](#cronworkflowspec)|_No description available_| -|`status`|[`CronWorkflowStatus`](#cronworkflowstatus)|_No description available_| - -## WorkflowTemplate - -WorkflowTemplate is the definition of a workflow template resource - -
-Examples (click to open) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources| -|`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| -|`metadata`|[`ObjectMeta`](#objectmeta)|_No description available_| -|`spec`|[`WorkflowSpec`](#workflowspec)|_No description available_| - -## WorkflowSpec - -WorkflowSpec is the specification of a Workflow. - -
-Examples with this field (click to open) - -- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) - -- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) - -- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) - -- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) - -- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) - -- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) - -- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) - -- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) - -- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) - -- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) - -- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) - -- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) - -- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) - -- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) - -- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) - -- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) - -- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) - -- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) - -- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`activeDeadlineSeconds`|`integer`|Optional duration in seconds relative to the workflow start time which the workflow is allowed to run before the controller terminates the io.argoproj.workflow.v1alpha1. A value of zero is used to terminate a Running workflow| -|`affinity`|[`Affinity`](#affinity)|Affinity sets the scheduling constraints for all pods in the io.argoproj.workflow.v1alpha1. Can be overridden by an affinity specified in the template| -|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived| -|`arguments`|[`Arguments`](#arguments)|Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{io.argoproj.workflow.v1alpha1.parameters.myparam}}| -|`artifactGC`|[`WorkflowLevelArtifactGC`](#workflowlevelartifactgc)|ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)| -|`artifactRepositoryRef`|[`ArtifactRepositoryRef`](#artifactrepositoryref)|ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.| -|`automountServiceAccountToken`|`boolean`|AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.| -|`dnsConfig`|[`PodDNSConfig`](#poddnsconfig)|PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.| -|`dnsPolicy`|`string`|Set DNS policy for workflow pods. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.| -|`entrypoint`|`string`|Entrypoint is a template reference to the starting point of the io.argoproj.workflow.v1alpha1.| -|`executor`|[`ExecutorConfig`](#executorconfig)|Executor holds configurations of executor containers of the io.argoproj.workflow.v1alpha1.| -|`hooks`|[`LifecycleHook`](#lifecyclehook)|Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step| -|`hostAliases`|`Array<`[`HostAlias`](#hostalias)`>`|_No description available_| -|`hostNetwork`|`boolean`|Host networking requested for this workflow pod. Default to false.| -|`imagePullSecrets`|`Array<`[`LocalObjectReference`](#localobjectreference)`>`|ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod| -|`metrics`|[`Metrics`](#metrics)|Metrics are a list of metrics emitted from this Workflow| -|`nodeSelector`|`Map< string , string >`|NodeSelector is a selector which will result in all pods of the workflow to be scheduled on the selected node(s). This is able to be overridden by a nodeSelector specified in the template.| -|`onExit`|`string`|OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary io.argoproj.workflow.v1alpha1.| -|`parallelism`|`integer`|Parallelism limits the max total parallel pods that can execute at the same time in a workflow| -|`podDisruptionBudget`|[`PodDisruptionBudgetSpec`](#poddisruptionbudgetspec)|PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. Controller will automatically add the selector with workflow name, if selector is empty. Optional: Defaults to empty.| -|`podGC`|[`PodGC`](#podgc)|PodGC describes the strategy to use when deleting completed pods| -|`podMetadata`|[`Metadata`](#metadata)|PodMetadata defines additional metadata that should be applied to workflow pods| -|~~`podPriority`~~|~~`integer`~~|~~Priority to apply to workflow pods.~~ DEPRECATED: Use PodPriorityClassName instead.| -|`podPriorityClassName`|`string`|PriorityClassName to apply to workflow pods.| -|`podSpecPatch`|`string`|PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).| -|`priority`|`integer`|Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.| -|`retryStrategy`|[`RetryStrategy`](#retrystrategy)|RetryStrategy for all templates in the io.argoproj.workflow.v1alpha1.| -|`schedulerName`|`string`|Set scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. Default scheduler will be used if neither specified.| -|`securityContext`|[`PodSecurityContext`](#podsecuritycontext)|SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.| -|`serviceAccountName`|`string`|ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.| -|`shutdown`|`string`|Shutdown will shutdown the workflow according to its ShutdownStrategy| -|`suspend`|`boolean`|Suspend will suspend the workflow and prevent execution of any future steps in the workflow| -|`synchronization`|[`Synchronization`](#synchronization)|Synchronization holds synchronization lock configuration for this Workflow| -|`templateDefaults`|[`Template`](#template)|TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level| -|`templates`|`Array<`[`Template`](#template)`>`|Templates is a list of workflow templates used in a workflow| -|`tolerations`|`Array<`[`Toleration`](#toleration)`>`|Tolerations to apply to workflow pods.| -|`ttlStrategy`|[`TTLStrategy`](#ttlstrategy)|TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be deleted after the time to live expires. If this field is unset, the controller config map will hold the default values.| -|`volumeClaimGC`|[`VolumeClaimGC`](#volumeclaimgc)|VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows| -|`volumeClaimTemplates`|`Array<`[`PersistentVolumeClaim`](#persistentvolumeclaim)`>`|VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow| -|`volumes`|`Array<`[`Volume`](#volume)`>`|Volumes is a list of volumes that can be mounted by containers in a io.argoproj.workflow.v1alpha1.| -|`workflowMetadata`|[`WorkflowMetadata`](#workflowmetadata)|WorkflowMetadata contains some metadata of the workflow to refer to| -|`workflowTemplateRef`|[`WorkflowTemplateRef`](#workflowtemplateref)|WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution| - -## WorkflowStatus - -WorkflowStatus contains overall status information about a workflow - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`artifactGCStatus`|[`ArtGCStatus`](#artgcstatus)|ArtifactGCStatus maintains the status of Artifact Garbage Collection| -|`artifactRepositoryRef`|[`ArtifactRepositoryRefStatus`](#artifactrepositoryrefstatus)|ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.| -|`compressedNodes`|`string`|Compressed and base64 decoded Nodes map| -|`conditions`|`Array<`[`Condition`](#condition)`>`|Conditions is a list of conditions the Workflow may have| -|`estimatedDuration`|`integer`|EstimatedDuration in seconds.| -|`finishedAt`|[`Time`](#time)|Time at which this workflow completed| -|`message`|`string`|A human readable message indicating details about why the workflow is in this condition.| -|`nodes`|[`NodeStatus`](#nodestatus)|Nodes is a mapping between a node ID and the node's status.| -|`offloadNodeStatusVersion`|`string`|Whether on not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty. This will actually be populated with a hash of the offloaded data.| -|`outputs`|[`Outputs`](#outputs)|Outputs captures output values and artifact locations produced by the workflow via global outputs| -|`persistentVolumeClaims`|`Array<`[`Volume`](#volume)`>`|PersistentVolumeClaims tracks all PVCs that were created as part of the io.argoproj.workflow.v1alpha1. The contents of this list are drained at the end of the workflow.| -|`phase`|`string`|Phase a simple, high-level summary of where the workflow is in its lifecycle. Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded", "Failed" or "Error" once the workflow has completed.| -|`progress`|`string`|Progress to completion| -|`resourcesDuration`|`Map< integer , int64 >`|ResourcesDuration is the total for the workflow| -|`startedAt`|[`Time`](#time)|Time at which this workflow started| -|`storedTemplates`|[`Template`](#template)|StoredTemplates is a mapping between a template ref and the node's status.| -|`storedWorkflowTemplateSpec`|[`WorkflowSpec`](#workflowspec)|StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.| -|`synchronization`|[`SynchronizationStatus`](#synchronizationstatus)|Synchronization stores the status of synchronization locks| -|`taskResultsCompletionStatus`|`Map< boolean , string >`|TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection.| - -## CronWorkflowSpec - -CronWorkflowSpec is the specification of a CronWorkflow - -
-Examples with this field (click to open) - -- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) - -- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) - -- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) - -- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) - -- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) - -- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) - -- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) - -- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) - -- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) - -- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) - -- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) - -- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) - -- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) - -- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) - -- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) - -- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) - -- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) - -- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) - -- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`concurrencyPolicy`|`string`|ConcurrencyPolicy is the K8s-style concurrency policy that will be used| -|`failedJobsHistoryLimit`|`integer`|FailedJobsHistoryLimit is the number of failed jobs to be kept at a time| -|`schedule`|`string`|Schedule is a schedule to run the Workflow in Cron format. Deprecated, use Schedules| -|`schedules`|`Array< string >`|v3.6 and after: Schedules is a list of schedules to run the Workflow in Cron format| -|`startingDeadlineSeconds`|`integer`|StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed.| -|`stopStrategy`|[`StopStrategy`](#stopstrategy)|v3.6 and after: StopStrategy defines if the CronWorkflow should stop scheduling based on a condition| -|`successfulJobsHistoryLimit`|`integer`|SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time| -|`suspend`|`boolean`|Suspend is a flag that will stop new CronWorkflows from running if set to true| -|`timezone`|`string`|Timezone is the timezone against which the cron schedule will be calculated, e.g. "Asia/Tokyo". Default is machine's local time.| -|`when`|`string`|v3.6 and after: When is an expression that determines if a run should be scheduled.| -|`workflowMetadata`|[`ObjectMeta`](#objectmeta)|WorkflowMetadata contains some metadata of the workflow to be run| -|`workflowSpec`|[`WorkflowSpec`](#workflowspec)|WorkflowSpec is the spec of the workflow to be run| - -## CronWorkflowStatus - -CronWorkflowStatus is the status of a CronWorkflow - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`active`|`Array<`[`ObjectReference`](#objectreference)`>`|Active is a list of active workflows stemming from this CronWorkflow| -|`conditions`|`Array<`[`Condition`](#condition)`>`|Conditions is a list of conditions the CronWorkflow may have| -|`failed`|`integer`|v3.6 and after: Failed counts how many times child workflows failed| -|`lastScheduledTime`|[`Time`](#time)|LastScheduleTime is the last time the CronWorkflow was scheduled| -|`phase`|`string`|v3.6 and after: Phase is an enum of Active or Stopped. It changes to Stopped when stopStrategy.condition is true| -|`succeeded`|`integer`|v3.6 and after: Succeeded counts how many times child workflows succeeded| - -## Arguments - -Arguments to a template - -
-Examples with this field (click to open) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`artifacts`|`Array<`[`Artifact`](#artifact)`>`|Artifacts is the list of artifacts to pass to the template or workflow| -|`parameters`|`Array<`[`Parameter`](#parameter)`>`|Parameters is the list of parameters to pass to the template or workflow| - -## WorkflowLevelArtifactGC - -WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level - -
-Examples with this field (click to open) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`forceFinalizerRemoval`|`boolean`|ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails| -|`podMetadata`|[`Metadata`](#metadata)|PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion| -|`podSpecPatch`|`string`|PodSpecPatch holds strategic merge patch to apply against the artgc pod spec.| -|`serviceAccountName`|`string`|ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion| -|`strategy`|`string`|Strategy is the strategy to use.| - -## ArtifactRepositoryRef - -_No description available_ - -
-Examples with this field (click to open) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`configMap`|`string`|The name of the config map. Defaults to "artifact-repositories".| -|`key`|`string`|The config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation.| - -## ExecutorConfig - -ExecutorConfig holds configurations of an executor container. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`serviceAccountName`|`string`|ServiceAccountName specifies the service account name of the executor container.| - -## LifecycleHook - -_No description available_ - -
-Examples with this field (click to open) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`arguments`|[`Arguments`](#arguments)|Arguments hold arguments to the template| -|`expression`|`string`|Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored| -|`template`|`string`|Template is the name of the template to execute by the hook| -|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource to execute by the hook| - -## Metrics - -Metrics are a list of metrics emitted from a Workflow/Template - -
-Examples with this field (click to open) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`prometheus`|`Array<`[`Prometheus`](#prometheus)`>`|Prometheus is a list of prometheus metrics to be emitted| - -## PodGC - -PodGC describes how to delete completed pods as they complete - -
-Examples with this field (click to open) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`deleteDelayDuration`|`string`|DeleteDelayDuration specifies the duration before pods in the GC queue get deleted.| -|`labelSelector`|[`LabelSelector`](#labelselector)|LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.| -|`strategy`|`string`|Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods| - -## Metadata - -Pod metdata - -
-Examples with this field (click to open) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`annotations`|`Map< string , string >`|_No description available_| -|`labels`|`Map< string , string >`|_No description available_| - -## RetryStrategy - -RetryStrategy provides controls on how to retry a workflow step - -
-Examples with this field (click to open) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`affinity`|[`RetryAffinity`](#retryaffinity)|Affinity prevents running workflow's step on the same host| -|`backoff`|[`Backoff`](#backoff)|Backoff is a backoff strategy| -|`expression`|`string`|Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored| -|`limit`|[`IntOrString`](#intorstring)|Limit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be `limit + 1`.| -|`retryPolicy`|`string`|RetryPolicy is a policy of NodePhase statuses that will be retried| - -## Synchronization - -Synchronization holds synchronization lock configuration - -
-Examples with this field (click to open) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) - -- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`mutex`|[`Mutex`](#mutex)|Mutex holds the Mutex lock details - deprecated, use mutexes instead| -|`mutexes`|`Array<`[`Mutex`](#mutex)`>`|v3.6 and after: Mutexes holds the list of Mutex lock details| -|`semaphore`|[`SemaphoreRef`](#semaphoreref)|Semaphore holds the Semaphore configuration - deprecated, use semaphores instead| -|`semaphores`|`Array<`[`SemaphoreRef`](#semaphoreref)`>`|v3.6 and after: Semaphores holds the list of Semaphores configuration| - -## Template - -Template is a reusable and composable unit of execution in a workflow - -
-Examples with this field (click to open) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`activeDeadlineSeconds`|[`IntOrString`](#intorstring)|Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.| -|`affinity`|[`Affinity`](#affinity)|Affinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)| -|`archiveLocation`|[`ArtifactLocation`](#artifactlocation)|Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. If omitted, will use the default artifact repository location configured in the controller, appended with the / in the key.| -|`automountServiceAccountToken`|`boolean`|AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.| -|`container`|[`Container`](#container)|Container is the main container image to run in the pod| -|`containerSet`|[`ContainerSetTemplate`](#containersettemplate)|ContainerSet groups multiple containers within a single pod.| -|`daemon`|`boolean`|Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness| -|`dag`|[`DAGTemplate`](#dagtemplate)|DAG template subtype which runs a DAG| -|`data`|[`Data`](#data)|Data is a data template| -|`executor`|[`ExecutorConfig`](#executorconfig)|Executor holds configurations of the executor container.| -|`failFast`|`boolean`|FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this template is expanded with `withItems`, etc.| -|`hostAliases`|`Array<`[`HostAlias`](#hostalias)`>`|HostAliases is an optional list of hosts and IPs that will be injected into the pod spec| -|`http`|[`HTTP`](#http)|HTTP makes a HTTP request| -|`initContainers`|`Array<`[`UserContainer`](#usercontainer)`>`|InitContainers is a list of containers which run before the main container.| -|`inputs`|[`Inputs`](#inputs)|Inputs describe what inputs parameters and artifacts are supplied to this template| -|`memoize`|[`Memoize`](#memoize)|Memoize allows templates to use outputs generated from already executed templates| -|`metadata`|[`Metadata`](#metadata)|Metdata sets the pods's metadata, i.e. annotations and labels| -|`metrics`|[`Metrics`](#metrics)|Metrics are a list of metrics emitted from this template| -|`name`|`string`|Name is the name of the template| -|`nodeSelector`|`Map< string , string >`|NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.| -|`outputs`|[`Outputs`](#outputs)|Outputs describe the parameters and artifacts that this template produces| -|`parallelism`|`integer`|Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.| -|`plugin`|[`Plugin`](#plugin)|Plugin is a plugin template| -|`podSpecPatch`|`string`|PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).| -|`priority`|`integer`|Priority to apply to workflow pods.| -|`priorityClassName`|`string`|PriorityClassName to apply to workflow pods.| -|`resource`|[`ResourceTemplate`](#resourcetemplate)|Resource template subtype which can run k8s resources| -|`retryStrategy`|[`RetryStrategy`](#retrystrategy)|RetryStrategy describes how to retry a template when it fails| -|`schedulerName`|`string`|If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.| -|`script`|[`ScriptTemplate`](#scripttemplate)|Script runs a portion of code against an interpreter| -|`securityContext`|[`PodSecurityContext`](#podsecuritycontext)|SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.| -|`serviceAccountName`|`string`|ServiceAccountName to apply to workflow pods| -|`sidecars`|`Array<`[`UserContainer`](#usercontainer)`>`|Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes| -|`steps`|`Array>`|Steps define a series of sequential/parallel workflow steps| -|`suspend`|[`SuspendTemplate`](#suspendtemplate)|Suspend template subtype which can suspend a workflow when reaching the step| -|`synchronization`|[`Synchronization`](#synchronization)|Synchronization holds synchronization lock configuration for this template| -|`timeout`|`string`|Timeout allows to set the total node execution timeout duration counting from the node's start time. This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates.| -|`tolerations`|`Array<`[`Toleration`](#toleration)`>`|Tolerations to apply to workflow pods.| -|`volumes`|`Array<`[`Volume`](#volume)`>`|Volumes is a list of volumes that can be mounted by containers in a template.| - -## TTLStrategy - -TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed - -
-Examples with this field (click to open) - -- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`secondsAfterCompletion`|`integer`|SecondsAfterCompletion is the number of seconds to live after completion| -|`secondsAfterFailure`|`integer`|SecondsAfterFailure is the number of seconds to live after failure| -|`secondsAfterSuccess`|`integer`|SecondsAfterSuccess is the number of seconds to live after success| - -## VolumeClaimGC - -VolumeClaimGC describes how to delete volumes from completed Workflows - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`strategy`|`string`|Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess"| - -## WorkflowMetadata - -_No description available_ - -
-Examples with this field (click to open) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`annotations`|`Map< string , string >`|_No description available_| -|`labels`|`Map< string , string >`|_No description available_| -|`labelsFrom`|[`LabelValueFrom`](#labelvaluefrom)|_No description available_| - -## WorkflowTemplateRef - -WorkflowTemplateRef is a reference to a WorkflowTemplate resource. - -
-Examples with this field (click to open) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`clusterScope`|`boolean`|ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).| -|`name`|`string`|Name is the resource name of the workflow template.| - -## ArtGCStatus - -ArtGCStatus maintains state related to ArtifactGC - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`notSpecified`|`boolean`|if this is true, we already checked to see if we need to do it and we don't| -|`podsRecouped`|`Map< boolean , string >`|have completed Pods been processed? (mapped by Pod name) used to prevent re-processing the Status of a Pod more than once| -|`strategiesProcessed`|`Map< boolean , string >`|have Pods been started to perform this strategy? (enables us not to re-process what we've already done)| - -## ArtifactRepositoryRefStatus - -_No description available_ - -
-Examples with this field (click to open) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`artifactRepository`|[`ArtifactRepository`](#artifactrepository)|The repository the workflow will use. This maybe empty before v3.1.| -|`configMap`|`string`|The name of the config map. Defaults to "artifact-repositories".| -|`default`|`boolean`|If this ref represents the default artifact repository, rather than a config map.| -|`key`|`string`|The config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation.| -|`namespace`|`string`|The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found).| - -## Condition - -_No description available_ - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`message`|`string`|Message is the condition message| -|`status`|`string`|Status is the status of the condition| -|`type`|`string`|Type is the type of condition| - -## NodeStatus - -NodeStatus contains status information about an individual node in the workflow - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`boundaryID`|`string`|BoundaryID indicates the node ID of the associated template root node in which this node belongs to| -|`children`|`Array< string >`|Children is a list of child node IDs| -|`daemoned`|`boolean`|Daemoned tracks whether or not this node was daemoned and need to be terminated| -|`displayName`|`string`|DisplayName is a human readable representation of the node. Unique within a template boundary| -|`estimatedDuration`|`integer`|EstimatedDuration in seconds.| -|`finishedAt`|[`Time`](#time)|Time at which this node completed| -|`hostNodeName`|`string`|HostNodeName name of the Kubernetes node on which the Pod is running, if applicable| -|`id`|`string`|ID is a unique identifier of a node within the worklow It is implemented as a hash of the node name, which makes the ID deterministic| -|`inputs`|[`Inputs`](#inputs)|Inputs captures input parameter values and artifact locations supplied to this template invocation| -|`memoizationStatus`|[`MemoizationStatus`](#memoizationstatus)|MemoizationStatus holds information about cached nodes| -|`message`|`string`|A human readable message indicating details about why the node is in this condition.| -|`name`|`string`|Name is unique name in the node tree used to generate the node ID| -|`nodeFlag`|[`NodeFlag`](#nodeflag)|NodeFlag tracks some history of node. e.g.) hooked, retried, etc.| -|`outboundNodes`|`Array< string >`|OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation. For every invocation of a template, there are nodes which we considered as "outbound". Essentially, these are last nodes in the execution sequence to run, before the template is considered completed. These nodes are then connected as parents to a following step. In the case of single pod steps (i.e. container, script, resource templates), this list will be nil since the pod itself is already considered the "outbound" node. In the case of DAGs, outbound nodes are the "target" tasks (tasks with no children). In the case of steps, outbound nodes are all the containers involved in the last step group. NOTE: since templates are composable, the list of outbound nodes are carried upwards when a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of a template, will be a superset of the outbound nodes of its last children.| -|`outputs`|[`Outputs`](#outputs)|Outputs captures output parameter values and artifact locations produced by this template invocation| -|`phase`|`string`|Phase a simple, high-level summary of where the node is in its lifecycle. Can be used as a state machine. Will be one of these values "Pending", "Running" before the node is completed, or "Succeeded", "Skipped", "Failed", "Error", or "Omitted" as a final state.| -|`podIP`|`string`|PodIP captures the IP of the pod for daemoned steps| -|`progress`|`string`|Progress to completion| -|`resourcesDuration`|`Map< integer , int64 >`|ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the nodes completes.| -|`startedAt`|[`Time`](#time)|Time at which this node started| -|`synchronizationStatus`|[`NodeSynchronizationStatus`](#nodesynchronizationstatus)|SynchronizationStatus is the synchronization status of the node| -|`templateName`|`string`|TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)| -|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)| -|`templateScope`|`string`|TemplateScope is the template scope in which the template of this node was retrieved.| -|`type`|`string`|Type indicates type of node| - -## Outputs - -Outputs hold parameters, artifacts, and results from a step - -
-Examples with this field (click to open) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`artifacts`|`Array<`[`Artifact`](#artifact)`>`|Artifacts holds the list of output artifacts produced by a step| -|`exitCode`|`string`|ExitCode holds the exit code of a script template| -|`parameters`|`Array<`[`Parameter`](#parameter)`>`|Parameters holds the list of output parameters produced by a step| -|`result`|`string`|Result holds the result (stdout) of a script template| - -## SynchronizationStatus - -SynchronizationStatus stores the status of semaphore and mutex. - -
-Examples with this field (click to open) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) - -- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`mutex`|[`MutexStatus`](#mutexstatus)|Mutex stores this workflow's mutex holder details| -|`semaphore`|[`SemaphoreStatus`](#semaphorestatus)|Semaphore stores this workflow's Semaphore holder details| - -## StopStrategy - -v3.6 and after: StopStrategy defines if the CronWorkflow should stop scheduling based on a condition - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`condition`|`string`|v3.6 and after: Condition is an expression that stops scheduling workflows when true. Use the variables `failed` or `succeeded` to access the number of failed or successful child workflows.| - -## Artifact - -Artifact indicates an artifact to place at a specified path - -
-Examples with this field (click to open) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) - -- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`archive`|[`ArchiveStrategy`](#archivestrategy)|Archive controls how the artifact will be saved to the artifact repository.| -|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived| -|`artifactGC`|[`ArtifactGC`](#artifactgc)|ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows| -|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details| -|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details| -|`deleted`|`boolean`|Has this been deleted?| -|`from`|`string`|From allows an artifact to reference an artifact from a previous step| -|`fromExpression`|`string`|FromExpression, if defined, is evaluated to specify the value for the artifact| -|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details| -|`git`|[`GitArtifact`](#gitartifact)|Git contains git artifact location details| -|`globalName`|`string`|GlobalName exports an output artifact to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts| -|`hdfs`|[`HDFSArtifact`](#hdfsartifact)|HDFS contains HDFS artifact location details| -|`http`|[`HTTPArtifact`](#httpartifact)|HTTP contains HTTP artifact location details| -|`mode`|`integer`|mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.| -|`name`|`string`|name of the artifact. must be unique within a template's inputs/outputs.| -|`optional`|`boolean`|Make Artifacts optional, if Artifacts doesn't generate or exist| -|`oss`|[`OSSArtifact`](#ossartifact)|OSS contains OSS artifact location details| -|`path`|`string`|Path is the container path to the artifact| -|`raw`|[`RawArtifact`](#rawartifact)|Raw contains raw artifact location details| -|`recurseMode`|`boolean`|If mode is set, apply the permission recursively into the artifact if it is a folder| -|`s3`|[`S3Artifact`](#s3artifact)|S3 contains S3 artifact location details| -|`subPath`|`string`|SubPath allows an artifact to be sourced from a subpath within the specified source| - -## Parameter - -Parameter indicate a passed string parameter to a service template with an optional default value - -
-Examples with this field (click to open) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`default`|`string`|Default is the default value to use for an input parameter if a value was not supplied| -|`description`|`string`|Description is the parameter description| -|`enum`|`Array< string >`|Enum holds a list of string values to choose from, for the actual value of the parameter| -|`globalName`|`string`|GlobalName exports an output parameter to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters| -|`name`|`string`|Name is the parameter name| -|`value`|`string`|Value is the literal value to use for the parameter. If specified in the context of an input parameter, the value takes precedence over any passed values| -|`valueFrom`|[`ValueFrom`](#valuefrom)|ValueFrom is the source for the output parameter's value| - -## TemplateRef - -TemplateRef is a reference of template resource. - -
-Examples with this field (click to open) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`clusterScope`|`boolean`|ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).| -|`name`|`string`|Name is the resource name of the template.| -|`template`|`string`|Template is the name of referred template in the resource.| - -## Prometheus - -Prometheus is a prometheus metric to be emitted - -
-Examples with this field (click to open) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`counter`|[`Counter`](#counter)|Counter is a counter metric| -|`gauge`|[`Gauge`](#gauge)|Gauge is a gauge metric| -|`help`|`string`|Help is a string that describes the metric| -|`histogram`|[`Histogram`](#histogram)|Histogram is a histogram metric| -|`labels`|`Array<`[`MetricLabel`](#metriclabel)`>`|Labels is a list of metric labels| -|`name`|`string`|Name is the name of the metric| -|`when`|`string`|When is a conditional statement that decides when to emit the metric| - -## RetryAffinity - -RetryAffinity prevents running steps on the same host. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`nodeAntiAffinity`|[`RetryNodeAntiAffinity`](#retrynodeantiaffinity)|_No description available_| - -## Backoff - -Backoff is a backoff strategy to use within retryStrategy - -
-Examples with this field (click to open) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`duration`|`string`|Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h")| -|`factor`|[`IntOrString`](#intorstring)|Factor is a factor to multiply the base duration after each failed retry| -|`maxDuration`|`string`|MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. However, when the workflow fails, the pod's deadline is then overridden by maxDuration. This ensures that the workflow does not exceed the specified maximum duration when retries are involved.| - -## Mutex - -Mutex holds Mutex configuration - -
-Examples with this field (click to open) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|name of the mutex| -|`namespace`|`string`|Namespace is the namespace of the mutex, default: [namespace of workflow]| - -## SemaphoreRef - -SemaphoreRef is a reference of Semaphore - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`configMapKeyRef`|[`ConfigMapKeySelector`](#configmapkeyselector)|ConfigMapKeyRef is configmap selector for Semaphore configuration| -|`namespace`|`string`|Namespace is the namespace of the configmap, default: [namespace of workflow]| - -## ArtifactLocation - -ArtifactLocation describes a location for a single or multiple artifacts. It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). It is also used to describe the location of multiple artifacts such as the archive location of a single workflow step, which the executor will use as a default location to store its files. - -
-Examples with this field (click to open) - -- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived| -|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details| -|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details| -|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details| -|`git`|[`GitArtifact`](#gitartifact)|Git contains git artifact location details| -|`hdfs`|[`HDFSArtifact`](#hdfsartifact)|HDFS contains HDFS artifact location details| -|`http`|[`HTTPArtifact`](#httpartifact)|HTTP contains HTTP artifact location details| -|`oss`|[`OSSArtifact`](#ossartifact)|OSS contains OSS artifact location details| -|`raw`|[`RawArtifact`](#rawartifact)|Raw contains raw artifact location details| -|`s3`|[`S3Artifact`](#s3artifact)|S3 contains S3 artifact location details| - -## ContainerSetTemplate - -_No description available_ - -
-Examples with this field (click to open) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) - -- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`containers`|`Array<`[`ContainerNode`](#containernode)`>`|_No description available_| -|`retryStrategy`|[`ContainerSetRetryStrategy`](#containersetretrystrategy)|RetryStrategy describes how to retry container nodes if the container set fails. Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers.| -|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|_No description available_| - -## DAGTemplate - -DAGTemplate is a template subtype for directed acyclic graph templates - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) - -- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`failFast`|`boolean`|This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps, as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed before failing the DAG itself. The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to completion (either success or failure), regardless of the failed outcomes of branches in the DAG. More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442| -|`target`|`string`|Target are one or more names of targets to execute in a DAG| -|`tasks`|`Array<`[`DAGTask`](#dagtask)`>`|Tasks are a list of DAG tasks| - -## Data - -Data is a data template - -
-Examples with this field (click to open) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`source`|[`DataSource`](#datasource)|Source sources external data into a data template| -|`transformation`|`Array<`[`TransformationStep`](#transformationstep)`>`|Transformation applies a set of transformations| - -## HTTP - -_No description available_ - -
-Examples with this field (click to open) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) - -- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`body`|`string`|Body is content of the HTTP Request| -|`bodyFrom`|[`HTTPBodySource`](#httpbodysource)|BodyFrom is content of the HTTP Request as Bytes| -|`headers`|`Array<`[`HTTPHeader`](#httpheader)`>`|Headers are an optional list of headers to send with HTTP requests| -|`insecureSkipVerify`|`boolean`|InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client| -|`method`|`string`|Method is HTTP methods for HTTP Request| -|`successCondition`|`string`|SuccessCondition is an expression if evaluated to true is considered successful| -|`timeoutSeconds`|`integer`|TimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds| -|`url`|`string`|URL of the HTTP Request| - -## UserContainer - -UserContainer is a container specified by a user. - -
-Examples with this field (click to open) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| -|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| -|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| -|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| -|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| -|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`mirrorVolumeMounts`|`boolean`|MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding| -|`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| -|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| -|`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| -|`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| -|`restartPolicy`|`string`|RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.| -|`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| -|`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| -|`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| -|`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| -|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| -|`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| -|`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| -|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| -|`workingDir`|`string`|Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.| - -## Inputs - -Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another - -
-Examples with this field (click to open) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) - -- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`artifacts`|`Array<`[`Artifact`](#artifact)`>`|Artifact are a list of artifacts passed as inputs| -|`parameters`|`Array<`[`Parameter`](#parameter)`>`|Parameters are a list of parameters passed as inputs| - -## Memoize - -Memoization enables caching for the Outputs of the template - -
-Examples with this field (click to open) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`cache`|[`Cache`](#cache)|Cache sets and configures the kind of cache| -|`key`|`string`|Key is the key to use as the caching key| -|`maxAge`|`string`|MaxAge is the maximum age (e.g. "180s", "24h") of an entry that is still considered valid. If an entry is older than the MaxAge, it will be ignored.| - -## Plugin - -Plugin is an Object with exactly one key - -## ResourceTemplate - -ResourceTemplate is a template subtype to manipulate kubernetes resources - -
-Examples with this field (click to open) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) - -- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) - -- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) - -- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`action`|`string`|Action is the action to perform to the resource. Must be one of: get, create, apply, delete, replace, patch| -|`failureCondition`|`string`|FailureCondition is a label selector expression which describes the conditions of the k8s resource in which the step was considered failed| -|`flags`|`Array< string >`|Flags is a set of additional options passed to kubectl before submitting a resource I.e. to disable resource validation: flags: [ "--validate=false" # disable resource validation ]| -|`manifest`|`string`|Manifest contains the kubernetes manifest| -|`manifestFrom`|[`ManifestFrom`](#manifestfrom)|ManifestFrom is the source for a single kubernetes manifest| -|`mergeStrategy`|`string`|MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" Must be one of: strategic, merge, json| -|`setOwnerReference`|`boolean`|SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.| -|`successCondition`|`string`|SuccessCondition is a label selector expression which describes the conditions of the k8s resource in which it is acceptable to proceed to the following step| - -## ScriptTemplate - -ScriptTemplate is a template subtype to enable scripting through code steps - -
-Examples with this field (click to open) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| -|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| -|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| -|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| -|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| -|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| -|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| -|`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| -|`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| -|`restartPolicy`|`string`|RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.| -|`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| -|`source`|`string`|Source contains the source code of the script to execute| -|`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| -|`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| -|`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| -|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| -|`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| -|`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| -|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| -|`workingDir`|`string`|Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.| - -## WorkflowStep - -WorkflowStep is a reference to a template to execute in a series of step - -
-Examples with this field (click to open) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) - -- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`arguments`|[`Arguments`](#arguments)|Arguments hold arguments to the template| -|`continueOn`|[`ContinueOn`](#continueon)|ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified| -|`hooks`|[`LifecycleHook`](#lifecyclehook)|Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step| -|`inline`|[`Template`](#template)|Inline is the template. Template must be empty if this is declared (and vice-versa).| -|`name`|`string`|Name of the step| -|~~`onExit`~~|~~`string`~~|~~OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template.~~ DEPRECATED: Use Hooks[exit].Template instead.| -|`template`|`string`|Template is the name of the template to execute as the step| -|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource to execute as the step.| -|`when`|`string`|When is an expression in which the step should conditionally execute| -|`withItems`|`Array<`[`Item`](#item)`>`|WithItems expands a step into multiple parallel steps from the items in the list| -|`withParam`|`string`|WithParam expands a step into multiple parallel steps from the value in the parameter, which is expected to be a JSON list.| -|`withSequence`|[`Sequence`](#sequence)|WithSequence expands a step into a numeric sequence| - -## SuspendTemplate - -SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time - -
-Examples with this field (click to open) - -- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) - -- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`duration`|`string`|Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h"| - -## LabelValueFrom - -_No description available_ - -
-Examples with this field (click to open) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`expression`|`string`|_No description available_| - -## ArtifactRepository - -ArtifactRepository represents an artifact repository in which a controller will store its artifacts - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`archiveLogs`|`boolean`|ArchiveLogs enables log archiving| -|`artifactory`|[`ArtifactoryArtifactRepository`](#artifactoryartifactrepository)|Artifactory stores artifacts to JFrog Artifactory| -|`azure`|[`AzureArtifactRepository`](#azureartifactrepository)|Azure stores artifact in an Azure Storage account| -|`gcs`|[`GCSArtifactRepository`](#gcsartifactrepository)|GCS stores artifact in a GCS object store| -|`hdfs`|[`HDFSArtifactRepository`](#hdfsartifactrepository)|HDFS stores artifacts in HDFS| -|`oss`|[`OSSArtifactRepository`](#ossartifactrepository)|OSS stores artifact in a OSS-compliant object store| -|`s3`|[`S3ArtifactRepository`](#s3artifactrepository)|S3 stores artifact in a S3-compliant object store| - -## MemoizationStatus - -MemoizationStatus is the status of this memoized node - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`cacheName`|`string`|Cache is the name of the cache that was used| -|`hit`|`boolean`|Hit indicates whether this node was created from a cache entry| -|`key`|`string`|Key is the name of the key used for this node's cache| - -## NodeFlag - -_No description available_ - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`hooked`|`boolean`|Hooked tracks whether or not this node was triggered by hook or onExit| -|`retried`|`boolean`|Retried tracks whether or not this node was retried by retryStrategy| - -## NodeSynchronizationStatus - -NodeSynchronizationStatus stores the status of a node - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`waiting`|`string`|Waiting is the name of the lock that this node is waiting for| - -## MutexStatus - -MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks. - -
-Examples with this field (click to open) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`holding`|`Array<`[`MutexHolding`](#mutexholding)`>`|Holding is a list of mutexes and their respective objects that are held by mutex lock for this io.argoproj.workflow.v1alpha1.| -|`waiting`|`Array<`[`MutexHolding`](#mutexholding)`>`|Waiting is a list of mutexes and their respective objects this workflow is waiting for.| - -## SemaphoreStatus - -_No description available_ - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`holding`|`Array<`[`SemaphoreHolding`](#semaphoreholding)`>`|Holding stores the list of resource acquired synchronization lock for workflows.| -|`waiting`|`Array<`[`SemaphoreHolding`](#semaphoreholding)`>`|Waiting indicates the list of current synchronization lock holders.| - -## ArchiveStrategy - -ArchiveStrategy describes how to archive files/directory when saving artifacts - -
-Examples with this field (click to open) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`none`|[`NoneStrategy`](#nonestrategy)|_No description available_| -|`tar`|[`TarStrategy`](#tarstrategy)|_No description available_| -|`zip`|[`ZipStrategy`](#zipstrategy)|_No description available_| - -## ArtifactGC - -ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed - -
-Examples with this field (click to open) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`podMetadata`|[`Metadata`](#metadata)|PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion| -|`serviceAccountName`|`string`|ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion| -|`strategy`|`string`|Strategy is the strategy to use.| - -## ArtifactoryArtifact - -ArtifactoryArtifact is the location of an artifactory artifact - -
-Examples with this field (click to open) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password| -|`url`|`string`|URL of the artifact| -|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username| - -## AzureArtifact - -AzureArtifact is the location of a an Azure Storage artifact - -
-Examples with this field (click to open) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`accountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccountKeySecret is the secret selector to the Azure Blob Storage account access key| -|`blob`|`string`|Blob is the blob name (i.e., path) in the container where the artifact resides| -|`container`|`string`|Container is the container where resources will be stored| -|`endpoint`|`string`|Endpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net"| -|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| - -## GCSArtifact - -GCSArtifact is the location of a GCS artifact - -
-Examples with this field (click to open) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`bucket`|`string`|Bucket is the name of the bucket| -|`key`|`string`|Key is the path in the bucket where the artifact resides| -|`serviceAccountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|ServiceAccountKeySecret is the secret selector to the bucket's service account key| - -## GitArtifact - -GitArtifact is the location of an git artifact - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`branch`|`string`|Branch is the branch to fetch when `SingleBranch` is enabled| -|`depth`|`integer`|Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip| -|`disableSubmodules`|`boolean`|DisableSubmodules disables submodules during git clone| -|`fetch`|`Array< string >`|Fetch specifies a number of refs that should be fetched before checkout| -|`insecureIgnoreHostKey`|`boolean`|InsecureIgnoreHostKey disables SSH strict host key checking during git clone| -|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password| -|`repo`|`string`|Repo is the git repository| -|`revision`|`string`|Revision is the git commit, tag, branch to checkout| -|`singleBranch`|`boolean`|SingleBranch enables single branch clone, using the `branch` parameter| -|`sshPrivateKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SSHPrivateKeySecret is the secret selector to the repository ssh private key| -|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username| - -## HDFSArtifact - -HDFSArtifact is the location of an HDFS artifact - -
-Examples with this field (click to open) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`addresses`|`Array< string >`|Addresses is accessible addresses of HDFS name nodes| -|`dataTransferProtection`|`string`|DataTransferProtection is the protection level for HDFS data transfer. It corresponds to the dfs.data.transfer.protection configuration in HDFS.| -|`force`|`boolean`|Force copies a file forcibly even if it exists| -|`hdfsUser`|`string`|HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.| -|`krbCCacheSecret`|[`SecretKeySelector`](#secretkeyselector)|KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.| -|`krbConfigConfigMap`|[`ConfigMapKeySelector`](#configmapkeyselector)|KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.| -|`krbKeytabSecret`|[`SecretKeySelector`](#secretkeyselector)|KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.| -|`krbRealm`|`string`|KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.| -|`krbServicePrincipalName`|`string`|KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.| -|`krbUsername`|`string`|KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.| -|`path`|`string`|Path is a file path in HDFS| - -## HTTPArtifact - -HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container - -
-Examples with this field (click to open) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) - -- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`auth`|[`HTTPAuth`](#httpauth)|Auth contains information for client authentication| -|`headers`|`Array<`[`Header`](#header)`>`|Headers are an optional list of headers to send with HTTP requests for artifacts| -|`url`|`string`|URL of the artifact| - -## OSSArtifact - -OSSArtifact is the location of an Alibaba Cloud OSS artifact - -
-Examples with this field (click to open) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`accessKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccessKeySecret is the secret selector to the bucket's access key| -|`bucket`|`string`|Bucket is the name of the bucket| -|`createBucketIfNotPresent`|`boolean`|CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist| -|`endpoint`|`string`|Endpoint is the hostname of the bucket endpoint| -|`key`|`string`|Key is the path in the bucket where the artifact resides| -|`lifecycleRule`|[`OSSLifecycleRule`](#osslifecyclerule)|LifecycleRule specifies how to manage bucket's lifecycle| -|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key| -|`securityToken`|`string`|SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm| -|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| - -## RawArtifact - -RawArtifact allows raw string content to be placed as an artifact in a container - -
-Examples with this field (click to open) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`data`|`string`|Data is the string contents of the artifact| - -## S3Artifact - -S3Artifact is the location of an S3 artifact - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`accessKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccessKeySecret is the secret selector to the bucket's access key| -|`bucket`|`string`|Bucket is the name of the bucket| -|`caSecret`|[`SecretKeySelector`](#secretkeyselector)|CASecret specifies the secret that contains the CA, used to verify the TLS connection| -|`createBucketIfNotPresent`|[`CreateS3BucketOptions`](#creates3bucketoptions)|CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.| -|`encryptionOptions`|[`S3EncryptionOptions`](#s3encryptionoptions)|_No description available_| -|`endpoint`|`string`|Endpoint is the hostname of the bucket endpoint| -|`insecure`|`boolean`|Insecure will connect to the service with TLS| -|`key`|`string`|Key is the key in the bucket where the artifact resides| -|`region`|`string`|Region contains the optional bucket region| -|`roleARN`|`string`|RoleARN is the Amazon Resource Name (ARN) of the role to assume.| -|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key| -|`sessionTokenSecret`|[`SecretKeySelector`](#secretkeyselector)|SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant| -|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| - -## ValueFrom - -ValueFrom describes a location in which to obtain the value to a parameter - -
-Examples with this field (click to open) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`configMapKeyRef`|[`ConfigMapKeySelector`](#configmapkeyselector)|ConfigMapKeyRef is configmap selector for input parameter configuration| -|`default`|`string`|Default specifies a value to be used if retrieving the value from the specified source fails| -|`event`|`string`|Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message`| -|`expression`|`string`|Expression, if defined, is evaluated to specify the value for the parameter| -|`jqFilter`|`string`|JQFilter expression against the resource object in resource templates| -|`jsonPath`|`string`|JSONPath of a resource to retrieve an output parameter value from in resource templates| -|`parameter`|`string`|Parameter reference to a step or dag task in which to retrieve an output parameter value from (e.g. '{{steps.mystep.outputs.myparam}}')| -|`path`|`string`|Path in the container to retrieve an output parameter value from in container templates| -|`supplied`|[`SuppliedValueFrom`](#suppliedvaluefrom)|Supplied value to be filled in directly, either through the CLI, API, etc.| - -## Counter - -Counter is a Counter prometheus metric - -
-Examples with this field (click to open) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`value`|`string`|Value is the value of the metric| - -## Gauge - -Gauge is a Gauge prometheus metric - -
-Examples with this field (click to open) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`operation`|`string`|Operation defines the operation to apply with value and the metrics' current value| -|`realtime`|`boolean`|Realtime emits this metric in real time if applicable| -|`value`|`string`|Value is the value to be used in the operation with the metric's current value. If no operation is set, value is the value of the metric| - -## Histogram - -Histogram is a Histogram prometheus metric - -
-Examples with this field (click to open) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`buckets`|`Array<`[`Amount`](#amount)`>`|Buckets is a list of bucket divisors for the histogram| -|`value`|`string`|Value is the value of the metric| - -## MetricLabel - -MetricLabel is a single label for a prometheus metric - -
-Examples with this field (click to open) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) - -- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) - -- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) - -- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - -- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`key`|`string`|_No description available_| -|`value`|`string`|_No description available_| - -## RetryNodeAntiAffinity - -RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. In order to prevent running steps on the same host, it uses "kubernetes.io/hostname". - -## ContainerNode - -_No description available_ - -
-Examples with this field (click to open) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) - -- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`dependencies`|`Array< string >`|_No description available_| -|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| -|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| -|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| -|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| -|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| -|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| -|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| -|`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| -|`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| -|`restartPolicy`|`string`|RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.| -|`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| -|`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| -|`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| -|`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| -|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| -|`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| -|`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| -|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| -|`workingDir`|`string`|Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.| - -## ContainerSetRetryStrategy - -ContainerSetRetryStrategy provides controls on how to retry a container set - -
-Examples with this field (click to open) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`duration`|`string`|Duration is the time between each retry, examples values are "300ms", "1s" or "5m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".| -|`retries`|[`IntOrString`](#intorstring)|Retries is the maximum number of retry attempts for each container. It does not include the first, original attempt; the maximum number of total attempts will be `retries + 1`.| - -## DAGTask - -DAGTask represents a node in the graph during DAG execution - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) - -- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`arguments`|[`Arguments`](#arguments)|Arguments are the parameter and artifact arguments to the template| -|`continueOn`|[`ContinueOn`](#continueon)|ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified| -|`dependencies`|`Array< string >`|Dependencies are name of other targets which this depends on| -|`depends`|`string`|Depends are name of other targets which this depends on| -|`hooks`|[`LifecycleHook`](#lifecyclehook)|Hooks hold the lifecycle hook which is invoked at lifecycle of task, irrespective of the success, failure, or error status of the primary task| -|`inline`|[`Template`](#template)|Inline is the template. Template must be empty if this is declared (and vice-versa).| -|`name`|`string`|Name is the name of the target| -|~~`onExit`~~|~~`string`~~|~~OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template.~~ DEPRECATED: Use Hooks[exit].Template instead.| -|`template`|`string`|Name of template to execute| -|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource to execute.| -|`when`|`string`|When is an expression in which the task should conditionally execute| -|`withItems`|`Array<`[`Item`](#item)`>`|WithItems expands a task into multiple parallel tasks from the items in the list| -|`withParam`|`string`|WithParam expands a task into multiple parallel tasks from the value in the parameter, which is expected to be a JSON list.| -|`withSequence`|[`Sequence`](#sequence)|WithSequence expands a task into a numeric sequence| - -## DataSource - -DataSource sources external data into a data template - -
-Examples with this field (click to open) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`artifactPaths`|[`ArtifactPaths`](#artifactpaths)|ArtifactPaths is a data transformation that collects a list of artifact paths| - -## TransformationStep - -_No description available_ - -
-Examples with this field (click to open) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`expression`|`string`|Expression defines an expr expression to apply| - -## HTTPBodySource - -HTTPBodySource contains the source of the HTTP body. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`bytes`|`byte`|_No description available_| - -## HTTPHeader - -_No description available_ - -
-Examples with this field (click to open) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|_No description available_| -|`value`|`string`|_No description available_| -|`valueFrom`|[`HTTPHeaderSource`](#httpheadersource)|_No description available_| - -## Cache - -Cache is the configuration for the type of cache to be used - -
-Examples with this field (click to open) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`configMap`|[`ConfigMapKeySelector`](#configmapkeyselector)|ConfigMap sets a ConfigMap-based cache| - -## ManifestFrom - -_No description available_ - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`artifact`|[`Artifact`](#artifact)|Artifact contains the artifact to use| - -## ContinueOn - -ContinueOn defines if a workflow should continue even if a task or step fails/errors. It can be specified if the workflow should continue when the pod errors, fails or both. - -
-Examples with this field (click to open) - -- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`error`|`boolean`|_No description available_| -|`failed`|`boolean`|_No description available_| - -## Item - -Item expands a single workflow step into multiple parallel steps The value of Item can be a map, string, bool, or number - -
-Examples with this field (click to open) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) - -- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) -
- -## Sequence - -Sequence expands a workflow step into numeric range - -
-Examples with this field (click to open) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`count`|[`IntOrString`](#intorstring)|Count is number of elements in the sequence (default: 0). Not to be used with end| -|`end`|[`IntOrString`](#intorstring)|Number at which to end the sequence (default: 0). Not to be used with Count| -|`format`|`string`|Format is a printf format string to format the value in the sequence| -|`start`|[`IntOrString`](#intorstring)|Number at which to start the sequence (default: 0)| - -## ArtifactoryArtifactRepository - -ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository - -
-Examples with this field (click to open) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`keyFormat`|`string`|KeyFormat defines the format of how to store keys and can reference workflow variables.| -|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password| -|`repoURL`|`string`|RepoURL is the url for artifactory repo.| -|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username| - -## AzureArtifactRepository - -AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository - -
-Examples with this field (click to open) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`accountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccountKeySecret is the secret selector to the Azure Blob Storage account access key| -|`blobNameFormat`|`string`|BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables| -|`container`|`string`|Container is the container where resources will be stored| -|`endpoint`|`string`|Endpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net"| -|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| - -## GCSArtifactRepository - -GCSArtifactRepository defines the controller configuration for a GCS artifact repository - -
-Examples with this field (click to open) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`bucket`|`string`|Bucket is the name of the bucket| -|`keyFormat`|`string`|KeyFormat defines the format of how to store keys and can reference workflow variables.| -|`serviceAccountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|ServiceAccountKeySecret is the secret selector to the bucket's service account key| - -## HDFSArtifactRepository - -HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository - -
-Examples with this field (click to open) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`addresses`|`Array< string >`|Addresses is accessible addresses of HDFS name nodes| -|`dataTransferProtection`|`string`|DataTransferProtection is the protection level for HDFS data transfer. It corresponds to the dfs.data.transfer.protection configuration in HDFS.| -|`force`|`boolean`|Force copies a file forcibly even if it exists| -|`hdfsUser`|`string`|HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.| -|`krbCCacheSecret`|[`SecretKeySelector`](#secretkeyselector)|KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.| -|`krbConfigConfigMap`|[`ConfigMapKeySelector`](#configmapkeyselector)|KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.| -|`krbKeytabSecret`|[`SecretKeySelector`](#secretkeyselector)|KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.| -|`krbRealm`|`string`|KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.| -|`krbServicePrincipalName`|`string`|KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.| -|`krbUsername`|`string`|KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.| -|`pathFormat`|`string`|PathFormat is defines the format of path to store a file. Can reference workflow variables| - -## OSSArtifactRepository - -OSSArtifactRepository defines the controller configuration for an OSS artifact repository - -
-Examples with this field (click to open) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`accessKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccessKeySecret is the secret selector to the bucket's access key| -|`bucket`|`string`|Bucket is the name of the bucket| -|`createBucketIfNotPresent`|`boolean`|CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist| -|`endpoint`|`string`|Endpoint is the hostname of the bucket endpoint| -|`keyFormat`|`string`|KeyFormat defines the format of how to store keys and can reference workflow variables.| -|`lifecycleRule`|[`OSSLifecycleRule`](#osslifecyclerule)|LifecycleRule specifies how to manage bucket's lifecycle| -|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key| -|`securityToken`|`string`|SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm| -|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| - -## S3ArtifactRepository - -S3ArtifactRepository defines the controller configuration for an S3 artifact repository - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`accessKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccessKeySecret is the secret selector to the bucket's access key| -|`bucket`|`string`|Bucket is the name of the bucket| -|`caSecret`|[`SecretKeySelector`](#secretkeyselector)|CASecret specifies the secret that contains the CA, used to verify the TLS connection| -|`createBucketIfNotPresent`|[`CreateS3BucketOptions`](#creates3bucketoptions)|CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.| -|`encryptionOptions`|[`S3EncryptionOptions`](#s3encryptionoptions)|_No description available_| -|`endpoint`|`string`|Endpoint is the hostname of the bucket endpoint| -|`insecure`|`boolean`|Insecure will connect to the service with TLS| -|`keyFormat`|`string`|KeyFormat defines the format of how to store keys and can reference workflow variables.| -|~~`keyPrefix`~~|~~`string`~~|~~KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts.~~ DEPRECATED. Use KeyFormat instead| -|`region`|`string`|Region contains the optional bucket region| -|`roleARN`|`string`|RoleARN is the Amazon Resource Name (ARN) of the role to assume.| -|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key| -|`sessionTokenSecret`|[`SecretKeySelector`](#secretkeyselector)|SessionTokenSecret is used for ephemeral credentials like an IAM assume role or S3 access grant| -|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.| - -## MutexHolding - -MutexHolding describes the mutex and the object which is holding it. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`holder`|`string`|Holder is a reference to the object which holds the Mutex. Holding Scenario: 1. Current workflow's NodeID which is holding the lock. e.g: ${NodeID} Waiting Scenario: 1. Current workflow or other workflow NodeID which is holding the lock. e.g: ${WorkflowName}/${NodeID}| -|`mutex`|`string`|Reference for the mutex e.g: ${namespace}/mutex/${mutexName}| - -## SemaphoreHolding - -_No description available_ - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`holders`|`Array< string >`|Holders stores the list of current holder names in the io.argoproj.workflow.v1alpha1.| -|`semaphore`|`string`|Semaphore stores the semaphore name.| - -## NoneStrategy - -NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately. - -
-Examples with this field (click to open) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) -
- -## TarStrategy - -TarStrategy will tar and gzip the file or directory when saving - -
-Examples with this field (click to open) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`compressionLevel`|`integer`|CompressionLevel specifies the gzip compression level to use for the artifact. Defaults to gzip.DefaultCompression.| - -## ZipStrategy - -ZipStrategy will unzip zipped input artifacts - -## HTTPAuth - -_No description available_ - -
-Examples with this field (click to open) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`basicAuth`|[`BasicAuth`](#basicauth)|_No description available_| -|`clientCert`|[`ClientCertAuth`](#clientcertauth)|_No description available_| -|`oauth2`|[`OAuth2Auth`](#oauth2auth)|_No description available_| - -## Header - -Header indicate a key-value request header to be used when fetching artifacts over HTTP - -
-Examples with this field (click to open) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|Name is the header name| -|`value`|`string`|Value is the literal value to use for the header| - -## OSSLifecycleRule - -OSSLifecycleRule specifies how to manage bucket's lifecycle - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`markDeletionAfterDays`|`integer`|MarkDeletionAfterDays is the number of days before we delete objects in the bucket| -|`markInfrequentAccessAfterDays`|`integer`|MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type| - -## CreateS3BucketOptions - -CreateS3BucketOptions options used to determine automatic automatic bucket-creation process - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`objectLocking`|`boolean`|ObjectLocking Enable object locking| - -## S3EncryptionOptions - -S3EncryptionOptions used to determine encryption options during s3 operations - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`enableEncryption`|`boolean`|EnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used| -|`kmsEncryptionContext`|`string`|KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information| -|`kmsKeyId`|`string`|KMSKeyId tells the driver to encrypt the object using the specified KMS Key.| -|`serverSideCustomerKeySecret`|[`SecretKeySelector`](#secretkeyselector)|ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret.| - -## SuppliedValueFrom - -SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc. - -
-Examples with this field (click to open) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) -
- -## Amount - -Amount represent a numeric amount. - -
-Examples with this field (click to open) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) -
- -## ArtifactPaths - -ArtifactPaths expands a step from a collection of artifacts - -
-Examples with this field (click to open) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`archive`|[`ArchiveStrategy`](#archivestrategy)|Archive controls how the artifact will be saved to the artifact repository.| -|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived| -|`artifactGC`|[`ArtifactGC`](#artifactgc)|ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows| -|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details| -|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details| -|`deleted`|`boolean`|Has this been deleted?| -|`from`|`string`|From allows an artifact to reference an artifact from a previous step| -|`fromExpression`|`string`|FromExpression, if defined, is evaluated to specify the value for the artifact| -|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details| -|`git`|[`GitArtifact`](#gitartifact)|Git contains git artifact location details| -|`globalName`|`string`|GlobalName exports an output artifact to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts| -|`hdfs`|[`HDFSArtifact`](#hdfsartifact)|HDFS contains HDFS artifact location details| -|`http`|[`HTTPArtifact`](#httpartifact)|HTTP contains HTTP artifact location details| -|`mode`|`integer`|mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.| -|`name`|`string`|name of the artifact. must be unique within a template's inputs/outputs.| -|`optional`|`boolean`|Make Artifacts optional, if Artifacts doesn't generate or exist| -|`oss`|[`OSSArtifact`](#ossartifact)|OSS contains OSS artifact location details| -|`path`|`string`|Path is the container path to the artifact| -|`raw`|[`RawArtifact`](#rawartifact)|Raw contains raw artifact location details| -|`recurseMode`|`boolean`|If mode is set, apply the permission recursively into the artifact if it is a folder| -|`s3`|[`S3Artifact`](#s3artifact)|S3 contains S3 artifact location details| -|`subPath`|`string`|SubPath allows an artifact to be sourced from a subpath within the specified source| - -## HTTPHeaderSource - -_No description available_ - -
-Examples with this field (click to open) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`secretKeyRef`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| - -## BasicAuth - -BasicAuth describes the secret selectors required for basic authentication - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password| -|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username| - -## ClientCertAuth - -ClientCertAuth holds necessary information for client authentication via certificates - -
-Examples with this field (click to open) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`clientCertSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| -|`clientKeySecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| - -## OAuth2Auth - -OAuth2Auth holds all information for client authentication via OAuth2 tokens - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`clientIDSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| -|`clientSecretSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| -|`endpointParams`|`Array<`[`OAuth2EndpointParam`](#oauth2endpointparam)`>`|_No description available_| -|`scopes`|`Array< string >`|_No description available_| -|`tokenURLSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_| - -## OAuth2EndpointParam - -EndpointParam is for requesting optional fields that should be sent in the oauth request - -
-Examples with this field (click to open) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`key`|`string`|Name is the header name| -|`value`|`string`|Value is the literal value to use for the header| - -# External Fields - - -## ObjectMeta - -ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. - -
-Examples with this field (click to open) - -- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) - -- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) - -- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) - -- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) - -- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) - -- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) - -- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) - -- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) - -- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) - -- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) - -- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) - -- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) - -- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) - -- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) - -- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) - -- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) - -- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) - -- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) - -- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`annotations`|`Map< string , string >`|Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations| -|`creationTimestamp`|[`Time`](#time)|CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| -|`deletionGracePeriodSeconds`|`integer`|Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.| -|`deletionTimestamp`|[`Time`](#time)|DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| -|`finalizers`|`Array< string >`|Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.| -|`generateName`|`string`|GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency| -|`generation`|`integer`|A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.| -|`labels`|`Map< string , string >`|Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels| -|`managedFields`|`Array<`[`ManagedFieldsEntry`](#managedfieldsentry)`>`|ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object.| -|`name`|`string`|Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names| -|`namespace`|`string`|Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces| -|`ownerReferences`|`Array<`[`OwnerReference`](#ownerreference)`>`|List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.| -|`resourceVersion`|`string`|An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency| -|`selfLink`|`string`|Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.| -|`uid`|`string`|UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids| - -## Affinity - -Affinity is a group of affinity scheduling rules. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`nodeAffinity`|[`NodeAffinity`](#nodeaffinity)|Describes node affinity scheduling rules for the pod.| -|`podAffinity`|[`PodAffinity`](#podaffinity)|Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).| -|`podAntiAffinity`|[`PodAntiAffinity`](#podantiaffinity)|Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).| - -## PodDNSConfig - -PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy. - -
-Examples with this field (click to open) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`nameservers`|`Array< string >`|A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.| -|`options`|`Array<`[`PodDNSConfigOption`](#poddnsconfigoption)`>`|A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.| -|`searches`|`Array< string >`|A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.| - -## HostAlias - -HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`hostnames`|`Array< string >`|Hostnames for the above IP address.| -|`ip`|`string`|IP address of the host file entry.| - -## LocalObjectReference - -LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. - -
-Examples with this field (click to open) - -- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| - -## PodDisruptionBudgetSpec - -PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. - -
-Examples with this field (click to open) - -- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`maxUnavailable`|[`IntOrString`](#intorstring)|An eviction is allowed if at most "maxUnavailable" pods selected by "selector" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable".| -|`minAvailable`|[`IntOrString`](#intorstring)|An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".| -|`selector`|[`LabelSelector`](#labelselector)|Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.| -|`unhealthyPodEvictionPolicy`|`string`|UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type="Ready",status="True". Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. IfHealthyBudget policy means that running pods (status.phase="Running"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. AlwaysAllow policy means that all running pods (status.phase="Running"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).| - -## PodSecurityContext - -PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext. - -
-Examples with this field (click to open) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`appArmorProfile`|[`AppArmorProfile`](#apparmorprofile)|appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.| -|`fsGroup`|`integer`|A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.| -|`fsGroupChangePolicy`|`string`|fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.| -|`runAsGroup`|`integer`|The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.| -|`runAsNonRoot`|`boolean`|Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.| -|`runAsUser`|`integer`|The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.| -|`seLinuxOptions`|[`SELinuxOptions`](#selinuxoptions)|The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.| -|`seccompProfile`|[`SeccompProfile`](#seccompprofile)|The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.| -|`supplementalGroups`|`Array< integer >`|A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.| -|`sysctls`|`Array<`[`Sysctl`](#sysctl)`>`|Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.| -|`windowsOptions`|[`WindowsSecurityContextOptions`](#windowssecuritycontextoptions)|The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.| - -## Toleration - -The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`effect`|`string`|Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.| -|`key`|`string`|Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.| -|`operator`|`string`|Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.| -|`tolerationSeconds`|`integer`|TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.| -|`value`|`string`|Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.| - -## PersistentVolumeClaim - -PersistentVolumeClaim is a user's request for and claim to a persistent volume - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources| -|`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| -|`metadata`|[`ObjectMeta`](#objectmeta)|Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| -|`spec`|[`PersistentVolumeClaimSpec`](#persistentvolumeclaimspec)|spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| -|`status`|[`PersistentVolumeClaimStatus`](#persistentvolumeclaimstatus)|status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| - -## Volume - -Volume represents a named volume in a pod that may be accessed by any container in the pod. - -
-Examples with this field (click to open) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`awsElasticBlockStore`|[`AWSElasticBlockStoreVolumeSource`](#awselasticblockstorevolumesource)|awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| -|`azureDisk`|[`AzureDiskVolumeSource`](#azurediskvolumesource)|azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.| -|`azureFile`|[`AzureFileVolumeSource`](#azurefilevolumesource)|azureFile represents an Azure File Service mount on the host and bind mount to the pod.| -|`cephfs`|[`CephFSVolumeSource`](#cephfsvolumesource)|cephFS represents a Ceph FS mount on the host that shares a pod's lifetime| -|`cinder`|[`CinderVolumeSource`](#cindervolumesource)|cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| -|`configMap`|[`ConfigMapVolumeSource`](#configmapvolumesource)|configMap represents a configMap that should populate this volume| -|`csi`|[`CSIVolumeSource`](#csivolumesource)|csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).| -|`downwardAPI`|[`DownwardAPIVolumeSource`](#downwardapivolumesource)|downwardAPI represents downward API about the pod that should populate this volume| -|`emptyDir`|[`EmptyDirVolumeSource`](#emptydirvolumesource)|emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| -|`ephemeral`|[`EphemeralVolumeSource`](#ephemeralvolumesource)|ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. A pod can use both types of ephemeral volumes and persistent volumes at the same time.| -|`fc`|[`FCVolumeSource`](#fcvolumesource)|fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.| -|`flexVolume`|[`FlexVolumeSource`](#flexvolumesource)|flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.| -|`flocker`|[`FlockerVolumeSource`](#flockervolumesource)|flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running| -|`gcePersistentDisk`|[`GCEPersistentDiskVolumeSource`](#gcepersistentdiskvolumesource)|gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| -|~~`gitRepo`~~|~~[`GitRepoVolumeSource`](#gitrepovolumesource)~~|~~gitRepo represents a git repository at a particular revision.~~ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.| -|`glusterfs`|[`GlusterfsVolumeSource`](#glusterfsvolumesource)|glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md| -|`hostPath`|[`HostPathVolumeSource`](#hostpathvolumesource)|hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| -|`iscsi`|[`ISCSIVolumeSource`](#iscsivolumesource)|iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md| -|`name`|`string`|name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`nfs`|[`NFSVolumeSource`](#nfsvolumesource)|nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| -|`persistentVolumeClaim`|[`PersistentVolumeClaimVolumeSource`](#persistentvolumeclaimvolumesource)|persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| -|`photonPersistentDisk`|[`PhotonPersistentDiskVolumeSource`](#photonpersistentdiskvolumesource)|photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine| -|`portworxVolume`|[`PortworxVolumeSource`](#portworxvolumesource)|portworxVolume represents a portworx volume attached and mounted on kubelets host machine| -|`projected`|[`ProjectedVolumeSource`](#projectedvolumesource)|projected items for all in one resources secrets, configmaps, and downward API| -|`quobyte`|[`QuobyteVolumeSource`](#quobytevolumesource)|quobyte represents a Quobyte mount on the host that shares a pod's lifetime| -|`rbd`|[`RBDVolumeSource`](#rbdvolumesource)|rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md| -|`scaleIO`|[`ScaleIOVolumeSource`](#scaleiovolumesource)|scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.| -|`secret`|[`SecretVolumeSource`](#secretvolumesource)|secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret| -|`storageos`|[`StorageOSVolumeSource`](#storageosvolumesource)|storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.| -|`vsphereVolume`|[`VsphereVirtualDiskVolumeSource`](#vspherevirtualdiskvolumesource)|vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine| - -## Time - -Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. - -## ObjectReference - -ObjectReference contains enough information to let you inspect or modify the referred object. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiVersion`|`string`|API version of the referent.| -|`fieldPath`|`string`|If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.| -|`kind`|`string`|Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| -|`name`|`string`|Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`namespace`|`string`|Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/| -|`resourceVersion`|`string`|Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency| -|`uid`|`string`|UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids| - -## LabelSelector - -A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - -
-Examples with this field (click to open) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`matchExpressions`|`Array<`[`LabelSelectorRequirement`](#labelselectorrequirement)`>`|matchExpressions is a list of label selector requirements. The requirements are ANDed.| -|`matchLabels`|`Map< string , string >`|matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.| - -## IntOrString - -_No description available_ - -
-Examples with this field (click to open) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) -
- -## Container - -A single application container that you want to run within a pod. - -
-Examples with this field (click to open) - -- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) - -- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) - -- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) - -- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) - -- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) - -- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) - -- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) - -- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) - -- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) - -- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) - -- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) - -- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) - -- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) - -- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) - -- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) - -- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| -|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| -|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| -|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| -|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| -|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| -|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| -|`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| -|`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| -|`restartPolicy`|`string`|RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.| -|`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| -|`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| -|`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| -|`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| -|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| -|`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| -|`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| -|`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| -|`workingDir`|`string`|Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.| - -## ConfigMapKeySelector - -Selects a key from a ConfigMap. - -
-Examples with this field (click to open) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`key`|`string`|The key to select.| -|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|Specify whether the ConfigMap or its key must be defined| - -## VolumeMount - -VolumeMount describes a mounting of a Volume within a container. - -
-Examples with this field (click to open) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`mountPath`|`string`|Path within the container at which the volume should be mounted. Must not contain ':'.| -|`mountPropagation`|`string`|mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified (which defaults to None).| -|`name`|`string`|This must match the Name of a Volume.| -|`readOnly`|`boolean`|Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.| -|`recursiveReadOnly`|`string`|RecursiveReadOnly specifies whether read-only mounts should be handled recursively. If ReadOnly is false, this field has no meaning and must be unspecified. If ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this field is set to Enabled, the mount is made recursively read-only if it is supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason. If this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None). If this field is not specified, it is treated as an equivalent of Disabled.| -|`subPath`|`string`|Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).| -|`subPathExpr`|`string`|Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.| - -## EnvVar - -EnvVar represents an environment variable present in a Container. - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|Name of the environment variable. Must be a C_IDENTIFIER.| -|`value`|`string`|Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".| -|`valueFrom`|[`EnvVarSource`](#envvarsource)|Source for the environment variable's value. Cannot be used if value is not empty.| - -## EnvFromSource - -EnvFromSource represents the source of a set of ConfigMaps - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`configMapRef`|[`ConfigMapEnvSource`](#configmapenvsource)|The ConfigMap to select from| -|`prefix`|`string`|An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.| -|`secretRef`|[`SecretEnvSource`](#secretenvsource)|The Secret to select from| - -## Lifecycle - -Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`postStart`|[`LifecycleHandler`](#lifecyclehandler)|PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks| -|`preStop`|[`LifecycleHandler`](#lifecyclehandler)|PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks| - -## Probe - -Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`exec`|[`ExecAction`](#execaction)|Exec specifies the action to take.| -|`failureThreshold`|`integer`|Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.| -|`grpc`|[`GRPCAction`](#grpcaction)|GRPC specifies an action involving a GRPC port.| -|`httpGet`|[`HTTPGetAction`](#httpgetaction)|HTTPGet specifies the http request to perform.| -|`initialDelaySeconds`|`integer`|Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| -|`periodSeconds`|`integer`|How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.| -|`successThreshold`|`integer`|Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.| -|`tcpSocket`|[`TCPSocketAction`](#tcpsocketaction)|TCPSocket specifies an action involving a TCP port.| -|`terminationGracePeriodSeconds`|`integer`|Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.| -|`timeoutSeconds`|`integer`|Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| - -## ContainerPort - -ContainerPort represents a network port in a single container. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`containerPort`|`integer`|Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.| -|`hostIP`|`string`|What host IP to bind the external port to.| -|`hostPort`|`integer`|Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.| -|`name`|`string`|If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.| -|`protocol`|`string`|Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".| - -## ContainerResizePolicy - -ContainerResizePolicy represents resource resize policy for the container. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`resourceName`|`string`|Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.| -|`restartPolicy`|`string`|Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.| - -## ResourceRequirements - -ResourceRequirements describes the compute resource requirements. - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`claims`|`Array<`[`ResourceClaim`](#resourceclaim)`>`|Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers.| -|`limits`|[`Quantity`](#quantity)|Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| -|`requests`|[`Quantity`](#quantity)|Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| - -## SecurityContext - -SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. - -
-Examples with this field (click to open) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`allowPrivilegeEscalation`|`boolean`|AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.| -|`appArmorProfile`|[`AppArmorProfile`](#apparmorprofile)|appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.| -|`capabilities`|[`Capabilities`](#capabilities)|The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.| -|`privileged`|`boolean`|Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.| -|`procMount`|`string`|procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.| -|`readOnlyRootFilesystem`|`boolean`|Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.| -|`runAsGroup`|`integer`|The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.| -|`runAsNonRoot`|`boolean`|Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.| -|`runAsUser`|`integer`|The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.| -|`seLinuxOptions`|[`SELinuxOptions`](#selinuxoptions)|The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.| -|`seccompProfile`|[`SeccompProfile`](#seccompprofile)|The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.| -|`windowsOptions`|[`WindowsSecurityContextOptions`](#windowssecuritycontextoptions)|The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.| - -## VolumeDevice - -volumeDevice describes a mapping of a raw block device within a container. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`devicePath`|`string`|devicePath is the path inside of the container that the device will be mapped to.| -|`name`|`string`|name must match the name of a persistentVolumeClaim in the pod| - -## SecretKeySelector - -SecretKeySelector selects a key of a Secret. - -
-Examples with this field (click to open) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`key`|`string`|The key of the secret to select from. Must be a valid secret key.| -|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|Specify whether the Secret or its key must be defined| - -## ManagedFieldsEntry - -ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiVersion`|`string`|APIVersion defines the version of this resource that this field set applies to. The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.| -|`fieldsType`|`string`|FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1"| -|`fieldsV1`|[`FieldsV1`](#fieldsv1)|FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.| -|`manager`|`string`|Manager is an identifier of the workflow managing these fields.| -|`operation`|`string`|Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.| -|`subresource`|`string`|Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.| -|`time`|[`Time`](#time)|Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.| - -## OwnerReference - -OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiVersion`|`string`|API version of the referent.| -|`blockOwnerDeletion`|`boolean`|If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.| -|`controller`|`boolean`|If true, this reference points to the managing controller.| -|`kind`|`string`|Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| -|`name`|`string`|Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names| -|`uid`|`string`|UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids| - -## NodeAffinity - -Node affinity is a group of node affinity scheduling rules. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`preferredDuringSchedulingIgnoredDuringExecution`|`Array<`[`PreferredSchedulingTerm`](#preferredschedulingterm)`>`|The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.| -|`requiredDuringSchedulingIgnoredDuringExecution`|[`NodeSelector`](#nodeselector)|If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.| - -## PodAffinity - -Pod affinity is a group of inter pod affinity scheduling rules. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`preferredDuringSchedulingIgnoredDuringExecution`|`Array<`[`WeightedPodAffinityTerm`](#weightedpodaffinityterm)`>`|The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.| -|`requiredDuringSchedulingIgnoredDuringExecution`|`Array<`[`PodAffinityTerm`](#podaffinityterm)`>`|If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.| - -## PodAntiAffinity - -Pod anti affinity is a group of inter pod anti affinity scheduling rules. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`preferredDuringSchedulingIgnoredDuringExecution`|`Array<`[`WeightedPodAffinityTerm`](#weightedpodaffinityterm)`>`|The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.| -|`requiredDuringSchedulingIgnoredDuringExecution`|`Array<`[`PodAffinityTerm`](#podaffinityterm)`>`|If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.| - -## PodDNSConfigOption - -PodDNSConfigOption defines DNS resolver options of a pod. - -
-Examples with this field (click to open) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|Required.| -|`value`|`string`|_No description available_| - -## AppArmorProfile - -AppArmorProfile defines a pod or container's AppArmor settings. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`localhostProfile`|`string`|localhostProfile indicates a profile loaded on the node that should be used. The profile must be preconfigured on the node to work. Must match the loaded name of the profile. Must be set if and only if type is "Localhost".| -|`type`|`string`|type indicates which kind of AppArmor profile will be applied. Valid options are: Localhost - a profile pre-loaded on the node. RuntimeDefault - the container runtime's default profile. Unconfined - no AppArmor enforcement.| - -## SELinuxOptions - -SELinuxOptions are the labels to be applied to the container - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`level`|`string`|Level is SELinux level label that applies to the container.| -|`role`|`string`|Role is a SELinux role label that applies to the container.| -|`type`|`string`|Type is a SELinux type label that applies to the container.| -|`user`|`string`|User is a SELinux user label that applies to the container.| - -## SeccompProfile - -SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`localhostProfile`|`string`|localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type.| -|`type`|`string`|type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.| - -## Sysctl - -Sysctl defines a kernel parameter to be set - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|Name of a property to set| -|`value`|`string`|Value of a property to set| - -## WindowsSecurityContextOptions - -WindowsSecurityContextOptions contain Windows-specific options and credentials. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`gmsaCredentialSpec`|`string`|GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.| -|`gmsaCredentialSpecName`|`string`|GMSACredentialSpecName is the name of the GMSA credential spec to use.| -|`hostProcess`|`boolean`|HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.| -|`runAsUserName`|`string`|The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.| - -## PersistentVolumeClaimSpec - -PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes - -
-Examples with this field (click to open) - -- [`archive-location.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/archive-location.yaml) - -- [`arguments-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-artifacts.yaml) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`arguments-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters.yaml) - -- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-disable-archive.yaml) - -- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-gc-workflow.yaml) - -- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing-subpath.yaml) - -- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-passing.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`artifact-repository-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-repository-ref.yaml) - -- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifactory-artifact.yaml) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml) - -- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) - -- [`mixed-cluster-namespaced-wftmpl-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/workflow-template-ref.yaml) - -- [`coinflip-recursive.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip-recursive.yaml) - -- [`coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/coinflip.yaml) - -- [`colored-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/colored-logs.yaml) - -- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-artifacts.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`conditionals-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals-complex.yaml) - -- [`conditionals.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditionals.yaml) - -- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/graph-workflow.yaml) - -- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/outputs-result-workflow.yaml) - -- [`parallel-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/parallel-workflow.yaml) - -- [`sequence-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/sequence-workflow.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/continue-on-fail.yaml) - -- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) - -- [`cron-when.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-when.yaml) - -- [`cron-workflow-multiple-schedules.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow-multiple-schedules.yaml) - -- [`cron-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-coinflip.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-coinflip.yaml) - -- [`dag-conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-artifacts.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`dag-continue-on-fail.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-continue-on-fail.yaml) - -- [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) - -- [`dag-diamond.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond.yaml) - -- [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - -- [`dag-enhanced-depends.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-enhanced-depends.yaml) - -- [`dag-inline-clusterworkflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-clusterworkflowtemplate.yaml) - -- [`dag-inline-cronworkflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-cronworkflow.yaml) - -- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflow.yaml) - -- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-inline-workflowtemplate.yaml) - -- [`dag-multiroot.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-multiroot.yaml) - -- [`dag-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-nested.yaml) - -- [`dag-targets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-targets.yaml) - -- [`dag-task-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-task-level-timeout.yaml) - -- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/data-transformations.yaml) - -- [`default-pdb-support.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/default-pdb-support.yaml) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`exit-code-output-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-code-output-variable.yaml) - -- [`exit-handler-dag-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-dag-level.yaml) - -- [`exit-handler-slack.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-slack.yaml) - -- [`exit-handler-step-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-step-level.yaml) - -- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-artifacts.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`exit-handlers.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handlers.yaml) - -- [`expression-destructure-json-complex.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json-complex.yaml) - -- [`expression-destructure-json.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-destructure-json.yaml) - -- [`expression-reusing-verbose-snippets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-reusing-verbose-snippets.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`forever.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/forever.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`gc-ttl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/gc-ttl.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`global-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`hdfs-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hdfs-artifact.yaml) - -- [`hello-hybrid.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-hybrid.yaml) - -- [`hello-windows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-windows.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/hello-world.yaml) - -- [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) - -- [`http-success-condition.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-success-condition.yaml) - -- [`image-pull-secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/image-pull-secrets.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-azure.yaml) - -- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-gcs.yaml) - -- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-git.yaml) - -- [`input-artifact-http.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-http.yaml) - -- [`input-artifact-oss.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-oss.yaml) - -- [`input-artifact-raw.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-raw.yaml) - -- [`input-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/input-artifact-s3.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-owner-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-owner-reference.yaml) - -- [`k8s-patch-json-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-pod.yaml) - -- [`k8s-patch-json-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-json-workflow.yaml) - -- [`k8s-patch-merge-pod.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-patch-merge-pod.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`key-only-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/key-only-artifact.yaml) - -- [`label-value-from-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/label-value-from-workflow.yaml) - -- [`life-cycle-hooks-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-tmpl-level.yaml) - -- [`life-cycle-hooks-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/life-cycle-hooks-wf-level.yaml) - -- [`loops-arbitrary-sequential-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-arbitrary-sequential-steps.yaml) - -- [`loops-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-dag.yaml) - -- [`loops-maps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-maps.yaml) - -- [`loops-param-argument.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-argument.yaml) - -- [`loops-param-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-param-result.yaml) - -- [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) - -- [`loops.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops.yaml) - -- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/map-reduce.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/node-selector.yaml) - -- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-azure.yaml) - -- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-gcs.yaml) - -- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-artifact-s3.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parallelism-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-limit.yaml) - -- [`parallelism-nested-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-dag.yaml) - -- [`parallelism-nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested-workflow.yaml) - -- [`parallelism-nested.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-nested.yaml) - -- [`parallelism-template-limit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parallelism-template-limit.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-script.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-gc-strategy-with-label-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy-with-label-selector.yaml) - -- [`pod-gc-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-gc-strategy.yaml) - -- [`pod-metadata-wf-field.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata-wf-field.yaml) - -- [`pod-metadata.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-metadata.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`recursive-for-loop.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/recursive-for-loop.yaml) - -- [`resubmit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/resubmit.yaml) - -- [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) - -- [`retry-conditional.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-conditional.yaml) - -- [`retry-container-to-completion.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container-to-completion.yaml) - -- [`retry-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-container.yaml) - -- [`retry-on-error.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-on-error.yaml) - -- [`retry-script.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-script.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) - -- [`scripts-bash.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-bash.yaml) - -- [`scripts-javascript.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-javascript.yaml) - -- [`scripts-python.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/scripts-python.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`sidecar-dind.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-dind.yaml) - -- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar-nginx.yaml) - -- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) - -- [`status-reference.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/status-reference.yaml) - -- [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) - -- [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) - -- [`suspend-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template.yaml) - -- [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - -- [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) - -- [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) - -- [`synchronization-mutex-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level.yaml) - -- [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - -- [`template-on-exit.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-on-exit.yaml) - -- [`timeouts-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-step.yaml) - -- [`timeouts-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/timeouts-workflow.yaml) - -- [`title-and-description-with-markdown.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/title-and-description-with-markdown.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml) - -- [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) - -- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml) - -- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-of-workflows.yaml) - -- [`dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/dag.yaml) - -- [`hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/hello-world.yaml) - -- [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/retry-with-steps.yaml) - -- [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/steps.yaml) - -- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) - -- [`workflow-archive-logs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-archive-logs.yaml) - -- [`workflow-template-ref-with-entrypoint-arg-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref-with-entrypoint-arg-passing.yaml) - -- [`workflow-template-ref.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/workflow-template-ref.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`accessModes`|`Array< string >`|accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1| -|`dataSource`|[`TypedLocalObjectReference`](#typedlocalobjectreference)|dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.| -|`dataSourceRef`|[`TypedObjectReference`](#typedobjectreference)|dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.| -|`resources`|[`VolumeResourceRequirements`](#volumeresourcerequirements)|resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources| -|`selector`|[`LabelSelector`](#labelselector)|selector is a label query over volumes to consider for binding.| -|`storageClassName`|`string`|storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1| -|`volumeAttributesClassName`|`string`|volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.| -|`volumeMode`|`string`|volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.| -|`volumeName`|`string`|volumeName is the binding reference to the PersistentVolume backing this claim.| - -## PersistentVolumeClaimStatus - -PersistentVolumeClaimStatus is the current status of a persistent volume claim. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`accessModes`|`Array< string >`|accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1| -|`allocatedResourceStatuses`|`Map< string , string >`|allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either: * Un-prefixed keys: - storage - the capacity of the volume. * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used. ClaimResourceStatus can be in any of following states: - ControllerResizeInProgress: State set when resize controller starts resizing the volume in control-plane. - ControllerResizeFailed: State set when resize has failed in resize controller with a terminal error. - NodeResizePending: State set when resize controller has finished resizing the volume but further resizing of volume is needed on the node. - NodeResizeInProgress: State set when kubelet starts resizing the volume. - NodeResizeFailed: State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed. For example: if expanding a PVC for more capacity - this field can be one of the following states: - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress" - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed" - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending" - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress" - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed" When this field is not set, it means that no resize operation is in progress for the given PVC. A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.| -|`allocatedResources`|[`Quantity`](#quantity)|allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either: * Un-prefixed keys: - storage - the capacity of the volume. * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used. Capacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. A controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.| -|`capacity`|[`Quantity`](#quantity)|capacity represents the actual resources of the underlying volume.| -|`conditions`|`Array<`[`PersistentVolumeClaimCondition`](#persistentvolumeclaimcondition)`>`|conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.| -|`currentVolumeAttributesClassName`|`string`|currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.| -|`modifyVolumeStatus`|[`ModifyVolumeStatus`](#modifyvolumestatus)|ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.| -|`phase`|`string`|phase represents the current phase of PersistentVolumeClaim.| - -## AWSElasticBlockStoreVolumeSource - -Represents a Persistent Disk resource in AWS. An AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| -|`partition`|`integer`|partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).| -|`readOnly`|`boolean`|readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| -|`volumeID`|`string`|volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| - -## AzureDiskVolumeSource - -AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`cachingMode`|`string`|cachingMode is the Host Caching mode: None, Read Only, Read Write.| -|`diskName`|`string`|diskName is the Name of the data disk in the blob storage| -|`diskURI`|`string`|diskURI is the URI of data disk in the blob storage| -|`fsType`|`string`|fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`kind`|`string`|kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared| -|`readOnly`|`boolean`|readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| - -## AzureFileVolumeSource - -AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`secretName`|`string`|secretName is the name of secret that contains Azure Storage Account Name and Key| -|`shareName`|`string`|shareName is the azure share Name| - -## CephFSVolumeSource - -Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`monitors`|`Array< string >`|monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| -|`path`|`string`|path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /| -|`readOnly`|`boolean`|readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| -|`secretFile`|`string`|secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| -|`user`|`string`|user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| - -## CinderVolumeSource - -Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| -|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.| -|`volumeID`|`string`|volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| - -## ConfigMapVolumeSource - -Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling. - -
-Examples with this field (click to open) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`defaultMode`|`integer`|defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| -|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|optional specify whether the ConfigMap or its keys must be defined| - -## CSIVolumeSource - -Represents a source location of a volume to mount, managed by an external CSI driver - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`driver`|`string`|driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.| -|`fsType`|`string`|fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.| -|`nodePublishSecretRef`|[`LocalObjectReference`](#localobjectreference)|nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.| -|`readOnly`|`boolean`|readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).| -|`volumeAttributes`|`Map< string , string >`|volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.| - -## DownwardAPIVolumeSource - -DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`defaultMode`|`integer`|Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`items`|`Array<`[`DownwardAPIVolumeFile`](#downwardapivolumefile)`>`|Items is a list of downward API volume file| - -## EmptyDirVolumeSource - -Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling. - -
-Examples with this field (click to open) - -- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifacts-workflowtemplate.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/init-container.yaml) - -- [`volumes-emptydir.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-emptydir.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`medium`|`string`|medium represents what type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| -|`sizeLimit`|[`Quantity`](#quantity)|sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| - -## EphemeralVolumeSource - -Represents an ephemeral volume that is handled by a normal storage driver. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`volumeClaimTemplate`|[`PersistentVolumeClaimTemplate`](#persistentvolumeclaimtemplate)|Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. Required, must not be nil.| - -## FCVolumeSource - -Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`lun`|`integer`|lun is Optional: FC target lun number| -|`readOnly`|`boolean`|readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`targetWWNs`|`Array< string >`|targetWWNs is Optional: FC target worldwide names (WWNs)| -|`wwids`|`Array< string >`|wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.| - -## FlexVolumeSource - -FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`driver`|`string`|driver is the name of the driver to use for this volume.| -|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.| -|`options`|`Map< string , string >`|options is Optional: this field holds extra command options if any.| -|`readOnly`|`boolean`|readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.| - -## FlockerVolumeSource - -Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`datasetName`|`string`|datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated| -|`datasetUUID`|`string`|datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset| - -## GCEPersistentDiskVolumeSource - -Represents a Persistent Disk resource in Google Compute Engine. A GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| -|`partition`|`integer`|partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| -|`pdName`|`string`|pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| -|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| - -## GitRepoVolumeSource - -Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`directory`|`string`|directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.| -|`repository`|`string`|repository is the URL| -|`revision`|`string`|revision is the commit hash for the specified revision.| - -## GlusterfsVolumeSource - -Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`endpoints`|`string`|endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| -|`path`|`string`|path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| -|`readOnly`|`boolean`|readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| - -## HostPathVolumeSource - -Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`path`|`string`|path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| -|`type`|`string`|type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| - -## ISCSIVolumeSource - -Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`chapAuthDiscovery`|`boolean`|chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication| -|`chapAuthSession`|`boolean`|chapAuthSession defines whether support iSCSI Session CHAP authentication| -|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi| -|`initiatorName`|`string`|initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.| -|`iqn`|`string`|iqn is the target iSCSI Qualified Name.| -|`iscsiInterface`|`string`|iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).| -|`lun`|`integer`|lun represents iSCSI Target Lun number.| -|`portals`|`Array< string >`|portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).| -|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is the CHAP Secret for iSCSI target and initiator authentication| -|`targetPortal`|`string`|targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).| - -## NFSVolumeSource - -Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`path`|`string`|path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| -|`readOnly`|`boolean`|readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| -|`server`|`string`|server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| - -## PersistentVolumeClaimVolumeSource - -PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system). - -
-Examples with this field (click to open) - -- [`volumes-existing.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-existing.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`claimName`|`string`|claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| -|`readOnly`|`boolean`|readOnly Will force the ReadOnly setting in VolumeMounts. Default false.| - -## PhotonPersistentDiskVolumeSource - -Represents a Photon Controller persistent disk resource. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`pdID`|`string`|pdID is the ID that identifies Photon Controller persistent disk| - -## PortworxVolumeSource - -PortworxVolumeSource represents a Portworx volume resource. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.| -|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`volumeID`|`string`|volumeID uniquely identifies a Portworx volume| - -## ProjectedVolumeSource - -Represents a projected volume source - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`defaultMode`|`integer`|defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`sources`|`Array<`[`VolumeProjection`](#volumeprojection)`>`|sources is the list of volume projections| - -## QuobyteVolumeSource - -Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`group`|`string`|group to map volume access to Default is no group| -|`readOnly`|`boolean`|readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.| -|`registry`|`string`|registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes| -|`tenant`|`string`|tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin| -|`user`|`string`|user to map volume access to Defaults to serivceaccount user| -|`volume`|`string`|volume is a string that references an already created Quobyte volume by name.| - -## RBDVolumeSource - -Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd| -|`image`|`string`|image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`keyring`|`string`|keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`monitors`|`Array< string >`|monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`pool`|`string`|pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`user`|`string`|user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| - -## ScaleIOVolumeSource - -ScaleIOVolumeSource represents a persistent ScaleIO volume - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs".| -|`gateway`|`string`|gateway is the host address of the ScaleIO API Gateway.| -|`protectionDomain`|`string`|protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.| -|`readOnly`|`boolean`|readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.| -|`sslEnabled`|`boolean`|sslEnabled Flag enable/disable SSL communication with Gateway, default false| -|`storageMode`|`string`|storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.| -|`storagePool`|`string`|storagePool is the ScaleIO Storage Pool associated with the protection domain.| -|`system`|`string`|system is the name of the storage system as configured in ScaleIO.| -|`volumeName`|`string`|volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.| - -## SecretVolumeSource - -Adapts a Secret into a volume. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling. - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`defaultMode`|`integer`|defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| -|`optional`|`boolean`|optional field specify whether the Secret or its keys must be defined| -|`secretName`|`string`|secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret| - -## StorageOSVolumeSource - -Represents a StorageOS persistent volume resource. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.| -|`volumeName`|`string`|volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.| -|`volumeNamespace`|`string`|volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.| - -## VsphereVirtualDiskVolumeSource - -Represents a vSphere volume resource. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fsType`|`string`|fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`storagePolicyID`|`string`|storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.| -|`storagePolicyName`|`string`|storagePolicyName is the storage Policy Based Management (SPBM) profile name.| -|`volumePath`|`string`|volumePath is the path that identifies vSphere volume vmdk| - -## LabelSelectorRequirement - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`key`|`string`|key is the label key that the selector applies to.| -|`operator`|`string`|operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.| -|`values`|`Array< string >`|values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.| - -## EnvVarSource - -EnvVarSource represents a source for the value of an EnvVar. - -
-Examples with this field (click to open) - -- [`arguments-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/arguments-parameters-from-configmap.yaml) - -- [`artifact-path-placeholders.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/artifact-path-placeholders.yaml) - -- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/conditional-parameters.yaml) - -- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/container-set-template/workspace-workflow.yaml) - -- [`custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/custom-metrics.yaml) - -- [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-conditional-parameters.yaml) - -- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/exit-handler-with-param.yaml) - -- [`expression-tag-template-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/expression-tag-template-workflow.yaml) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) - -- [`global-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-outputs.yaml) - -- [`global-parameters-from-configmap-referenced-as-local-variable.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap-referenced-as-local-variable.yaml) - -- [`global-parameters-from-configmap.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/global-parameters-from-configmap.yaml) - -- [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - -- [`intermediate-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/intermediate-parameters.yaml) - -- [`k8s-wait-wf.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/k8s-wait-wf.yaml) - -- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/nested-workflow.yaml) - -- [`output-parameter.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/output-parameter.yaml) - -- [`parameter-aggregation-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation-dag.yaml) - -- [`parameter-aggregation.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/parameter-aggregation.yaml) - -- [`pod-spec-from-previous-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-from-previous-step.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) - -- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`configMapKeyRef`|[`ConfigMapKeySelector`](#configmapkeyselector)|Selects a key of a ConfigMap.| -|`fieldRef`|[`ObjectFieldSelector`](#objectfieldselector)|Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.| -|`resourceFieldRef`|[`ResourceFieldSelector`](#resourcefieldselector)|Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.| -|`secretKeyRef`|[`SecretKeySelector`](#secretkeyselector)|Selects a key of a secret in the pod's namespace| - -## ConfigMapEnvSource - -ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|Specify whether the ConfigMap must be defined| - -## SecretEnvSource - -SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|Specify whether the Secret must be defined| - -## LifecycleHandler - -LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`exec`|[`ExecAction`](#execaction)|Exec specifies the action to take.| -|`httpGet`|[`HTTPGetAction`](#httpgetaction)|HTTPGet specifies the http request to perform.| -|`sleep`|[`SleepAction`](#sleepaction)|Sleep represents the duration that the container should sleep before being terminated.| -|`tcpSocket`|[`TCPSocketAction`](#tcpsocketaction)|Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.| - -## ExecAction - -ExecAction describes a "run in container" action. - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`command`|`Array< string >`|Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.| - -## GRPCAction - -_No description available_ - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`port`|`integer`|Port number of the gRPC service. Number must be in the range 1 to 65535.| -|`service`|`string`|Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC.| - -## HTTPGetAction - -HTTPGetAction describes an action based on HTTP Get requests. - -
-Examples with this field (click to open) - -- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-nginx.yaml) - -- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) - -- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) - -- [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`host`|`string`|Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.| -|`httpHeaders`|`Array<`[`HTTPHeader`](#httpheader)`>`|Custom headers to set in the request. HTTP allows repeated headers.| -|`path`|`string`|Path to access on the HTTP server.| -|`port`|[`IntOrString`](#intorstring)|Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.| -|`scheme`|`string`|Scheme to use for connecting to the host. Defaults to HTTP.| - -## TCPSocketAction - -TCPSocketAction describes an action based on opening a socket - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`host`|`string`|Optional: Host name to connect to, defaults to the pod IP.| -|`port`|[`IntOrString`](#intorstring)|Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.| - -## ResourceClaim - -ResourceClaim references one entry in PodSpec.ResourceClaims. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.| - -## Quantity - -Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` ::= (Note that may be empty, from the "" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= "+" | "-" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | "" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= "e" | "E" ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in "canonical form". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as "1500m" - 1.5Gi will be serialized as "1536Mi" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. - -
-Examples with this field (click to open) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) -
- -## Capabilities - -Adds and removes POSIX capabilities from running containers. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`add`|`Array< string >`|Added capabilities| -|`drop`|`Array< string >`|Removed capabilities| - -## FieldsV1 - -FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. Each key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set. The exact format is defined in sigs.k8s.io/structured-merge-diff - -## PreferredSchedulingTerm - -An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`preference`|[`NodeSelectorTerm`](#nodeselectorterm)|A node selector term, associated with the corresponding weight.| -|`weight`|`integer`|Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.| - -## NodeSelector - -A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`nodeSelectorTerms`|`Array<`[`NodeSelectorTerm`](#nodeselectorterm)`>`|Required. A list of node selector terms. The terms are ORed.| - -## WeightedPodAffinityTerm - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`podAffinityTerm`|[`PodAffinityTerm`](#podaffinityterm)|Required. A pod affinity term, associated with the corresponding weight.| -|`weight`|`integer`|weight associated with matching the corresponding podAffinityTerm, in the range 1-100.| - -## PodAffinityTerm - -Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`labelSelector`|[`LabelSelector`](#labelselector)|A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.| -|`matchLabelKeys`|`Array< string >`|MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.| -|`mismatchLabelKeys`|`Array< string >`|MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.| -|`namespaceSelector`|[`LabelSelector`](#labelselector)|A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.| -|`namespaces`|`Array< string >`|namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".| -|`topologyKey`|`string`|This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.| - -## TypedLocalObjectReference - -TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiGroup`|`string`|APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.| -|`kind`|`string`|Kind is the type of resource being referenced| -|`name`|`string`|Name is the name of resource being referenced| - -## TypedObjectReference - -_No description available_ - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiGroup`|`string`|APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.| -|`kind`|`string`|Kind is the type of resource being referenced| -|`name`|`string`|Name is the name of resource being referenced| -|`namespace`|`string`|Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.| - -## VolumeResourceRequirements - -VolumeResourceRequirements describes the storage resource requirements for a volume. - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-output-artifact.yaml) - -- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci-workflowtemplate.yaml) - -- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/ci.yaml) - -- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dns-config.yaml) - -- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fun-with-gifs.yaml) - -- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) - -- [`pod-spec-patch-wf-tmpl.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-patch-wf-tmpl.yaml) - -- [`pod-spec-yaml-patch.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/pod-spec-yaml-patch.yaml) - -- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/volumes-pvc.yaml) - -- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`limits`|[`Quantity`](#quantity)|Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| -|`requests`|[`Quantity`](#quantity)|Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| - -## PersistentVolumeClaimCondition - -PersistentVolumeClaimCondition contains details about state of pvc - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`lastProbeTime`|[`Time`](#time)|lastProbeTime is the time we probed the condition.| -|`lastTransitionTime`|[`Time`](#time)|lastTransitionTime is the time the condition transitioned from one status to another.| -|`message`|`string`|message is the human-readable message indicating details about last transition.| -|`reason`|`string`|reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "Resizing" that means the underlying persistent volume is being resized.| -|`status`|`string`|_No description available_| -|`type`|`string`|_No description available_| - -## ModifyVolumeStatus - -ModifyVolumeStatus represents the status object of ControllerModifyVolume operation - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`status`|`string`|status is the status of the ControllerModifyVolume operation. It can be in any of following states: - Pending Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as the specified VolumeAttributesClass not existing. - InProgress InProgress indicates that the volume is being modified. - Infeasible Infeasible indicates that the request has been rejected as invalid by the CSI driver. To resolve the error, a valid VolumeAttributesClass needs to be specified. Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.| -|`targetVolumeAttributesClassName`|`string`|targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled| - -## KeyToPath - -Maps a string key to a path within a volume. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`key`|`string`|key is the key to project.| -|`mode`|`integer`|mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`path`|`string`|path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.| - -## DownwardAPIVolumeFile - -DownwardAPIVolumeFile represents information to create the file containing the pod field - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`fieldRef`|[`ObjectFieldSelector`](#objectfieldselector)|Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.| -|`mode`|`integer`|Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`path`|`string`|Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'| -|`resourceFieldRef`|[`ResourceFieldSelector`](#resourcefieldselector)|Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.| - -## PersistentVolumeClaimTemplate - -PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`metadata`|[`ObjectMeta`](#objectmeta)|May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.| -|`spec`|[`PersistentVolumeClaimSpec`](#persistentvolumeclaimspec)|The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.| - -## VolumeProjection - -Projection that may be projected along with other supported volume types - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`clusterTrustBundle`|[`ClusterTrustBundleProjection`](#clustertrustbundleprojection)|ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. Alpha, gated by the ClusterTrustBundleProjection feature gate. ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.| -|`configMap`|[`ConfigMapProjection`](#configmapprojection)|configMap information about the configMap data to project| -|`downwardAPI`|[`DownwardAPIProjection`](#downwardapiprojection)|downwardAPI information about the downwardAPI data to project| -|`secret`|[`SecretProjection`](#secretprojection)|secret information about the secret data to project| -|`serviceAccountToken`|[`ServiceAccountTokenProjection`](#serviceaccounttokenprojection)|serviceAccountToken is information about the serviceAccountToken data to project| - -## ObjectFieldSelector - -ObjectFieldSelector selects an APIVersioned field of an object. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`apiVersion`|`string`|Version of the schema the FieldPath is written in terms of, defaults to "v1".| -|`fieldPath`|`string`|Path of the field to select in the specified API version.| - -## ResourceFieldSelector - -ResourceFieldSelector represents container resources (cpu, memory) and their output format - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`containerName`|`string`|Container name: required for volumes, optional for env vars| -|`divisor`|[`Quantity`](#quantity)|Specifies the output format of the exposed resources, defaults to "1"| -|`resource`|`string`|Required: resource to select| - -## SleepAction - -SleepAction describes a "sleep" action. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`seconds`|`integer`|Seconds is the number of seconds to sleep.| - -## HTTPHeader - -HTTPHeader describes a custom header to be used in HTTP probes - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`name`|`string`|The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.| -|`value`|`string`|The header field value| - -## NodeSelectorTerm - -A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`matchExpressions`|`Array<`[`NodeSelectorRequirement`](#nodeselectorrequirement)`>`|A list of node selector requirements by node's labels.| -|`matchFields`|`Array<`[`NodeSelectorRequirement`](#nodeselectorrequirement)`>`|A list of node selector requirements by node's fields.| - -## ClusterTrustBundleProjection - -ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`labelSelector`|[`LabelSelector`](#labelselector)|Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything".| -|`name`|`string`|Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.| -|`optional`|`boolean`|If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.| -|`path`|`string`|Relative path from the volume root to write the bundle.| -|`signerName`|`string`|Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.| - -## ConfigMapProjection - -Adapts a ConfigMap into a projected volume. The contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode. - -
-Examples with this field (click to open) - -- [`fibonacci-seq-conditional-param.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/fibonacci-seq-conditional-param.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| -|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|optional specify whether the ConfigMap or its keys must be defined| - -## DownwardAPIProjection - -Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`items`|`Array<`[`DownwardAPIVolumeFile`](#downwardapivolumefile)`>`|Items is a list of DownwardAPIVolume file| - -## SecretProjection - -Adapts a secret into a projected volume. The contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode. - -
-Examples with this field (click to open) - -- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/buildkit-template.yaml) - -- [`secrets.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/secrets.yaml) -
- -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| -|`name`|`string`|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|optional field specify whether the Secret or its key must be defined| - -## ServiceAccountTokenProjection - -ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise). - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`audience`|`string`|audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.| -|`expirationSeconds`|`integer`|expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.| -|`path`|`string`|path is the path relative to the mount point of the file to project the token into.| - -## NodeSelectorRequirement - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - -### Fields -| Field Name | Field Type | Description | -|:----------:|:----------:|---------------| -|`key`|`string`|The label key that the selector applies to.| -|`operator`|`string`|Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.| -|`values`|`Array< string >`|An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.| +export SHELL:=bash +export SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit + +# NOTE: Please ensure dependencies are synced with the flake.nix file in dev/nix/flake.nix before upgrading +# any external dependency. There is documentation on how to do this under the Developer Guide + +USE_NIX := false +# https://stackoverflow.com/questions/4122831/disable-make-builtin-rules-and-variables-from-inside-the-make-file +MAKEFLAGS += --no-builtin-rules +.SUFFIXES: + +# -- build metadata +BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ') +# below 3 are copied verbatim to release.yaml +GIT_COMMIT := $(shell git rev-parse HEAD || echo unknown) +GIT_TAG := $(shell git describe --exact-match --tags --abbrev=0 2> /dev/null || echo untagged) +GIT_TREE_STATE := $(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi) +GIT_REMOTE := origin +GIT_BRANCH := $(shell git rev-parse --symbolic-full-name --verify --quiet --abbrev-ref HEAD) +RELEASE_TAG := $(shell if [[ "$(GIT_TAG)" =~ ^v[0-9]+\.[0-9]+\.[0-9]+.*$$ ]]; then echo "true"; else echo "false"; fi) +DEV_BRANCH := $(shell [ "$(GIT_BRANCH)" = main ] || [ `echo $(GIT_BRANCH) | cut -c -8` = release- ] || [ `echo $(GIT_BRANCH) | cut -c -4` = dev- ] || [ $(RELEASE_TAG) = true ] && echo false || echo true) +SRC := $(GOPATH)/src/github.com/argoproj/argo-workflows +VERSION := latest +# VERSION is the version to be used for files in manifests and should always be latest unless we are releasing +# we assume HEAD means you are on a tag +ifeq ($(RELEASE_TAG),true) +VERSION := $(GIT_TAG) +endif + +# -- docker image publishing options +IMAGE_NAMESPACE ?= quay.io/argoproj +DOCKER_PUSH ?= false +TARGET_PLATFORM ?= linux/$(shell go env GOARCH) +K3D_CLUSTER_NAME ?= k3s-default # declares which cluster to import to in case it's not the default name + +# -- test options +E2E_WAIT_TIMEOUT ?= 90s # timeout for wait conditions +E2E_PARALLEL ?= 20 +E2E_SUITE_TIMEOUT ?= 15m +GOTEST ?= go test -v -p 20 + +# should we build the static files? +ifneq (,$(filter $(MAKECMDGOALS),codegen lint test docs start)) +STATIC_FILES := false +else +STATIC_FILES ?= $(shell [ $(DEV_BRANCH) = true ] && echo false || echo true) +endif + +# -- install & run options +PROFILE ?= minimal +KUBE_NAMESPACE ?= argo # namespace where Kubernetes resources/RBAC will be installed +PLUGINS ?= $(shell [ $PROFILE = plugins ] && echo false || echo true) +UI ?= false # start the UI +API ?= $(UI) # start the Argo Server +TASKS := controller +ifeq ($(API),true) +TASKS := controller server +endif +ifeq ($(UI),true) +TASKS := controller server ui +endif +# Which mode to run in: +# * `local` run the workflow–controller and argo-server as single replicas on the local machine (default) +# * `kubernetes` run the workflow-controller and argo-server on the Kubernetes cluster +RUN_MODE := local +KUBECTX := $(shell [[ "`which kubectl`" != '' ]] && kubectl config current-context || echo none) +DOCKER_DESKTOP := $(shell [[ "$(KUBECTX)" == "docker-desktop" ]] && echo true || echo false) +K3D := $(shell [[ "$(KUBECTX)" == "k3d-"* ]] && echo true || echo false) +ifeq ($(PROFILE),prometheus) +RUN_MODE := kubernetes +endif +ifeq ($(PROFILE),stress) +RUN_MODE := kubernetes +endif + +# -- controller + server + executor env vars +LOG_LEVEL := debug +UPPERIO_DB_DEBUG := 0 +DEFAULT_REQUEUE_TIME ?= 1s # by keeping this short we speed up tests +ALWAYS_OFFLOAD_NODE_STATUS := false +POD_STATUS_CAPTURE_FINALIZER ?= true +NAMESPACED := true +MANAGED_NAMESPACE ?= $(KUBE_NAMESPACE) +SECURE := false # whether or not to start Argo in TLS mode +AUTH_MODE := hybrid +ifeq ($(PROFILE),sso) +AUTH_MODE := sso +endif + +$(info GIT_COMMIT=$(GIT_COMMIT) GIT_BRANCH=$(GIT_BRANCH) GIT_TAG=$(GIT_TAG) GIT_TREE_STATE=$(GIT_TREE_STATE) RELEASE_TAG=$(RELEASE_TAG) DEV_BRANCH=$(DEV_BRANCH) VERSION=$(VERSION)) +$(info KUBECTX=$(KUBECTX) DOCKER_DESKTOP=$(DOCKER_DESKTOP) K3D=$(K3D) DOCKER_PUSH=$(DOCKER_PUSH) TARGET_PLATFORM=$(TARGET_PLATFORM)) +$(info RUN_MODE=$(RUN_MODE) PROFILE=$(PROFILE) AUTH_MODE=$(AUTH_MODE) SECURE=$(SECURE) STATIC_FILES=$(STATIC_FILES) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) LOG_LEVEL=$(LOG_LEVEL) NAMESPACED=$(NAMESPACED)) + +override LDFLAGS += \ + -X github.com/argoproj/argo-workflows/v3.version=$(VERSION) \ + -X github.com/argoproj/argo-workflows/v3.buildDate=$(BUILD_DATE) \ + -X github.com/argoproj/argo-workflows/v3.gitCommit=$(GIT_COMMIT) \ + -X github.com/argoproj/argo-workflows/v3.gitTreeState=$(GIT_TREE_STATE) + +ifneq ($(GIT_TAG),) +override LDFLAGS += -X github.com/argoproj/argo-workflows/v3.gitTag=${GIT_TAG} +endif + +ifndef $(GOPATH) + GOPATH=$(shell go env GOPATH) + export GOPATH +endif + +# -- file lists +HACK_PKG_FILES_AS_PKGS ?= false +ifeq ($(HACK_PKG_FILES_AS_PKGS),false) + ARGOEXEC_PKG_FILES := $(shell go list -f '{{ join .Deps "\n" }}' ./cmd/argoexec/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-) + CLI_PKG_FILES := $(shell go list -f '{{ join .Deps "\n" }}' ./cmd/argo/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-) + CONTROLLER_PKG_FILES := $(shell go list -f '{{ join .Deps "\n" }}' ./cmd/workflow-controller/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-) +else +# Building argoexec on windows cannot rebuild the openapi, we need to fall back to the old +# behaviour where we fake dependencies and therefore don't rebuild + ARGOEXEC_PKG_FILES := $(shell echo cmd/argoexec && go list -f '{{ join .Deps "\n" }}' ./cmd/argoexec/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) + CLI_PKG_FILES := $(shell echo cmd/argo && go list -f '{{ join .Deps "\n" }}' ./cmd/argo/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) + CONTROLLER_PKG_FILES := $(shell echo cmd/workflow-controller && go list -f '{{ join .Deps "\n" }}' ./cmd/workflow-controller/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) +endif + +TYPES := $(shell find pkg/apis/workflow/v1alpha1 -type f -name '*.go' -not -name openapi_generated.go -not -name '*generated*' -not -name '*test.go') +CRDS := $(shell find manifests/base/crds -type f -name 'argoproj.io_*.yaml') +SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \ + pkg/apiclient/_.secondary.swagger.json \ + pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json \ + pkg/apiclient/cronworkflow/cron-workflow.swagger.json \ + pkg/apiclient/event/event.swagger.json \ + pkg/apiclient/eventsource/eventsource.swagger.json \ + pkg/apiclient/info/info.swagger.json \ + pkg/apiclient/sensor/sensor.swagger.json \ + pkg/apiclient/workflow/workflow.swagger.json \ + pkg/apiclient/workflowarchive/workflow-archive.swagger.json \ + pkg/apiclient/workflowtemplate/workflow-template.swagger.json +PROTO_BINARIES := $(GOPATH)/bin/protoc-gen-gogo $(GOPATH)/bin/protoc-gen-gogofast $(GOPATH)/bin/goimports $(GOPATH)/bin/protoc-gen-grpc-gateway $(GOPATH)/bin/protoc-gen-swagger /usr/local/bin/clang-format + +# protoc,my.proto +define protoc + # protoc $(1) + [ -e ./vendor ] || go mod vendor + protoc \ + -I /usr/local/include \ + -I $(CURDIR) \ + -I $(CURDIR)/vendor \ + -I $(GOPATH)/src \ + -I $(GOPATH)/pkg/mod/github.com/gogo/protobuf@v1.3.2/gogoproto \ + -I $(GOPATH)/pkg/mod/github.com/grpc-ecosystem/grpc-gateway@v1.16.0/third_party/googleapis \ + --gogofast_out=plugins=grpc:$(GOPATH)/src \ + --grpc-gateway_out=logtostderr=true:$(GOPATH)/src \ + --swagger_out=logtostderr=true,fqn_for_swagger_name=true:. \ + $(1) + perl -i -pe 's|argoproj/argo-workflows/|argoproj/argo-workflows/v3/|g' `echo "$(1)" | sed 's/proto/pb.go/g'` + +endef + +# cli + +.PHONY: cli +cli: dist/argo + +ui/dist/app/index.html: $(shell find ui/src -type f && find ui -maxdepth 1 -type f) + # `yarn install` is fast (~2s), so you can call it safely. + JOBS=max yarn --cwd ui install + # `yarn build` is slow, so we guard it with a up-to-date check. + JOBS=max yarn --cwd ui build + +$(GOPATH)/bin/staticfiles: Makefile +# update this in Nix when updating it here +ifneq ($(USE_NIX), true) + go install bou.ke/staticfiles@dd04075 +endif + +ifeq ($(STATIC_FILES),true) +server/static/files.go: $(GOPATH)/bin/staticfiles ui/dist/app/index.html + # Pack UI into a Go file + $(GOPATH)/bin/staticfiles -o server/static/files.go ui/dist/app +else +server/static/files.go: + # Building without static files + cp ./server/static/files.go.stub ./server/static/files.go +endif + +dist/argo-linux-amd64: GOARGS = GOOS=linux GOARCH=amd64 +dist/argo-linux-arm64: GOARGS = GOOS=linux GOARCH=arm64 +dist/argo-linux-ppc64le: GOARGS = GOOS=linux GOARCH=ppc64le +dist/argo-linux-riscv64: GOARGS = GOOS=linux GOARCH=riscv64 +dist/argo-linux-s390x: GOARGS = GOOS=linux GOARCH=s390x +dist/argo-darwin-amd64: GOARGS = GOOS=darwin GOARCH=amd64 +dist/argo-darwin-arm64: GOARGS = GOOS=darwin GOARCH=arm64 +dist/argo-windows-amd64: GOARGS = GOOS=windows GOARCH=amd64 + +dist/argo-windows-%.gz: dist/argo-windows-% + gzip --force --keep dist/argo-windows-$*.exe + +dist/argo-windows-%: server/static/files.go $(CLI_PKG_FILES) go.sum + CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@.exe ./cmd/argo + +dist/argo-%.gz: dist/argo-% + gzip --force --keep dist/argo-$* + +dist/argo-%: server/static/files.go $(CLI_PKG_FILES) go.sum + CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo + +dist/argo: server/static/files.go $(CLI_PKG_FILES) go.sum +ifeq ($(shell uname -s),Darwin) + # if local, then build fast: use CGO and dynamic-linking + go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS}' -o $@ ./cmd/argo +else + CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo +endif + +argocli-image: + +.PHONY: clis +clis: dist/argo-linux-amd64.gz dist/argo-linux-arm64.gz dist/argo-linux-ppc64le.gz dist/argo-linux-riscv64.gz dist/argo-linux-s390x.gz dist/argo-darwin-amd64.gz dist/argo-darwin-arm64.gz dist/argo-windows-amd64.gz + +# controller + +.PHONY: controller +controller: dist/workflow-controller + +dist/workflow-controller: $(CONTROLLER_PKG_FILES) go.sum +ifeq ($(shell uname -s),Darwin) + # if local, then build fast: use CGO and dynamic-linking + go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS}' -o $@ ./cmd/workflow-controller +else + CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/workflow-controller +endif + +workflow-controller-image: + +# argoexec + +dist/argoexec: $(ARGOEXEC_PKG_FILES) go.sum +ifeq ($(shell uname -s),Darwin) + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec +else + CGO_ENABLED=0 go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec +endif + +argoexec-image: + +%-image: + [ ! -e dist/$* ] || mv dist/$* . + docker buildx build \ + --platform $(TARGET_PLATFORM) \ + --build-arg GIT_COMMIT=$(GIT_COMMIT) \ + --build-arg GIT_TAG=$(GIT_TAG) \ + --build-arg GIT_TREE_STATE=$(GIT_TREE_STATE) \ + -t $(IMAGE_NAMESPACE)/$*:$(VERSION) \ + --target $* \ + --load \ + . + [ ! -e $* ] || mv $* dist/ + docker run --rm -t $(IMAGE_NAMESPACE)/$*:$(VERSION) version + if [ $(K3D) = true ]; then k3d image import -c $(K3D_CLUSTER_NAME) $(IMAGE_NAMESPACE)/$*:$(VERSION); fi + if [ $(DOCKER_PUSH) = true ] && [ $(IMAGE_NAMESPACE) != argoproj ] ; then docker push $(IMAGE_NAMESPACE)/$*:$(VERSION) ; fi + +.PHONY: codegen +codegen: types swagger manifests $(GOPATH)/bin/mockery docs/fields.md docs/cli/argo.md + go generate ./... + make --directory sdks/java USE_NIX=$(USE_NIX) generate + make --directory sdks/python USE_NIX=$(USE_NIX) generate + +.PHONY: check-pwd +check-pwd: + +ifneq ($(SRC),$(PWD)) + @echo "⚠️ Code generation will not work if code in not checked out into $(SRC)" >&2 +endif + +.PHONY: types +types: check-pwd pkg/apis/workflow/v1alpha1/generated.proto pkg/apis/workflow/v1alpha1/openapi_generated.go pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go + +.PHONY: swagger +swagger: \ + pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json \ + pkg/apiclient/cronworkflow/cron-workflow.swagger.json \ + pkg/apiclient/event/event.swagger.json \ + pkg/apiclient/eventsource/eventsource.swagger.json \ + pkg/apiclient/info/info.swagger.json \ + pkg/apiclient/sensor/sensor.swagger.json \ + pkg/apiclient/workflow/workflow.swagger.json \ + pkg/apiclient/workflowarchive/workflow-archive.swagger.json \ + pkg/apiclient/workflowtemplate/workflow-template.swagger.json \ + manifests/base/crds/full/argoproj.io_workflows.yaml \ + manifests \ + api/openapi-spec/swagger.json \ + api/jsonschema/schema.json + + +$(GOPATH)/bin/mockery: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install github.com/vektra/mockery/v2@v2.42.2 +endif +$(GOPATH)/bin/controller-gen: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0 +endif +$(GOPATH)/bin/go-to-protobuf: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + # TODO: currently fails on v0.30.3 with + # Unable to clean package k8s.io.api.core.v1: remove /home/runner/go/pkg/mod/k8s.io/api@v0.30.3/core/v1/generated.proto: permission denied + go install k8s.io/code-generator/cmd/go-to-protobuf@v0.21.5 +endif +$(GOPATH)/src/github.com/gogo/protobuf: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + [ -e $@ ] || git clone --depth 1 https://github.com/gogo/protobuf.git -b v1.3.2 $@ +endif +$(GOPATH)/bin/protoc-gen-gogo: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install github.com/gogo/protobuf/protoc-gen-gogo@v1.3.2 +endif +$(GOPATH)/bin/protoc-gen-gogofast: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install github.com/gogo/protobuf/protoc-gen-gogofast@v1.3.2 +endif +$(GOPATH)/bin/protoc-gen-grpc-gateway: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.16.0 +endif +$(GOPATH)/bin/protoc-gen-swagger: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.16.0 +endif +$(GOPATH)/bin/openapi-gen: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install k8s.io/kube-openapi/cmd/openapi-gen@v0.0.0-20220124234850-424119656bbf +endif +$(GOPATH)/bin/swagger: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 +endif +$(GOPATH)/bin/goimports: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + go install golang.org/x/tools/cmd/goimports@v0.1.7 +endif + +/usr/local/bin/clang-format: +ifeq (, $(shell which clang-format)) +ifeq ($(shell uname),Darwin) + brew install clang-format +else + sudo apt update + sudo apt install -y clang-format +endif +endif + +pkg/apis/workflow/v1alpha1/generated.proto: $(GOPATH)/bin/go-to-protobuf $(PROTO_BINARIES) $(TYPES) $(GOPATH)/src/github.com/gogo/protobuf + # These files are generated on a v3/ folder by the tool. Link them to the root folder + [ -e ./v3 ] || ln -s . v3 + # Format proto files. Formatting changes generated code, so we do it here, rather that at lint time. + # Why clang-format? Google uses it. + find pkg/apiclient -name '*.proto'|xargs clang-format -i + $(GOPATH)/bin/go-to-protobuf \ + --go-header-file=./hack/custom-boilerplate.go.txt \ + --packages=github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ + --apimachinery-packages=+k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/policy/v1 \ + --proto-import $(GOPATH)/src + # Delete the link + [ -e ./v3 ] && rm -rf v3 + touch pkg/apis/workflow/v1alpha1/generated.proto + +# this target will also create a .pb.go and a .pb.gw.go file, but in Make 3 we cannot use _grouped target_, instead we must choose +# on file to represent all of them +pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto + $(call protoc,pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto) + +pkg/apiclient/cronworkflow/cron-workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/cronworkflow/cron-workflow.proto + $(call protoc,pkg/apiclient/cronworkflow/cron-workflow.proto) + +pkg/apiclient/event/event.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/event/event.proto + $(call protoc,pkg/apiclient/event/event.proto) + +pkg/apiclient/eventsource/eventsource.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/eventsource/eventsource.proto + $(call protoc,pkg/apiclient/eventsource/eventsource.proto) + +pkg/apiclient/info/info.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/info/info.proto + $(call protoc,pkg/apiclient/info/info.proto) + +pkg/apiclient/sensor/sensor.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/sensor/sensor.proto + $(call protoc,pkg/apiclient/sensor/sensor.proto) + +pkg/apiclient/workflow/workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflow/workflow.proto + $(call protoc,pkg/apiclient/workflow/workflow.proto) + +pkg/apiclient/workflowarchive/workflow-archive.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowarchive/workflow-archive.proto + $(call protoc,pkg/apiclient/workflowarchive/workflow-archive.proto) + +pkg/apiclient/workflowtemplate/workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowtemplate/workflow-template.proto + $(call protoc,pkg/apiclient/workflowtemplate/workflow-template.proto) + +# generate other files for other CRDs +manifests/base/crds/full/argoproj.io_workflows.yaml: $(GOPATH)/bin/controller-gen $(TYPES) ./hack/manifests/crdgen.sh ./hack/manifests/crds.go + ./hack/manifests/crdgen.sh + +.PHONY: manifests +manifests: \ + manifests/install.yaml \ + manifests/namespace-install.yaml \ + manifests/quick-start-minimal.yaml \ + manifests/quick-start-mysql.yaml \ + manifests/quick-start-postgres.yaml \ + dist/manifests/install.yaml \ + dist/manifests/namespace-install.yaml \ + dist/manifests/quick-start-minimal.yaml \ + dist/manifests/quick-start-mysql.yaml \ + dist/manifests/quick-start-postgres.yaml + +.PHONY: manifests/install.yaml +manifests/install.yaml: /dev/null + kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/cluster-install | ./hack/manifests/auto-gen-msg.sh > manifests/install.yaml + +.PHONY: manifests/namespace-install.yaml +manifests/namespace-install.yaml: /dev/null + kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/namespace-install | ./hack/manifests/auto-gen-msg.sh > manifests/namespace-install.yaml + +.PHONY: manifests/quick-start-minimal.yaml +manifests/quick-start-minimal.yaml: /dev/null + kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/minimal | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-minimal.yaml + +.PHONY: manifests/quick-start-mysql.yaml +manifests/quick-start-mysql.yaml: /dev/null + kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/mysql | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-mysql.yaml + +.PHONY: manifests/quick-start-postgres.yaml +manifests/quick-start-postgres.yaml: /dev/null + kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/postgres | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-postgres.yaml + +dist/manifests/%: manifests/% + @mkdir -p dist/manifests + sed 's/:latest/:$(VERSION)/' manifests/$* > $@ + +# lint/test/etc + +$(GOPATH)/bin/golangci-lint: Makefile + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.61.0 + +.PHONY: lint +lint: server/static/files.go $(GOPATH)/bin/golangci-lint + rm -Rf v3 vendor + # If you're using `woc.wf.Spec` or `woc.execWf.Status` your code probably won't work with WorkflowTemplate. + # * Change `woc.wf.Spec` to `woc.execWf.Spec`. + # * Change `woc.execWf.Status` to `woc.wf.Status`. + @awk '(/woc.wf.Spec/ || /woc.execWf.Status/) && !/not-woc-misuse/ {print FILENAME ":" FNR "\t" $0 ; exit 1}' $(shell find workflow/controller -type f -name '*.go' -not -name '*test*') + # Tidy Go modules + go mod tidy + # Lint Go files + $(GOPATH)/bin/golangci-lint run --fix --verbose + # Lint the UI + if [ -e ui/node_modules ]; then yarn --cwd ui lint ; fi + # Deduplicate Node modules + if [ -e ui/node_modules ]; then yarn --cwd ui deduplicate ; fi + +# for local we have a faster target that prints to stdout, does not use json, and can cache because it has no coverage +.PHONY: test +test: server/static/files.go + go build ./... + env KUBECONFIG=/dev/null $(GOTEST) ./... + # marker file, based on it's modification time, we know how long ago this target was run + @mkdir -p dist + touch dist/test + +.PHONY: install +install: githooks + kubectl get ns $(KUBE_NAMESPACE) || kubectl create ns $(KUBE_NAMESPACE) + kubectl config set-context --current --namespace=$(KUBE_NAMESPACE) + @echo "installing PROFILE=$(PROFILE)" + kubectl kustomize --load-restrictor=LoadRestrictionsNone test/e2e/manifests/$(PROFILE) | sed 's|quay.io/argoproj/|$(IMAGE_NAMESPACE)/|' | sed 's/namespace: argo/namespace: $(KUBE_NAMESPACE)/' | kubectl -n $(KUBE_NAMESPACE) apply --prune -l app.kubernetes.io/part-of=argo -f - +ifeq ($(PROFILE),stress) + kubectl -n $(KUBE_NAMESPACE) apply -f test/stress/massive-workflow.yaml +endif +ifeq ($(RUN_MODE),kubernetes) + kubectl -n $(KUBE_NAMESPACE) scale deploy/workflow-controller --replicas 1 + kubectl -n $(KUBE_NAMESPACE) scale deploy/argo-server --replicas 1 +endif + +.PHONY: argosay +argosay: +ifeq ($(DOCKER_PUSH),true) + cd test/e2e/images/argosay/v2 && \ + docker buildx build \ + --platform linux/amd64,linux/arm64 \ + -t argoproj/argosay:v2 \ + --push \ + . +else + cd test/e2e/images/argosay/v2 && \ + docker build . -t argoproj/argosay:v2 +endif +ifeq ($(K3D),true) + k3d image import -c $(K3D_CLUSTER_NAME) argoproj/argosay:v2 +endif + +.PHONY: argosayv1 +argosayv1: +ifeq ($(DOCKER_PUSH),true) + cd test/e2e/images/argosay/v1 && \ + docker buildx build \ + --platform linux/amd64,linux/arm64 \ + -t argoproj/argosay:v1 \ + --push \ + . +else + cd test/e2e/images/argosay/v1 && \ + docker build . -t argoproj/argosay:v1 +endif + +dist/argosay: + mkdir -p dist + cp test/e2e/images/argosay/v2/argosay dist/ + +.PHONY: kit +kit: Makefile +ifeq ($(shell command -v kit),) +ifeq ($(shell uname),Darwin) + brew tap kitproj/kit --custom-remote https://github.com/kitproj/kit + brew install kit +else + curl -q https://raw.githubusercontent.com/kitproj/kit/main/install.sh | tag=v0.1.8 sh +endif +endif + + +.PHONY: start +ifeq ($(RUN_MODE),local) +ifeq ($(API),true) +start: install controller kit cli +else +start: install controller kit +endif +else +start: install kit +endif + @echo "starting STATIC_FILES=$(STATIC_FILES) (DEV_BRANCH=$(DEV_BRANCH), GIT_BRANCH=$(GIT_BRANCH)), AUTH_MODE=$(AUTH_MODE), RUN_MODE=$(RUN_MODE), MANAGED_NAMESPACE=$(MANAGED_NAMESPACE)" +ifneq ($(API),true) + @echo "⚠️️ not starting API. If you want to test the API, use 'make start API=true' to start it" +endif +ifneq ($(UI),true) + @echo "⚠️ not starting UI. If you want to test the UI, run 'make start UI=true' to start it" +endif +ifneq ($(PLUGINS),true) + @echo "⚠️ not starting plugins. If you want to test plugins, run 'make start PROFILE=plugins' to start it" +endif + # Check dex, minio, postgres and mysql are in hosts file +ifeq ($(AUTH_MODE),sso) + grep '127.0.0.1.*dex' /etc/hosts +endif + # grep '127.0.0.1.*azurite' /etc/hosts + # grep '127.0.0.1.*minio' /etc/hosts + # grep '127.0.0.1.*postgres' /etc/hosts + # grep '127.0.0.1.*mysql' /etc/hosts +ifeq ($(RUN_MODE),local) + env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) +endif + +.PHONY: wait +wait: + # Wait for workflow controller + until lsof -i :9090 > /dev/null ; do sleep 10s ; done +ifeq ($(API),true) + # Wait for Argo Server + until lsof -i :2746 > /dev/null ; do sleep 10s ; done +endif +ifeq ($(PROFILE),mysql) + # Wait for MySQL + until (: < /dev/tcp/localhost/3306) ; do sleep 10s ; done +endif + +.PHONY: postgres-cli +postgres-cli: + kubectl exec -ti `kubectl get pod -l app=postgres -o name|cut -c 5-` -- psql -U postgres + +.PHONY: mysql-cli +mysql-cli: + kubectl exec -ti `kubectl get pod -l app=mysql -o name|cut -c 5-` -- mysql -u mysql -ppassword argo + +test-cli: ./dist/argo + +test-%: + E2E_WAIT_TIMEOUT=$(E2E_WAIT_TIMEOUT) go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags $* -parallel $(E2E_PARALLEL) ./test/e2e + +.PHONY: test-examples +test-examples: + ./hack/test-examples.sh + +.PHONY: test-%-sdk +test-%-sdk: + make --directory sdks/$* install test -B + +Test%: + E2E_WAIT_TIMEOUT=$(E2E_WAIT_TIMEOUT) go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags api,cli,cron,executor,examples,corefunctional,functional,plugins -parallel $(E2E_PARALLEL) ./test/e2e -run='.*/$*' + + +# clean + +.PHONY: clean +clean: + go clean + rm -Rf test-results node_modules vendor v2 v3 argoexec-linux-amd64 dist/* ui/dist + +# swagger + +pkg/apis/workflow/v1alpha1/openapi_generated.go: $(GOPATH)/bin/openapi-gen $(TYPES) + # These files are generated on a v3/ folder by the tool. Link them to the root folder + [ -e ./v3 ] || ln -s . v3 + $(GOPATH)/bin/openapi-gen \ + --go-header-file ./hack/custom-boilerplate.go.txt \ + --input-dirs github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ + --output-package github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ + --report-filename pkg/apis/api-rules/violation_exceptions.list + # Force the timestamp to be up to date + touch $@ + # Delete the link + [ -e ./v3 ] && rm -rf v3 + + +# generates many other files (listers, informers, client etc). +pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go: $(GOPATH)/bin/go-to-protobuf $(TYPES) + # These files are generated on a v3/ folder by the tool. Link them to the root folder + [ -e ./v3 ] || ln -s . v3 + bash $(GOPATH)/pkg/mod/k8s.io/code-generator@v0.21.5/generate-groups.sh \ + "deepcopy,client,informer,lister" \ + github.com/argoproj/argo-workflows/v3/pkg/client github.com/argoproj/argo-workflows/v3/pkg/apis \ + workflow:v1alpha1 \ + --go-header-file ./hack/custom-boilerplate.go.txt + # Force the timestamp to be up to date + touch $@ + # Delete the link + [ -e ./v3 ] && rm -rf v3 + +dist/kubernetes.swagger.json: Makefile + @mkdir -p dist + # recurl will only fetch if the file doesn't exist, so delete it + rm -f $@ + ./hack/recurl.sh $@ https://raw.githubusercontent.com/kubernetes/kubernetes/v1.30.3/api/openapi-spec/swagger.json + +pkg/apiclient/_.secondary.swagger.json: hack/api/swagger/secondaryswaggergen.go pkg/apis/workflow/v1alpha1/openapi_generated.go dist/kubernetes.swagger.json + rm -Rf v3 vendor + # We have `hack/api/swagger` so that most hack script do not depend on the whole code base and are therefore slow. + go run ./hack/api/swagger secondaryswaggergen + +# we always ignore the conflicts, so lets automated figuring out how many there will be and just use that +dist/swagger-conflicts: $(GOPATH)/bin/swagger $(SWAGGER_FILES) + swagger mixin $(SWAGGER_FILES) 2>&1 | grep -c skipping > dist/swagger-conflicts || true + +dist/mixed.swagger.json: $(GOPATH)/bin/swagger $(SWAGGER_FILES) dist/swagger-conflicts + swagger mixin -c $(shell cat dist/swagger-conflicts) $(SWAGGER_FILES) -o dist/mixed.swagger.json + +dist/swaggifed.swagger.json: dist/mixed.swagger.json hack/api/swagger/swaggify.sh + cat dist/mixed.swagger.json | ./hack/api/swagger/swaggify.sh > dist/swaggifed.swagger.json + +dist/kubeified.swagger.json: dist/swaggifed.swagger.json dist/kubernetes.swagger.json + go run ./hack/api/swagger kubeifyswagger dist/swaggifed.swagger.json dist/kubeified.swagger.json + +dist/swagger.0.json: $(GOPATH)/bin/swagger dist/kubeified.swagger.json + swagger flatten --with-flatten minimal --with-flatten remove-unused dist/kubeified.swagger.json -o dist/swagger.0.json + +api/openapi-spec/swagger.json: $(GOPATH)/bin/swagger dist/swagger.0.json + swagger flatten --with-flatten remove-unused dist/swagger.0.json -o api/openapi-spec/swagger.json + +api/jsonschema/schema.json: api/openapi-spec/swagger.json hack/api/jsonschema/main.go + go run ./hack/api/jsonschema + +go-diagrams/diagram.dot: ./hack/docs/diagram.go + rm -Rf go-diagrams + go run ./hack/docs diagram + +docs/assets/diagram.png: go-diagrams/diagram.dot + cd go-diagrams && dot -Tpng diagram.dot -o ../docs/assets/diagram.png + +docs/fields.md: api/openapi-spec/swagger.json $(shell find examples -type f) hack/docs/fields.go + env ARGO_SECURE=false ARGO_INSECURE_SKIP_VERIFY=false ARGO_SERVER= ARGO_INSTANCEID= go run ./hack/docs fields + +# generates several other files +docs/cli/argo.md: $(CLI_PKG_FILES) go.sum server/static/files.go hack/docs/cli.go + go run ./hack/docs cli + +# docs + +/usr/local/bin/mdspell: Makefile +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + npm list -g markdown-spellcheck@1.3.1 > /dev/null || npm i -g markdown-spellcheck@1.3.1 +endif + +.PHONY: docs-spellcheck +docs-spellcheck: /usr/local/bin/mdspell + # check docs for spelling mistakes + mdspell --ignore-numbers --ignore-acronyms --en-us --no-suggestions --report $(shell find docs -name '*.md' -not -name upgrading.md -not -name README.md -not -name fields.md -not -name upgrading.md -not -name executor_swagger.md -not -path '*/cli/*') + # alphabetize spelling file -- ignore first line (comment), then sort the rest case-sensitive and remove duplicates + $(shell cat .spelling | awk 'NR<2{ print $0; next } { print $0 | "LC_COLLATE=C sort" }' | uniq | tee .spelling > /dev/null) + +/usr/local/bin/markdown-link-check: +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + npm list -g markdown-link-check@3.11.1 > /dev/null || npm i -g markdown-link-check@3.11.1 +endif + +.PHONY: docs-linkcheck +docs-linkcheck: /usr/local/bin/markdown-link-check + # check docs for broken links + markdown-link-check -q -c .mlc_config.json $(shell find docs -name '*.md' -not -name fields.md -not -name executor_swagger.md) + +/usr/local/bin/markdownlint: +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + npm list -g markdownlint-cli@0.33.0 > /dev/null || npm i -g markdownlint-cli@0.33.0 +endif + + +.PHONY: docs-lint +docs-lint: /usr/local/bin/markdownlint + # lint docs + markdownlint docs --fix --ignore docs/fields.md --ignore docs/executor_swagger.md --ignore docs/cli --ignore docs/walk-through/the-structure-of-workflow-specs.md + +/usr/local/bin/mkdocs: +# update this in Nix when upgrading it here +ifneq ($(USE_NIX), true) + python -m pip install --no-cache-dir -r docs/requirements.txt +endif + +.PHONY: docs +docs: /usr/local/bin/mkdocs \ + docs-spellcheck \ + docs-lint \ + # TODO: This is temporarily disabled to unblock merging PRs. + # docs-linkcheck + # copy README.md to docs/README.md + ./hack/docs/copy-readme.sh + # check environment-variables.md contains all variables mentioned in the code + ./hack/docs/check-env-doc.sh + # build the docs + TZ=UTC mkdocs build --strict + # tell the user the fastest way to edit docs + @echo "ℹ️ If you want to preview your docs, open site/index.html. If you want to edit them with hot-reload, run 'make docs-serve' to start mkdocs on port 8000" + +.PHONY: docs-serve +docs-serve: docs + mkdocs serve + +# pre-commit checks + +.git/hooks/%: hack/git/hooks/% + @mkdir -p .git/hooks + cp hack/git/hooks/$* .git/hooks/$* + +.PHONY: githooks +githooks: .git/hooks/pre-commit .git/hooks/commit-msg + +.PHONY: pre-commit +pre-commit: codegen lint docs + # marker file, based on it's modification time, we know how long ago this target was run + touch dist/pre-commit + +# release + +release-notes: /dev/null + version=$(VERSION) envsubst '$$version' < hack/release-notes.md > release-notes + +.PHONY: checksums +checksums: + sha256sum ./dist/argo-*.gz | awk -F './dist/' '{print $$1 $$2}' > ./dist/argo-workflows-cli-checksums.txt diff --git a/docs/fields.md b/docs/fields.md index a34f743aace1..91abfa490597 100644 --- a/docs/fields.md +++ b/docs/fields.md @@ -87,6 +87,8 @@ Workflow is the definition of a workflow resource - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -315,6 +317,8 @@ Workflow is the definition of a workflow resource - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) @@ -535,6 +539,8 @@ WorkflowSpec is the specification of a Workflow. - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -755,6 +761,8 @@ WorkflowSpec is the specification of a Workflow. - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) @@ -986,6 +994,8 @@ CronWorkflowSpec is the specification of a CronWorkflow - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -1206,6 +1216,8 @@ CronWorkflowSpec is the specification of a CronWorkflow - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) @@ -1344,6 +1356,8 @@ Arguments to a template - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -1460,6 +1474,8 @@ Arguments to a template - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) @@ -1620,6 +1636,8 @@ RetryStrategy provides controls on how to retry a workflow step - [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) @@ -1636,6 +1654,8 @@ RetryStrategy provides controls on how to retry a workflow step - [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) @@ -1657,6 +1677,10 @@ Synchronization holds synchronization lock configuration
Examples with this field (click to open) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) @@ -1977,6 +2001,10 @@ SynchronizationStatus stores the status of semaphore and mutex.
Examples with this field (click to open) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - [`synchronization-mutex-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level.yaml) @@ -2158,6 +2186,8 @@ Parameter indicate a passed string parameter to a service template with an optio - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -2272,6 +2302,8 @@ Parameter indicate a passed string parameter to a service template with an optio - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) @@ -2397,6 +2429,10 @@ Mutex holds Mutex configuration
Examples with this field (click to open) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) @@ -2496,6 +2532,8 @@ DAGTemplate is a template subtype for directed acyclic graph templates - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -2587,6 +2625,8 @@ _No description available_ - [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) @@ -2607,6 +2647,8 @@ _No description available_ - [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml)
@@ -2708,6 +2750,8 @@ Inputs are the mechanism for passing parameters, artifacts, volumes from one tem - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -2830,6 +2874,8 @@ Inputs are the mechanism for passing parameters, artifacts, volumes from one tem - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) - [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/suspend-template-outputs.yaml) @@ -3144,6 +3190,8 @@ WorkflowStep is a reference to a template to execute in a series of step - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) @@ -3289,6 +3337,10 @@ MutexStatus contains which objects hold mutex locks, and which objects this work
Examples with this field (click to open) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`synchronization-mutex-tmpl-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-tmpl-level-legacy.yaml) - [`synchronization-mutex-wf-level-legacy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/synchronization-mutex-wf-level-legacy.yaml) @@ -3486,6 +3538,8 @@ HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a - [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`http-hello-world.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/http-hello-world.yaml) @@ -3506,6 +3560,8 @@ HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a - [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/sidecar.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/webhdfs-input-output-artifacts.yaml)
@@ -3830,6 +3886,8 @@ ContainerSetRetryStrategy provides controls on how to retry a container set - [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) @@ -3846,6 +3904,8 @@ ContainerSetRetryStrategy provides controls on how to retry a container set - [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) @@ -3884,6 +3944,8 @@ DAGTask represents a node in the graph during DAG execution - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -4156,10 +4218,14 @@ Sequence expands a workflow step into numeric range - [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cron-backfill.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`handle-large-output-results.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/handle-large-output-results.yaml) - [`loops-sequence.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/loops-sequence.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`withsequence-nested-result.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/withsequence-nested-result.yaml) - [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/work-avoidance.yaml) @@ -4696,6 +4762,8 @@ ObjectMeta is metadata that all persisted resources must have, which includes al - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -4916,6 +4984,8 @@ ObjectMeta is metadata that all persisted resources must have, which includes al - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) @@ -5234,6 +5304,8 @@ _No description available_ - [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/cluster-workflow-template/clustertemplates.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-disable-failFast.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-disable-failFast.yaml) - [`retry-backoff.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-backoff.yaml) @@ -5248,6 +5320,8 @@ _No description available_ - [`retry-with-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/retry-with-steps.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`template-defaults.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/template-defaults.yaml) - [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/workflow-template/templates.yaml) @@ -5320,6 +5394,8 @@ A single application container that you want to run within a pod. - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -5510,6 +5586,8 @@ A single application container that you want to run within a pod. - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) @@ -6058,6 +6136,8 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -6278,6 +6358,8 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml) @@ -6890,9 +6972,13 @@ HTTPGetAction describes an action based on HTTP Get requests. - [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/daemon-step.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/influxdb-ci.yaml) + +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml)
### Fields From 53eca43d722aba27b8c980a05d61a94f157cc128 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 13 Oct 2024 17:52:34 +0300 Subject: [PATCH 14/50] fix(docs): codegen Signed-off-by: MenD32 --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 14be3b0849c7..f3acdfd3c550 100644 --- a/Makefile +++ b/Makefile @@ -558,10 +558,10 @@ endif ifeq ($(AUTH_MODE),sso) grep '127.0.0.1.*dex' /etc/hosts endif - # grep '127.0.0.1.*azurite' /etc/hosts - # grep '127.0.0.1.*minio' /etc/hosts - # grep '127.0.0.1.*postgres' /etc/hosts - # grep '127.0.0.1.*mysql' /etc/hosts + grep '127.0.0.1.*azurite' /etc/hosts + grep '127.0.0.1.*minio' /etc/hosts + grep '127.0.0.1.*postgres' /etc/hosts + grep '127.0.0.1.*mysql' /etc/hosts ifeq ($(RUN_MODE),local) env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) endif From a0b2aa9f7cb405b3a8fa015d846a8758d19ebee5 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 13 Oct 2024 22:45:01 +0300 Subject: [PATCH 15/50] feat(controller): daemon retries now works on dag Signed-off-by: MenD32 --- workflow/controller/dag.go | 13 ++++++++++++- workflow/controller/operator.go | 4 ---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index 8f708eb02d05..600cc73a0d06 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -263,6 +263,17 @@ func (woc *wfOperationCtx) executeDAG(ctx context.Context, nodeName string, tmpl targetTasks = strings.Split(tmpl.DAG.Target, " ") } + // pre-execute daemoned tasks + for _, task := range tmpl.DAG.Tasks { + taskNode := dagCtx.getTaskNode(task.Name) + if err != nil { + continue + } + if taskNode != nil && taskNode.IsDaemoned() { + woc.executeDAGTask(ctx, dagCtx, task.Name) + } + } + // kick off execution of each target task asynchronously onExitCompleted := true for _, taskName := range targetTasks { @@ -429,7 +440,7 @@ func (woc *wfOperationCtx) executeDAGTask(ctx context.Context, dagCtx *dagContex } } - if node != nil && node.Fulfilled() { + if node != nil && node.Phase.Fulfilled() { // Collect the completed task metrics _, tmpl, _, tmplErr := dagCtx.tmplCtx.ResolveTemplate(task) if tmplErr != nil { diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 07a7dbbfc2e8..8324e404ca6a 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -2113,10 +2113,6 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, retryParentNode = processedRetryParentNode childNodeIDs, lastChildNode := getChildNodeIdsAndLastRetriedNode(retryParentNode, woc.wf.Status.Nodes) - for i := 0; i < 2000000; i++ { - woc.log.Debugf("A.1: %t", woc.childrenFulfilled(retryParentNode)) - } - // The retry node might have completed by now. if retryParentNode.Fulfilled() && woc.childrenFulfilled(retryParentNode) || woc.childrenFulfilled(retryParentNode) && retryParentNode.IsDaemoned() { // if retry node is daemoned we want to check those explicitly From 12814d57088045cf90b1b6c82dfd93ef0a801fbb Mon Sep 17 00:00:00 2001 From: MenD32 Date: Mon, 14 Oct 2024 09:34:05 +0300 Subject: [PATCH 16/50] fix(docs): withSequence string instead of int Signed-off-by: MenD32 --- examples/dag-daemon-retry-strategy.yaml | 2 +- examples/steps-daemon-retry-strategy.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/dag-daemon-retry-strategy.yaml b/examples/dag-daemon-retry-strategy.yaml index 77cbaf0d3b58..7c24eabb09f1 100644 --- a/examples/dag-daemon-retry-strategy.yaml +++ b/examples/dag-daemon-retry-strategy.yaml @@ -19,7 +19,7 @@ spec: - name: server-ip value: "{{tasks.server.ip}}" withSequence: - count: 10 + count: "10" - name: server retryStrategy: diff --git a/examples/steps-daemon-retry-strategy.yaml b/examples/steps-daemon-retry-strategy.yaml index 2ebb758e4454..3bd4d845d256 100644 --- a/examples/steps-daemon-retry-strategy.yaml +++ b/examples/steps-daemon-retry-strategy.yaml @@ -17,7 +17,7 @@ spec: - name: server-ip value: "{{steps.server.ip}}" withSequence: - count: 10 + count: "10" - name: server retryStrategy: From 1e748581cd1918964813db93ad4e020f742a7be6 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Mon, 14 Oct 2024 11:54:45 +0300 Subject: [PATCH 17/50] fix(tests): test featured names not complying with RFC-1123 Signed-off-by: MenD32 --- workflow/controller/dag_test.go | 6 +++--- workflow/controller/operator.go | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/workflow/controller/dag_test.go b/workflow/controller/dag_test.go index dee2ef413f64..f13b8be077dd 100644 --- a/workflow/controller/dag_test.go +++ b/workflow/controller/dag_test.go @@ -3465,8 +3465,8 @@ spec: templates: - name: linuxExitHandler steps: - - - name: printExit - template: printExit + - - name: print-exit + template: print-exit - container: args: - echo @@ -3475,7 +3475,7 @@ spec: - /argosay image: argoproj/argosay:v2 name: "" - name: printExit + name: print-exit - container: args: - echo diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 8324e404ca6a..0030dc565093 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -604,6 +604,10 @@ func (woc *wfOperationCtx) updateWorkflowMetadata() error { func (woc *wfOperationCtx) getWorkflowDeadline() *time.Time { if woc.execWf.Spec.ActiveDeadlineSeconds == nil { return nil + } else { + for i := 0; i <= 2000000; i++ { + woc.log.Debugf("%d", *woc.execWf.Spec.ActiveDeadlineSeconds) + } } if woc.wf.Status.StartedAt.IsZero() { return nil From ad863f73268f5c2aa1b05b58f37b590b54770c4b Mon Sep 17 00:00:00 2001 From: MenD32 Date: Mon, 14 Oct 2024 13:06:46 +0300 Subject: [PATCH 18/50] fix(tests): retry node will now be pending if the last retry child node is pending Signed-off-by: MenD32 --- workflow/controller/dag_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/workflow/controller/dag_test.go b/workflow/controller/dag_test.go index f13b8be077dd..ea3982ad9d1a 100644 --- a/workflow/controller/dag_test.go +++ b/workflow/controller/dag_test.go @@ -3593,7 +3593,7 @@ func TestRetryTypeDagTaskRunExitNodeAfterCompleted(t *testing.T) { woc.operate(ctx) nextDAGTaskNode := woc.wf.Status.Nodes.FindByDisplayName("dependencyTesting") assert.NotNil(t, nextDAGTaskNode) - assert.Equal(t, wfv1.NodeRunning, nextDAGTaskNode.Phase) + assert.Equal(t, wfv1.NodePending, nextDAGTaskNode.Phase) } func TestDagParallelism(t *testing.T) { From d5480e776b7ed3f0c63d52141f35996dde11cfd8 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Mon, 14 Oct 2024 14:04:19 +0300 Subject: [PATCH 19/50] fix(tests): retry node will now be pending if the last retry child node is pending Signed-off-by: MenD32 --- workflow/controller/dag_test.go | 4 ++-- workflow/controller/operator_test.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/workflow/controller/dag_test.go b/workflow/controller/dag_test.go index ea3982ad9d1a..c4d98fe02463 100644 --- a/workflow/controller/dag_test.go +++ b/workflow/controller/dag_test.go @@ -3662,7 +3662,7 @@ func TestDagWftmplHookWithRetry(t *testing.T) { assert.Equal(t, wfv1.NodeFailed, taskNode.Phase) failHookRetryNode := woc.wf.Status.Nodes.FindByDisplayName("task.hooks.failure") failHookChild0Node := woc.wf.Status.Nodes.FindByDisplayName("task.hooks.failure(0)") - assert.Equal(t, wfv1.NodeRunning, failHookRetryNode.Phase) + assert.Equal(t, wfv1.NodePending, failHookRetryNode.Phase) assert.Equal(t, wfv1.NodePending, failHookChild0Node.Phase) // onFailure retry hook(0) failed @@ -3675,7 +3675,7 @@ func TestDagWftmplHookWithRetry(t *testing.T) { failHookRetryNode = woc.wf.Status.Nodes.FindByDisplayName("task.hooks.failure") failHookChild0Node = woc.wf.Status.Nodes.FindByDisplayName("task.hooks.failure(0)") failHookChild1Node := woc.wf.Status.Nodes.FindByDisplayName("task.hooks.failure(1)") - assert.Equal(t, wfv1.NodeRunning, failHookRetryNode.Phase) + assert.Equal(t, wfv1.NodePending, failHookRetryNode.Phase) assert.Equal(t, wfv1.NodeFailed, failHookChild0Node.Phase) assert.Equal(t, wfv1.NodePending, failHookChild1Node.Phase) diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index 0a1d6789ce5c..c1c5bdecd2b8 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -923,7 +923,7 @@ func TestProcessNodeRetriesWithExpression(t *testing.T) { woc.markNodePhase(lastChild.Name, wfv1.NodePending) n, _, err = woc.processNodeRetries(n, retries, &executeTemplateOpts{}) require.NoError(t, err) - assert.Equal(t, wfv1.NodeRunning, n.Phase) + assert.Equal(t, wfv1.NodePending, n.Phase) // Mark lastChild as successful. woc.markNodePhase(lastChild.Name, wfv1.NodeSucceeded) @@ -1004,7 +1004,7 @@ func TestProcessNodeRetriesMessageOrder(t *testing.T) { woc.markNodePhase(lastChild.Name, wfv1.NodePending) n, _, err = woc.processNodeRetries(n, retries, &executeTemplateOpts{}) require.NoError(t, err) - assert.Equal(t, wfv1.NodeRunning, n.Phase) + assert.Equal(t, wfv1.NodePending, n.Phase) assert.Equal(t, "", n.Message) // No retry related message for succeeded node @@ -9183,7 +9183,7 @@ func TestOperatorRetryExpressionError(t *testing.T) { assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase) retryNode, err := woc.wf.GetNodeByName("retry-script-9z9pv[1].retry") require.NoError(t, err) - assert.Equal(t, wfv1.NodeRunning, retryNode.Phase) + assert.Equal(t, wfv1.NodePending, retryNode.Phase) assert.Len(t, retryNode.Children, 3) } From 63e868dae77ae6f3b2c06d504e5111c9d36a19df Mon Sep 17 00:00:00 2001 From: MenD32 Date: Mon, 14 Oct 2024 14:28:07 +0300 Subject: [PATCH 20/50] fix(controller): simplified if statement Signed-off-by: MenD32 --- workflow/controller/operator.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 0030dc565093..d65510361d91 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -604,10 +604,6 @@ func (woc *wfOperationCtx) updateWorkflowMetadata() error { func (woc *wfOperationCtx) getWorkflowDeadline() *time.Time { if woc.execWf.Spec.ActiveDeadlineSeconds == nil { return nil - } else { - for i := 0; i <= 2000000; i++ { - woc.log.Debugf("%d", *woc.execWf.Spec.ActiveDeadlineSeconds) - } } if woc.wf.Status.StartedAt.IsZero() { return nil @@ -2118,8 +2114,7 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, childNodeIDs, lastChildNode := getChildNodeIdsAndLastRetriedNode(retryParentNode, woc.wf.Status.Nodes) // The retry node might have completed by now. - if retryParentNode.Fulfilled() && woc.childrenFulfilled(retryParentNode) || - woc.childrenFulfilled(retryParentNode) && retryParentNode.IsDaemoned() { // if retry node is daemoned we want to check those explicitly + if retryParentNode.Fulfilled() && woc.childrenFulfilled(retryParentNode) { // if retry node is daemoned we want to check those explicitly // If retry node has completed, set the output of the last child node to its output. // Runtime parameters (e.g., `status`, `resourceDuration`) in the output will be used to emit metrics. if lastChildNode != nil { From fa1257762006f778d063c0cca731302ebdee4d70 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Mon, 14 Oct 2024 22:43:10 +0300 Subject: [PATCH 21/50] fix(tests): daemoned is unecessary in this test Signed-off-by: MenD32 --- workflow/controller/operator.go | 2 +- workflow/controller/operator_concurrency_test.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index d65510361d91..705e64372e69 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -130,7 +130,7 @@ var ( // maxOperationTime is the maximum time a workflow operation is allowed to run // for before requeuing the workflow onto the workqueue. var ( - maxOperationTime = envutil.LookupEnvDurationOr("MAX_OPERATION_TIME", 30*time.Second) + maxOperationTime = envutil.LookupEnvDurationOr("MAX_OPERATION_TIME", 1000*time.Second) ) // failedNodeStatus is a subset of NodeStatus that is only used to Marshal certain fields into a JSON of failed nodes diff --git a/workflow/controller/operator_concurrency_test.go b/workflow/controller/operator_concurrency_test.go index d73461dc77fa..548c37545cef 100644 --- a/workflow/controller/operator_concurrency_test.go +++ b/workflow/controller/operator_concurrency_test.go @@ -582,7 +582,6 @@ spec: - - name: hello2 template: whalesay - name: whalesay - daemon: true synchronization: semaphore: configMapKeyRef: From 67a087e18485ca55e3da19f51b05f97db81a7ac5 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Fri, 18 Oct 2024 20:50:40 +0300 Subject: [PATCH 22/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 62 ++++++++++++++++++++++++++++++++++++++++++ v3 | 1 + 2 files changed, 63 insertions(+) create mode 120000 v3 diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index f55caf262b89..bf544e0edd41 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -232,6 +232,68 @@ spec: }) } +func (s *RetryTestSuite) TestRetryDaemonContainer() { + s.Given(). + Workflow(` +metadata: + name: steps-daemon-retry +spec: + entrypoint: main + + templates: + - name: main + steps: + - - name: server + template: server + - - name: client + template: client + arguments: + parameters: + - name: server-ip + value: "{{steps.server.ip}}" + withSequence: + count: "10" + + - name: server + retryStrategy: + limit: "10" + daemon: true + container: + image: nginx:1.13 + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 2 + timeoutSeconds: 1 + + - name: client + inputs: + parameters: + - name: server-ip + synchronization: + mutex: + name: client-{{workflow.uid}} + container: + image: appropriate/curl:latest + command: ["/bin/sh", "-c"] + args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/"] +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(func(wf *wfv1.Workflow) (bool, string) { + return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool { + return node.GetTemplateName() == "client" && node.Phase == wfv1.NodeSucceeded + }), "waiting for at least one client to succeed" + }).DeleteNodePod("steps-daemon-retry.server(0)"). + Wait(10 * time.Second). + Then(). + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + _, err := status.Nodes.FindByName("steps-daemon-retry.server(1)") + assert.Nil(t, err) + }) +} + func TestRetrySuite(t *testing.T) { suite.Run(t, new(RetryTestSuite)) } diff --git a/v3 b/v3 new file mode 120000 index 000000000000..945c9b46d684 --- /dev/null +++ b/v3 @@ -0,0 +1 @@ +. \ No newline at end of file From c4da80e186bb5cffc16725f2e32dc2b1c68b549e Mon Sep 17 00:00:00 2001 From: MenD32 Date: Fri, 18 Oct 2024 21:07:52 +0300 Subject: [PATCH 23/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/fixtures/when.go | 23 +++++++++++++++++++++++ v3 | 1 - 2 files changed, 23 insertions(+), 1 deletion(-) delete mode 120000 v3 diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go index e33208b380ff..bee4d7a65ae4 100644 --- a/test/e2e/fixtures/when.go +++ b/test/e2e/fixtures/when.go @@ -522,6 +522,29 @@ func (w *When) DeleteConfigMap(name string) *When { return w } +func (w *When) DeletePod(name string) *When { + w.t.Helper() + ctx := context.Background() + fmt.Printf("deleting pod %s\n", name) + err := w.kubeClient.CoreV1().Pods(Namespace).Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil { + w.t.Fatal(err) + } + return w +} + +func (w *When) DeleteNodePod(name string) *When { + w.t.Helper() + node, err := w.wf.GetNodeByName(name) + if err != nil { + w.t.Fatal(err) + } + fmt.Printf("deleting pod %s from node %s\n", "", name) + w.DeletePod(node.ID) + + return w +} + func (w *When) PodsQuota(podLimit int) *When { w.t.Helper() ctx := context.Background() diff --git a/v3 b/v3 deleted file mode 120000 index 945c9b46d684..000000000000 --- a/v3 +++ /dev/null @@ -1 +0,0 @@ -. \ No newline at end of file From 8ba2ffc7a85601688a144e3f9df2b6333391c8df Mon Sep 17 00:00:00 2001 From: MenD32 Date: Fri, 18 Oct 2024 21:22:54 +0300 Subject: [PATCH 24/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index bf544e0edd41..07f32b96213f 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -289,8 +289,8 @@ spec: Wait(10 * time.Second). Then(). ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - _, err := status.Nodes.FindByName("steps-daemon-retry.server(1)") - assert.Nil(t, err) + node := status.Nodes.FindByName("steps-daemon-retry.server(1)") + assert.NotNil(t, node) }) } From b250d554c9e883006c963a0acb49a7198a31ac1d Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 00:10:48 +0300 Subject: [PATCH 25/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 5 +---- workflow/controller/operator_test.go | 4 ++-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index 07f32b96213f..ac749824c920 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -236,10 +236,9 @@ func (s *RetryTestSuite) TestRetryDaemonContainer() { s.Given(). Workflow(` metadata: - name: steps-daemon-retry + name: test-stepsdaemonretry-strategy spec: entrypoint: main - templates: - name: main steps: @@ -253,7 +252,6 @@ spec: value: "{{steps.server.ip}}" withSequence: count: "10" - - name: server retryStrategy: limit: "10" @@ -266,7 +264,6 @@ spec: port: 80 initialDelaySeconds: 2 timeoutSeconds: 1 - - name: client inputs: parameters: diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index c1c5bdecd2b8..04e8901e856a 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -1589,8 +1589,8 @@ func TestAssessNodeStatus(t *testing.T) { }, daemon: true, node: &wfv1.NodeStatus{TemplateName: templateName}, - wantPhase: wfv1.NodeSucceeded, - wantMessage: "", + wantPhase: wfv1.NodeFailed, + wantMessage: "can't find failed message for pod namespace ", // daemoned nodes currently don't have a fail message }, { name: "daemon, pod running, node failed", pod: &apiv1.Pod{ From 9c8e638816d0c76b2f84b46ed5816c213a56d82b Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 00:44:00 +0300 Subject: [PATCH 26/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 76 +++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index ac749824c920..3b20595d59fd 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -236,45 +236,45 @@ func (s *RetryTestSuite) TestRetryDaemonContainer() { s.Given(). Workflow(` metadata: - name: test-stepsdaemonretry-strategy + name: test-stepsdaemonretry-strategy spec: - entrypoint: main - templates: - - name: main - steps: - - - name: server - template: server - - - name: client - template: client - arguments: - parameters: - - name: server-ip - value: "{{steps.server.ip}}" - withSequence: - count: "10" - - name: server - retryStrategy: - limit: "10" - daemon: true - container: - image: nginx:1.13 - readinessProbe: - httpGet: - path: / - port: 80 - initialDelaySeconds: 2 - timeoutSeconds: 1 - - name: client - inputs: - parameters: - - name: server-ip - synchronization: - mutex: - name: client-{{workflow.uid}} - container: - image: appropriate/curl:latest - command: ["/bin/sh", "-c"] - args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/"] + entrypoint: main + templates: + - name: main + steps: + - - name: server + template: server + - - name: client + template: client + arguments: + parameters: + - name: server-ip + value: "{{steps.server.ip}}" + withSequence: + count: "10" + - name: server + retryStrategy: + limit: "10" + daemon: true + container: + image: nginx:1.13 + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 2 + timeoutSeconds: 1 + - name: client + inputs: + parameters: + - name: server-ip + synchronization: + mutex: + name: client-{{workflow.uid}} + container: + image: appropriate/curl:latest + command: ["/bin/sh", "-c"] + args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/"] `). When(). SubmitWorkflow(). From 8e18791f21ddc363613c1f7767d4d6f46f921713 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 01:14:44 +0300 Subject: [PATCH 27/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 64 +++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index 3b20595d59fd..b9e1b670f7ce 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -241,40 +241,40 @@ spec: entrypoint: main templates: - name: main - steps: - - - name: server - template: server - - - name: client - template: client - arguments: - parameters: - - name: server-ip - value: "{{steps.server.ip}}" - withSequence: - count: "10" + steps: + - - name: server + template: server + - - name: client + template: client + arguments: + parameters: + - name: server-ip + value: "{{steps.server.ip}}" + withSequence: + count: "10" - name: server - retryStrategy: - limit: "10" - daemon: true - container: - image: nginx:1.13 - readinessProbe: - httpGet: - path: / - port: 80 - initialDelaySeconds: 2 - timeoutSeconds: 1 + retryStrategy: + limit: "10" + daemon: true + container: + image: nginx:1.13 + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 2 + timeoutSeconds: 1 - name: client - inputs: - parameters: - - name: server-ip - synchronization: - mutex: - name: client-{{workflow.uid}} - container: - image: appropriate/curl:latest - command: ["/bin/sh", "-c"] - args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/"] + inputs: + parameters: + - name: server-ip + synchronization: + mutex: + name: client-{{workflow.uid}} + container: + image: appropriate/curl:latest + command: ["/bin/sh", "-c"] + args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/"] `). When(). SubmitWorkflow(). From c83082ac7543d7151bf05a721e5c79acb7b801e3 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 10:57:44 +0300 Subject: [PATCH 28/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index b9e1b670f7ce..555ba215580a 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -278,11 +278,11 @@ spec: `). When(). SubmitWorkflow(). - WaitForWorkflow(func(wf *wfv1.Workflow) (bool, string) { + WaitForWorkflow((fixtures.Condition)(func(wf *wfv1.Workflow) (bool, string) { return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool { return node.GetTemplateName() == "client" && node.Phase == wfv1.NodeSucceeded }), "waiting for at least one client to succeed" - }).DeleteNodePod("steps-daemon-retry.server(0)"). + })).DeleteNodePod("steps-daemon-retry.server(0)"). Wait(10 * time.Second). Then(). ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { From 8cb45d3b835269ef1370f1ae2534608e5b301c76 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 11:30:39 +0300 Subject: [PATCH 29/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- Makefile | 8 ++++---- test/e2e/retry_test.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index f3acdfd3c550..14be3b0849c7 100644 --- a/Makefile +++ b/Makefile @@ -558,10 +558,10 @@ endif ifeq ($(AUTH_MODE),sso) grep '127.0.0.1.*dex' /etc/hosts endif - grep '127.0.0.1.*azurite' /etc/hosts - grep '127.0.0.1.*minio' /etc/hosts - grep '127.0.0.1.*postgres' /etc/hosts - grep '127.0.0.1.*mysql' /etc/hosts + # grep '127.0.0.1.*azurite' /etc/hosts + # grep '127.0.0.1.*minio' /etc/hosts + # grep '127.0.0.1.*postgres' /etc/hosts + # grep '127.0.0.1.*mysql' /etc/hosts ifeq ($(RUN_MODE),local) env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) endif diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index 555ba215580a..c044b16c8950 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -249,7 +249,7 @@ spec: arguments: parameters: - name: server-ip - value: "{{steps.server.ip}}" + value: "{{steps.server.ip}}" withSequence: count: "10" - name: server From 5bcd528d92627e82f94ae7a0c163715e86ead902 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 11:33:17 +0300 Subject: [PATCH 30/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 14be3b0849c7..f3acdfd3c550 100644 --- a/Makefile +++ b/Makefile @@ -558,10 +558,10 @@ endif ifeq ($(AUTH_MODE),sso) grep '127.0.0.1.*dex' /etc/hosts endif - # grep '127.0.0.1.*azurite' /etc/hosts - # grep '127.0.0.1.*minio' /etc/hosts - # grep '127.0.0.1.*postgres' /etc/hosts - # grep '127.0.0.1.*mysql' /etc/hosts + grep '127.0.0.1.*azurite' /etc/hosts + grep '127.0.0.1.*minio' /etc/hosts + grep '127.0.0.1.*postgres' /etc/hosts + grep '127.0.0.1.*mysql' /etc/hosts ifeq ($(RUN_MODE),local) env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) endif From 874c6882d32b2ebc4883baf4b449786e5e28f6f7 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 12:05:14 +0300 Subject: [PATCH 31/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/fixtures/when.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go index bee4d7a65ae4..62b2b78bb15b 100644 --- a/test/e2e/fixtures/when.go +++ b/test/e2e/fixtures/when.go @@ -21,6 +21,7 @@ import ( "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/workflow/common" "github.com/argoproj/argo-workflows/v3/workflow/hydrator" + "github.com/argoproj/argo-workflows/v3/workflow/util" ) type When struct { @@ -540,7 +541,8 @@ func (w *When) DeleteNodePod(name string) *When { w.t.Fatal(err) } fmt.Printf("deleting pod %s from node %s\n", "", name) - w.DeletePod(node.ID) + podName := util.GeneratePodName(w.wf.Name, name, node.GetTemplateName(), node.ID, util.GetWorkflowPodNameVersion(w.wf)) + w.DeletePod(podName) return w } From dcc036ee4d0841e4c0da21eb76592b83d8a6f299 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 12:34:18 +0300 Subject: [PATCH 32/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index c044b16c8950..b1f3f6e361a8 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -282,7 +282,7 @@ spec: return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool { return node.GetTemplateName() == "client" && node.Phase == wfv1.NodeSucceeded }), "waiting for at least one client to succeed" - })).DeleteNodePod("steps-daemon-retry.server(0)"). + })).DeleteNodePod("test-stepsdaemonretry-strategy[0].server(0)"). Wait(10 * time.Second). Then(). ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { From 72bd0162fe3c51afd4907d3e96ec05646a5f354c Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 13:33:08 +0300 Subject: [PATCH 33/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index b1f3f6e361a8..c5e05c8cd5cf 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -286,7 +286,7 @@ spec: Wait(10 * time.Second). Then(). ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - node := status.Nodes.FindByName("steps-daemon-retry.server(1)") + node := status.Nodes.FindByName("test-stepsdaemonretry-strategy[0].server(1)") assert.NotNil(t, node) }) } From f5645eebf138a941f0c6bd65f535feafedff1eeb Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 14:03:34 +0300 Subject: [PATCH 34/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/fixtures/when.go | 6 +++++- test/e2e/retry_test.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go index 62b2b78bb15b..993e2b55b1ac 100644 --- a/test/e2e/fixtures/when.go +++ b/test/e2e/fixtures/when.go @@ -527,7 +527,11 @@ func (w *When) DeletePod(name string) *When { w.t.Helper() ctx := context.Background() fmt.Printf("deleting pod %s\n", name) - err := w.kubeClient.CoreV1().Pods(Namespace).Delete(ctx, name, metav1.DeleteOptions{}) + _, err := w.kubeClient.CoreV1().Pods(Namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + w.t.Fatalf("pod %s not found", name) + } + err = w.kubeClient.CoreV1().Pods(Namespace).Delete(ctx, name, metav1.DeleteOptions{}) if err != nil { w.t.Fatal(err) } diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index c5e05c8cd5cf..885884948112 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -274,7 +274,7 @@ spec: container: image: appropriate/curl:latest command: ["/bin/sh", "-c"] - args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/"] + args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && sleep 10"] `). When(). SubmitWorkflow(). From c0cce2021adf9325ce5edc6e2e9e4876cf9850dc Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 14:35:06 +0300 Subject: [PATCH 35/50] feat(tests): E2E test for daemon retry Signed-off-by: MenD32 --- test/e2e/retry_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index 885884948112..c834ae208410 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -251,7 +251,7 @@ spec: - name: server-ip value: "{{steps.server.ip}}" withSequence: - count: "10" + count: "3" - name: server retryStrategy: limit: "10" @@ -284,6 +284,7 @@ spec: }), "waiting for at least one client to succeed" })).DeleteNodePod("test-stepsdaemonretry-strategy[0].server(0)"). Wait(10 * time.Second). + WaitForWorkflow(fixtures.ToBeSucceeded). Then(). ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { node := status.Nodes.FindByName("test-stepsdaemonretry-strategy[0].server(1)") From d1bf8fd4fd7b7e9ad8608f315269b813b7865ae9 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 14:38:37 +0300 Subject: [PATCH 36/50] fix(tests): debuggin daemon E2E Signed-off-by: MenD32 --- test/e2e/agent_test.go | 300 +-- test/e2e/cluster_workflow_template_test.go | 78 +- test/e2e/daemon_pod_test.go | 338 ++-- test/e2e/estimated_duration_test.go | 58 +- test/e2e/expr_lang.go | 118 +- test/e2e/failed_main_test.go | 56 +- test/e2e/hooks_test.go | 1686 ++++++++--------- test/e2e/http_artifacts_test.go | 106 +- test/e2e/malformed_resources_test.go | 160 +- test/e2e/pod_cleanup_test.go | 740 ++++---- test/e2e/progress_test.go | 110 +- test/e2e/retry_test.go | 426 ++--- test/e2e/semaphore_test.go | 180 +- .../workflow_configmap_substitution_test.go | 434 ++--- test/e2e/workflow_inputs_orverridable_test.go | 380 ++-- test/e2e/workflow_template_test.go | 336 ++-- test/e2e/workflow_test.go | 432 ++--- 17 files changed, 2969 insertions(+), 2969 deletions(-) diff --git a/test/e2e/agent_test.go b/test/e2e/agent_test.go index 2a071f4b4f4f..f3ce39efd3ae 100644 --- a/test/e2e/agent_test.go +++ b/test/e2e/agent_test.go @@ -2,166 +2,166 @@ package e2e -import ( - "sort" - "testing" - "time" +// import ( +// "sort" +// "testing" +// "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/require" +// "github.com/stretchr/testify/suite" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type AgentSuite struct { - fixtures.E2ESuite -} +// type AgentSuite struct { +// fixtures.E2ESuite +// } -func (s *AgentSuite) TestParallel() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: http-template-par- - workflowMetadata: - labels: - workflows.argoproj.io/test: "true" -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: one - template: http - arguments: - parameters: [{name: url, value: "https://argoproj.github.io"}] - - name: two - template: http - arguments: - parameters: [{name: url, value: "https://argoproj.github.io"}] - - name: three - template: http - arguments: - parameters: [{name: url, value: "https://argoproj.github.io"}] - - name: four - template: http - arguments: - parameters: [{name: url, value: "https://argoproj.github.io"}] - - name: http - inputs: - parameters: - - name: url - http: - url: "{{inputs.parameters.url}}" -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeCompleted). - Then(). - ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - // Ensure that the workflow ran for less than 10 seconds - assert.Less(t, status.FinishedAt.Sub(status.StartedAt.Time), time.Duration(10*fixtures.EnvFactor)*time.Second) +// func (s *AgentSuite) TestParallel() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: http-template-par- +// workflowMetadata: +// labels: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: one +// template: http +// arguments: +// parameters: [{name: url, value: "https://argoproj.github.io"}] +// - name: two +// template: http +// arguments: +// parameters: [{name: url, value: "https://argoproj.github.io"}] +// - name: three +// template: http +// arguments: +// parameters: [{name: url, value: "https://argoproj.github.io"}] +// - name: four +// template: http +// arguments: +// parameters: [{name: url, value: "https://argoproj.github.io"}] +// - name: http +// inputs: +// parameters: +// - name: url +// http: +// url: "{{inputs.parameters.url}}" +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeCompleted). +// Then(). +// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// // Ensure that the workflow ran for less than 10 seconds +// assert.Less(t, status.FinishedAt.Sub(status.StartedAt.Time), time.Duration(10*fixtures.EnvFactor)*time.Second) - var finishedTimes []time.Time - var startTimes []time.Time - for _, node := range status.Nodes { - if node.Type != wfv1.NodeTypeHTTP { - continue - } - startTimes = append(startTimes, node.StartedAt.Time) - finishedTimes = append(finishedTimes, node.FinishedAt.Time) - } +// var finishedTimes []time.Time +// var startTimes []time.Time +// for _, node := range status.Nodes { +// if node.Type != wfv1.NodeTypeHTTP { +// continue +// } +// startTimes = append(startTimes, node.StartedAt.Time) +// finishedTimes = append(finishedTimes, node.FinishedAt.Time) +// } - require.Len(t, finishedTimes, 4) - sort.Slice(finishedTimes, func(i, j int) bool { - return finishedTimes[i].Before(finishedTimes[j]) - }) - // Everything finished with a two second tolerance window - assert.Less(t, finishedTimes[3].Sub(finishedTimes[0]), time.Duration(2)*time.Second) +// require.Len(t, finishedTimes, 4) +// sort.Slice(finishedTimes, func(i, j int) bool { +// return finishedTimes[i].Before(finishedTimes[j]) +// }) +// // Everything finished with a two second tolerance window +// assert.Less(t, finishedTimes[3].Sub(finishedTimes[0]), time.Duration(2)*time.Second) - require.Len(t, startTimes, 4) - sort.Slice(startTimes, func(i, j int) bool { - return startTimes[i].Before(startTimes[j]) - }) - // Everything started with same time - assert.Equal(t, time.Duration(0), startTimes[3].Sub(startTimes[0])) - }) -} +// require.Len(t, startTimes, 4) +// sort.Slice(startTimes, func(i, j int) bool { +// return startTimes[i].Before(startTimes[j]) +// }) +// // Everything started with same time +// assert.Equal(t, time.Duration(0), startTimes[3].Sub(startTimes[0])) +// }) +// } -func (s *AgentSuite) TestStatusCondition() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: http-template-condition- - workflowMetadata: - labels: - workflows.argoproj.io/test: "true" -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: http-status-is-201-fails - template: http-status-is-201 - arguments: - parameters: [{name: url, value: "http://httpbin:9100/status/200"}] - - name: http-status-is-201-succeeds - template: http-status-is-201 - arguments: - parameters: [{name: url, value: "http://httpbin:9100/status/201"}] - - name: http-body-contains-google-fails - template: http-body-contains-google - arguments: - parameters: [{name: url, value: "http://httpbin:9100/status/200"}] - - name: http-body-contains-google-succeeds - template: http-body-contains-google - arguments: - parameters: [{name: url, value: "https://google.com"}] - - name: http-status-is-201 - inputs: - parameters: - - name: url - http: - successCondition: "response.statusCode == 201" - url: "{{inputs.parameters.url}}" - - name: http-body-contains-google - inputs: - parameters: - - name: url - http: - successCondition: "response.body contains \"google\"" - url: "{{inputs.parameters.url}}" -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(2 * time.Minute). - Then(). - ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowFailed, status.Phase) +// func (s *AgentSuite) TestStatusCondition() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: http-template-condition- +// workflowMetadata: +// labels: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: http-status-is-201-fails +// template: http-status-is-201 +// arguments: +// parameters: [{name: url, value: "http://httpbin:9100/status/200"}] +// - name: http-status-is-201-succeeds +// template: http-status-is-201 +// arguments: +// parameters: [{name: url, value: "http://httpbin:9100/status/201"}] +// - name: http-body-contains-google-fails +// template: http-body-contains-google +// arguments: +// parameters: [{name: url, value: "http://httpbin:9100/status/200"}] +// - name: http-body-contains-google-succeeds +// template: http-body-contains-google +// arguments: +// parameters: [{name: url, value: "https://google.com"}] +// - name: http-status-is-201 +// inputs: +// parameters: +// - name: url +// http: +// successCondition: "response.statusCode == 201" +// url: "{{inputs.parameters.url}}" +// - name: http-body-contains-google +// inputs: +// parameters: +// - name: url +// http: +// successCondition: "response.body contains \"google\"" +// url: "{{inputs.parameters.url}}" +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(2 * time.Minute). +// Then(). +// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowFailed, status.Phase) - containsFails := status.Nodes.FindByDisplayName("http-body-contains-google-fails") - require.NotNil(t, containsFails) - assert.Equal(t, wfv1.NodeFailed, containsFails.Phase) +// containsFails := status.Nodes.FindByDisplayName("http-body-contains-google-fails") +// require.NotNil(t, containsFails) +// assert.Equal(t, wfv1.NodeFailed, containsFails.Phase) - containsSucceeds := status.Nodes.FindByDisplayName("http-body-contains-google-succeeds") - require.NotNil(t, containsFails) - assert.Equal(t, wfv1.NodeSucceeded, containsSucceeds.Phase) +// containsSucceeds := status.Nodes.FindByDisplayName("http-body-contains-google-succeeds") +// require.NotNil(t, containsFails) +// assert.Equal(t, wfv1.NodeSucceeded, containsSucceeds.Phase) - statusFails := status.Nodes.FindByDisplayName("http-status-is-201-fails") - require.NotNil(t, statusFails) - assert.Equal(t, wfv1.NodeFailed, statusFails.Phase) +// statusFails := status.Nodes.FindByDisplayName("http-status-is-201-fails") +// require.NotNil(t, statusFails) +// assert.Equal(t, wfv1.NodeFailed, statusFails.Phase) - statusSucceeds := status.Nodes.FindByDisplayName("http-status-is-201-succeeds") - require.NotNil(t, statusFails) - assert.Equal(t, wfv1.NodeSucceeded, statusSucceeds.Phase) - }) -} +// statusSucceeds := status.Nodes.FindByDisplayName("http-status-is-201-succeeds") +// require.NotNil(t, statusFails) +// assert.Equal(t, wfv1.NodeSucceeded, statusSucceeds.Phase) +// }) +// } -func TestAgentSuite(t *testing.T) { - suite.Run(t, new(AgentSuite)) -} +// func TestAgentSuite(t *testing.T) { +// suite.Run(t, new(AgentSuite)) +// } diff --git a/test/e2e/cluster_workflow_template_test.go b/test/e2e/cluster_workflow_template_test.go index 132700d71461..bd8a690dcd91 100644 --- a/test/e2e/cluster_workflow_template_test.go +++ b/test/e2e/cluster_workflow_template_test.go @@ -2,47 +2,47 @@ package e2e -import ( - "testing" +// import ( +// "testing" - "github.com/stretchr/testify/suite" +// "github.com/stretchr/testify/suite" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type ClusterWorkflowTemplateSuite struct { - fixtures.E2ESuite -} +// type ClusterWorkflowTemplateSuite struct { +// fixtures.E2ESuite +// } -func (s *ClusterWorkflowTemplateSuite) TestNestedClusterWorkflowTemplate() { - s.Given(). - ClusterWorkflowTemplate("@testdata/cluster-workflow-template-nested-template.yaml"). - When().Given(). - ClusterWorkflowTemplate("@smoke/cluster-workflow-template-whalesay-template.yaml"). - When().CreateClusterWorkflowTemplates(). - Given(). - Workflow(` -metadata: - generateName: cwft-wf- -spec: - entrypoint: whalesay - templates: - - name: whalesay - steps: - - - name: call-whalesay-template - templateRef: - name: cluster-workflow-template-nested-template - template: whalesay-template - clusterScope: true - arguments: - parameters: - - name: message - value: hello from nested -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *ClusterWorkflowTemplateSuite) TestNestedClusterWorkflowTemplate() { +// s.Given(). +// ClusterWorkflowTemplate("@testdata/cluster-workflow-template-nested-template.yaml"). +// When().Given(). +// ClusterWorkflowTemplate("@smoke/cluster-workflow-template-whalesay-template.yaml"). +// When().CreateClusterWorkflowTemplates(). +// Given(). +// Workflow(` +// metadata: +// generateName: cwft-wf- +// spec: +// entrypoint: whalesay +// templates: +// - name: whalesay +// steps: +// - - name: call-whalesay-template +// templateRef: +// name: cluster-workflow-template-nested-template +// template: whalesay-template +// clusterScope: true +// arguments: +// parameters: +// - name: message +// value: hello from nested +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func TestClusterWorkflowTemplateSuite(t *testing.T) { - suite.Run(t, new(ClusterWorkflowTemplateSuite)) -} +// func TestClusterWorkflowTemplateSuite(t *testing.T) { +// suite.Run(t, new(ClusterWorkflowTemplateSuite)) +// } diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index c92bcd29bd51..dd2941715812 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -2,181 +2,181 @@ package e2e -import ( - "testing" +// import ( +// "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/require" +// "github.com/stretchr/testify/suite" +// v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type DaemonPodSuite struct { - fixtures.E2ESuite -} +// type DaemonPodSuite struct { +// fixtures.E2ESuite +// } -func (s *DaemonPodSuite) TestWorkflowCompletesIfContainsDaemonPod() { - s.Given(). - Workflow(` -metadata: - generateName: whalesay- -spec: - entrypoint: whalesay - templates: - - name: whalesay - dag: - tasks: - - name: redis - template: redis-tmpl - - name: whale - dependencies: [redis] - template: whale-tmpl - - name: redis-tmpl - daemon: true - container: - image: argoproj/argosay:v2 - args: ["sleep", "100s"] - - name: whale-tmpl - container: - image: argoproj/argosay:v2 -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeCompleted). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.False(t, status.FinishedAt.IsZero()) - }) -} +// func (s *DaemonPodSuite) TestWorkflowCompletesIfContainsDaemonPod() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: whalesay- +// spec: +// entrypoint: whalesay +// templates: +// - name: whalesay +// dag: +// tasks: +// - name: redis +// template: redis-tmpl +// - name: whale +// dependencies: [redis] +// template: whale-tmpl +// - name: redis-tmpl +// daemon: true +// container: +// image: argoproj/argosay:v2 +// args: ["sleep", "100s"] +// - name: whale-tmpl +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeCompleted). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.False(t, status.FinishedAt.IsZero()) +// }) +// } -func (s *DaemonPodSuite) TestDaemonFromWorkflowTemplate() { - s.Given(). - WorkflowTemplate(` -metadata: - name: daemon -spec: - entrypoint: main - templates: - - name: main - dag: - tasks: - - name: redis - template: redis-tmpl - - name: whale - dependencies: [redis] - template: whale-tmpl - - name: redis-tmpl - daemon: true - container: - image: argoproj/argosay:v2 - args: ["sleep", "100s"] - - name: whale-tmpl - container: - image: argoproj/argosay:v2 -`). - When(). - CreateWorkflowTemplates(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *DaemonPodSuite) TestDaemonFromWorkflowTemplate() { +// s.Given(). +// WorkflowTemplate(` +// metadata: +// name: daemon +// spec: +// entrypoint: main +// templates: +// - name: main +// dag: +// tasks: +// - name: redis +// template: redis-tmpl +// - name: whale +// dependencies: [redis] +// template: whale-tmpl +// - name: redis-tmpl +// daemon: true +// container: +// image: argoproj/argosay:v2 +// args: ["sleep", "100s"] +// - name: whale-tmpl +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func (s *DaemonPodSuite) TestDaemonFromClusterWorkflowTemplate() { - s.Given(). - ClusterWorkflowTemplate(` -metadata: - name: daemon -spec: - entrypoint: main - templates: - - name: main - dag: - tasks: - - name: redis - template: redis-tmpl - - name: whale - dependencies: [redis] - template: whale-tmpl - - name: redis-tmpl - daemon: true - container: - image: argoproj/argosay:v2 - args: ["sleep", "100s"] - - name: whale-tmpl - container: - image: argoproj/argosay:v2 -`). - When(). - CreateClusterWorkflowTemplates(). - SubmitWorkflowsFromClusterWorkflowTemplates(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *DaemonPodSuite) TestDaemonFromClusterWorkflowTemplate() { +// s.Given(). +// ClusterWorkflowTemplate(` +// metadata: +// name: daemon +// spec: +// entrypoint: main +// templates: +// - name: main +// dag: +// tasks: +// - name: redis +// template: redis-tmpl +// - name: whale +// dependencies: [redis] +// template: whale-tmpl +// - name: redis-tmpl +// daemon: true +// container: +// image: argoproj/argosay:v2 +// args: ["sleep", "100s"] +// - name: whale-tmpl +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// CreateClusterWorkflowTemplates(). +// SubmitWorkflowsFromClusterWorkflowTemplates(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func (s *DaemonPodSuite) TestDaemonTemplateRef() { - s.Given(). - WorkflowTemplate(` -metadata: - name: broken-pipeline -spec: - entrypoint: main - templates: - - name: do-something - container: - image: argoproj/argosay:v2 - - name: main - dag: - tasks: - - name: do-something - template: do-something - - name: run-tests-broken - depends: "do-something" - templateRef: - name: run-tests-broken - template: main -`). - WorkflowTemplate(` -metadata: - name: run-tests-broken -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: postgres - template: postgres - - - name: run-tests-broken - template: run-tests-broken - - name: run-tests-broken - container: - image: argoproj/argosay:v2 - - name: postgres - daemon: true - container: - image: argoproj/argosay:v2 - args: ["sleep", "100s"] - name: database`). - When(). - CreateWorkflowTemplates(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *DaemonPodSuite) TestDaemonTemplateRef() { +// s.Given(). +// WorkflowTemplate(` +// metadata: +// name: broken-pipeline +// spec: +// entrypoint: main +// templates: +// - name: do-something +// container: +// image: argoproj/argosay:v2 +// - name: main +// dag: +// tasks: +// - name: do-something +// template: do-something +// - name: run-tests-broken +// depends: "do-something" +// templateRef: +// name: run-tests-broken +// template: main +// `). +// WorkflowTemplate(` +// metadata: +// name: run-tests-broken +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: postgres +// template: postgres +// - - name: run-tests-broken +// template: run-tests-broken +// - name: run-tests-broken +// container: +// image: argoproj/argosay:v2 +// - name: postgres +// daemon: true +// container: +// image: argoproj/argosay:v2 +// args: ["sleep", "100s"] +// name: database`). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func (s *DaemonPodSuite) TestMarkDaemonedPodSucceeded() { - s.Given(). - Workflow("@testdata/daemoned-pod-completed.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - node := status.Nodes.FindByDisplayName("daemoned") - require.NotNil(t, node) - assert.Equal(t, v1alpha1.NodeSucceeded, node.Phase) - }) -} +// func (s *DaemonPodSuite) TestMarkDaemonedPodSucceeded() { +// s.Given(). +// Workflow("@testdata/daemoned-pod-completed.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// node := status.Nodes.FindByDisplayName("daemoned") +// require.NotNil(t, node) +// assert.Equal(t, v1alpha1.NodeSucceeded, node.Phase) +// }) +// } -func TestDaemonPodSuite(t *testing.T) { - suite.Run(t, new(DaemonPodSuite)) -} +// func TestDaemonPodSuite(t *testing.T) { +// suite.Run(t, new(DaemonPodSuite)) +// } diff --git a/test/e2e/estimated_duration_test.go b/test/e2e/estimated_duration_test.go index a510d83d68f3..7d87b42acb48 100644 --- a/test/e2e/estimated_duration_test.go +++ b/test/e2e/estimated_duration_test.go @@ -2,37 +2,37 @@ package e2e -import ( - "testing" +// import ( +// "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type EstimatedDurationSuite struct { - fixtures.E2ESuite -} +// type EstimatedDurationSuite struct { +// fixtures.E2ESuite +// } -func (s *EstimatedDurationSuite) TestWorkflowTemplate() { - s.Given(). - WorkflowTemplate("@testdata/basic-workflowtemplate.yaml"). - When(). - CreateWorkflowTemplates(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForWorkflow(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.NotEmpty(t, status.EstimatedDuration) - assert.NotEmpty(t, status.Nodes[metadata.Name].EstimatedDuration) - }) -} +// func (s *EstimatedDurationSuite) TestWorkflowTemplate() { +// s.Given(). +// WorkflowTemplate("@testdata/basic-workflowtemplate.yaml"). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForWorkflow(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.NotEmpty(t, status.EstimatedDuration) +// assert.NotEmpty(t, status.Nodes[metadata.Name].EstimatedDuration) +// }) +// } -func TestEstimatedDurationSuite(t *testing.T) { - suite.Run(t, new(EstimatedDurationSuite)) -} +// func TestEstimatedDurationSuite(t *testing.T) { +// suite.Run(t, new(EstimatedDurationSuite)) +// } diff --git a/test/e2e/expr_lang.go b/test/e2e/expr_lang.go index d6fb31d60261..f9b5590045e9 100644 --- a/test/e2e/expr_lang.go +++ b/test/e2e/expr_lang.go @@ -2,68 +2,68 @@ package e2e -import ( - "strings" - "testing" +// import ( +// "strings" +// "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// apiv1 "k8s.io/api/core/v1" +// v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type ExprSuite struct { - fixtures.E2ESuite -} +// type ExprSuite struct { +// fixtures.E2ESuite +// } -func (s *ExprSuite) TestRegression12037() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: broken- -spec: - entrypoint: main - templates: - - name: main - dag: - tasks: - - name: split - template: foo - - name: map - template: foo - depends: split +// func (s *ExprSuite) TestRegression12037() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: broken- +// spec: +// entrypoint: main +// templates: +// - name: main +// dag: +// tasks: +// - name: split +// template: foo +// - name: map +// template: foo +// depends: split - - name: foo - container: - image: alpine - command: - - sh - - -c - - | - echo "foo" -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, ".split") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, ".map") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// - name: foo +// container: +// image: alpine +// command: +// - sh +// - -c +// - | +// echo "foo" +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, ".split") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, ".map") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func TestExprLangSSuite(t *testing.T) { - suite.Run(t, new(ExprSuite)) -} +// func TestExprLangSSuite(t *testing.T) { +// suite.Run(t, new(ExprSuite)) +// } diff --git a/test/e2e/failed_main_test.go b/test/e2e/failed_main_test.go index 820d4f42ba38..9021334d4247 100644 --- a/test/e2e/failed_main_test.go +++ b/test/e2e/failed_main_test.go @@ -2,36 +2,36 @@ package e2e -import ( - "testing" +// import ( +// "testing" - "github.com/stretchr/testify/suite" +// "github.com/stretchr/testify/suite" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type FailedMainSuite struct { - fixtures.E2ESuite -} +// type FailedMainSuite struct { +// fixtures.E2ESuite +// } -func (s *FailedMainSuite) TestFailedMain() { - s.Given(). - Workflow(` -metadata: - generateName: failed-main- -spec: - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - args: [ exit, "1" ] -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed) -} +// func (s *FailedMainSuite) TestFailedMain() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: failed-main- +// spec: +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// args: [ exit, "1" ] +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed) +// } -func TestFailedMainSuite(t *testing.T) { - suite.Run(t, new(FailedMainSuite)) -} +// func TestFailedMainSuite(t *testing.T) { +// suite.Run(t, new(FailedMainSuite)) +// } diff --git a/test/e2e/hooks_test.go b/test/e2e/hooks_test.go index d10e06cb5491..aa0339469abf 100644 --- a/test/e2e/hooks_test.go +++ b/test/e2e/hooks_test.go @@ -2,876 +2,876 @@ package e2e -import ( - "strings" - "testing" +// import ( +// "strings" +// "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// apiv1 "k8s.io/api/core/v1" +// v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" - "github.com/argoproj/argo-workflows/v3/workflow/common" -) +// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// "github.com/argoproj/argo-workflows/v3/workflow/common" +// ) -type HooksSuite struct { - fixtures.E2ESuite -} +// type HooksSuite struct { +// fixtures.E2ESuite +// } -func (s *HooksSuite) TestWorkflowLevelHooksSuccessVersion() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: lifecycle-hook- -spec: - entrypoint: main - hooks: - running: - expression: workflow.status == "Running" - template: argosay - succeed: - expression: workflow.status == "Succeeded" - template: argosay +// func (s *HooksSuite) TestWorkflowLevelHooksSuccessVersion() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: lifecycle-hook- +// spec: +// entrypoint: main +// hooks: +// running: +// expression: workflow.status == "Running" +// template: argosay +// succeed: +// expression: workflow.status == "Succeeded" +// template: argosay - templates: - - name: main - steps: - - - name: step1 - template: argosay +// templates: +// - name: main +// steps: +// - - name: step1 +// template: argosay - - name: argosay - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, ".hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, ".hooks.succeed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, ".hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, ".hooks.succeed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *HooksSuite) TestWorkflowLevelHooksFailVersion() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: lifecycle-hook- -spec: - entrypoint: main - hooks: - running: - expression: workflow.status == "Running" - template: hook - failed: - expression: workflow.status == "Failed" - template: hook +// func (s *HooksSuite) TestWorkflowLevelHooksFailVersion() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: lifecycle-hook- +// spec: +// entrypoint: main +// hooks: +// running: +// expression: workflow.status == "Running" +// template: hook +// failed: +// expression: workflow.status == "Failed" +// template: hook - templates: - - name: main - steps: - - - name: step1 - template: argosay +// templates: +// - name: main +// steps: +// - - name: step1 +// template: argosay - - name: argosay - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay; exit 1"] +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay; exit 1"] - - name: hook - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, ".hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, ".hooks.failed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// - name: hook +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, ".hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, ".hooks.failed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *HooksSuite) TestTemplateLevelHooksStepSuccessVersion() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: lifecycle-hook-tmpl-level- -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: step-1 - hooks: - running: - expression: steps["step-1"].status == "Running" - template: argosay - succeed: - expression: steps["step-1"].status == "Succeeded" - template: argosay - template: argosay - - - name: step-2 - hooks: - running: - expression: steps["step-2"].status == "Running" - template: argosay - succeed: - expression: steps["step-2"].status == "Succeeded" - template: argosay - template: argosay - - name: argosay - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-1.hooks.succeed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-1.hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-2.hooks.succeed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) - // TODO: Temporarily comment out this assertion since it's flaky: - // The running hook is occasionally not triggered. Possibly because the step finishes too quickly - // while the controller did not get a chance to trigger this hook. - //.ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - // return strings.Contains(status.Name, "step-2.hooks.running") - //}, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - // assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - //}) -} +// func (s *HooksSuite) TestTemplateLevelHooksStepSuccessVersion() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: lifecycle-hook-tmpl-level- +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: step-1 +// hooks: +// running: +// expression: steps["step-1"].status == "Running" +// template: argosay +// succeed: +// expression: steps["step-1"].status == "Succeeded" +// template: argosay +// template: argosay +// - - name: step-2 +// hooks: +// running: +// expression: steps["step-2"].status == "Running" +// template: argosay +// succeed: +// expression: steps["step-2"].status == "Succeeded" +// template: argosay +// template: argosay +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-1.hooks.succeed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-1.hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-2.hooks.succeed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// // TODO: Temporarily comment out this assertion since it's flaky: +// // The running hook is occasionally not triggered. Possibly because the step finishes too quickly +// // while the controller did not get a chance to trigger this hook. +// //.ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// // return strings.Contains(status.Name, "step-2.hooks.running") +// //}, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// // assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// //}) +// } -func (s *HooksSuite) TestTemplateLevelHooksStepFailVersion() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: lifecycle-hook-tmpl-level- -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: step-1 - hooks: - running: - expression: steps["step-1"].status == "Running" - template: hook - failed: - expression: steps["step-1"].status == "Failed" - template: hook - template: argosay - - name: argosay - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay; exit 1"] - - name: hook - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-1.hooks.failed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-1.hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// func (s *HooksSuite) TestTemplateLevelHooksStepFailVersion() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: lifecycle-hook-tmpl-level- +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: step-1 +// hooks: +// running: +// expression: steps["step-1"].status == "Running" +// template: hook +// failed: +// expression: steps["step-1"].status == "Failed" +// template: hook +// template: argosay +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay; exit 1"] +// - name: hook +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-1.hooks.failed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-1.hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *HooksSuite) TestTemplateLevelHooksDagSuccessVersion() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: lifecycle-hook-tmpl-level- -spec: - entrypoint: main - templates: - - name: main - dag: - tasks: - - name: step-1 - hooks: - running: - expression: tasks["step-1"].status == "Running" - template: argosay - succeed: - expression: tasks["step-1"].status == "Succeeded" - template: argosay - template: argosay - - name: step-2 - hooks: - running: - expression: tasks["step-2"].status == "Running" - template: argosay - succeed: - expression: tasks["step-2"].status == "Succeeded" - template: argosay - template: argosay - dependencies: [step-1] - - name: argosay - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-1.hooks.succeed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-1.hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-2.hooks.succeed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-2.hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - // TODO: Temporarily comment out this assertion since it's flaky: - // The running hook is occasionally not triggered. Possibly because the step finishes too quickly - // while the controller did not get a chance to trigger this hook. - //assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// func (s *HooksSuite) TestTemplateLevelHooksDagSuccessVersion() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: lifecycle-hook-tmpl-level- +// spec: +// entrypoint: main +// templates: +// - name: main +// dag: +// tasks: +// - name: step-1 +// hooks: +// running: +// expression: tasks["step-1"].status == "Running" +// template: argosay +// succeed: +// expression: tasks["step-1"].status == "Succeeded" +// template: argosay +// template: argosay +// - name: step-2 +// hooks: +// running: +// expression: tasks["step-2"].status == "Running" +// template: argosay +// succeed: +// expression: tasks["step-2"].status == "Succeeded" +// template: argosay +// template: argosay +// dependencies: [step-1] +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-1.hooks.succeed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-1.hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-2.hooks.succeed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-2.hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// // TODO: Temporarily comment out this assertion since it's flaky: +// // The running hook is occasionally not triggered. Possibly because the step finishes too quickly +// // while the controller did not get a chance to trigger this hook. +// //assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *HooksSuite) TestTemplateLevelHooksDagFailVersion() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: lifecycle-hook-tmpl-level- -spec: - entrypoint: main - templates: - - name: main - dag: - tasks: - - name: step-1 - hooks: - running: - expression: tasks["step-1"].status == "Running" - template: hook - failed: - expression: tasks["step-1"].status == "Failed" - template: hook - template: argosay - - name: argosay - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay; exit 1"] - - name: hook - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-1.hooks.failed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "step-1.hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// func (s *HooksSuite) TestTemplateLevelHooksDagFailVersion() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: lifecycle-hook-tmpl-level- +// spec: +// entrypoint: main +// templates: +// - name: main +// dag: +// tasks: +// - name: step-1 +// hooks: +// running: +// expression: tasks["step-1"].status == "Running" +// template: hook +// failed: +// expression: tasks["step-1"].status == "Failed" +// template: hook +// template: argosay +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay; exit 1"] +// - name: hook +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-1.hooks.failed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "step-1.hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *HooksSuite) TestTemplateLevelHooksDagHasDependencyVersion() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: lifecycle-hook-tmpl-level- -spec: - templates: - - name: main - dag: - tasks: - - name: A - template: fail - hooks: - running: - template: hook - expression: tasks.A.status == "Running" - success: - template: hook - expression: tasks.A.status == "Succeeded" - - name: B - template: success - dependencies: - - A - hooks: - running: - template: hook - expression: tasks.B.status == "Running" - success: - template: hook - expression: tasks.B.status == "Succeeded" - - name: success - container: - name: '' - image: argoproj/argosay:v2 - command: - - /bin/sh - - '-c' - args: - - /bin/sleep 1; /argosay; exit 0 - - name: fail - container: - name: '' - image: argoproj/argosay:v2 - command: - - /bin/sh - - '-c' - args: - - /bin/sleep 1; /argosay; exit 1 - - name: hook - container: - name: '' - image: argoproj/argosay:v2 - command: - - /bin/sh - - '-c' - args: - - /bin/sleep 1; /argosay - entrypoint: main -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) - // Make sure unnecessary hooks are not triggered - assert.Equal(t, status.Progress, v1alpha1.Progress("1/2")) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "A.hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "B") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeOmitted, status.Phase) - }) -} +// func (s *HooksSuite) TestTemplateLevelHooksDagHasDependencyVersion() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: lifecycle-hook-tmpl-level- +// spec: +// templates: +// - name: main +// dag: +// tasks: +// - name: A +// template: fail +// hooks: +// running: +// template: hook +// expression: tasks.A.status == "Running" +// success: +// template: hook +// expression: tasks.A.status == "Succeeded" +// - name: B +// template: success +// dependencies: +// - A +// hooks: +// running: +// template: hook +// expression: tasks.B.status == "Running" +// success: +// template: hook +// expression: tasks.B.status == "Succeeded" +// - name: success +// container: +// name: '' +// image: argoproj/argosay:v2 +// command: +// - /bin/sh +// - '-c' +// args: +// - /bin/sleep 1; /argosay; exit 0 +// - name: fail +// container: +// name: '' +// image: argoproj/argosay:v2 +// command: +// - /bin/sh +// - '-c' +// args: +// - /bin/sleep 1; /argosay; exit 1 +// - name: hook +// container: +// name: '' +// image: argoproj/argosay:v2 +// command: +// - /bin/sh +// - '-c' +// args: +// - /bin/sleep 1; /argosay +// entrypoint: main +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) +// // Make sure unnecessary hooks are not triggered +// assert.Equal(t, status.Progress, v1alpha1.Progress("1/2")) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "A.hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "B") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeOmitted, status.Phase) +// }) +// } -func (s *HooksSuite) TestWorkflowLevelHooksWaitForTriggeredHook() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: lifecycle-hook- -spec: - entrypoint: main - hooks: - running: - expression: workflow.status == "Running" - template: argosay-sleep-2seconds - # This hook never triggered by following test. - # To guarantee workflow does not wait forever for untriggered hooks. - failed: - expression: workflow.status == "Failed" - template: argosay-sleep-2seconds - templates: - - name: main - steps: - - - name: step1 - template: argosay +// func (s *HooksSuite) TestWorkflowLevelHooksWaitForTriggeredHook() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: lifecycle-hook- +// spec: +// entrypoint: main +// hooks: +// running: +// expression: workflow.status == "Running" +// template: argosay-sleep-2seconds +// # This hook never triggered by following test. +// # To guarantee workflow does not wait forever for untriggered hooks. +// failed: +// expression: workflow.status == "Failed" +// template: argosay-sleep-2seconds +// templates: +// - name: main +// steps: +// - - name: step1 +// template: argosay - - name: argosay - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 1; /argosay"] - - name: argosay-sleep-2seconds - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 2; /argosay"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - assert.Equal(t, status.Progress, v1alpha1.Progress("2/2")) - assert.Equal(t, 1, int(status.Progress.N()/status.Progress.M())) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, ".hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 1; /argosay"] +// - name: argosay-sleep-2seconds +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 2; /argosay"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// assert.Equal(t, status.Progress, v1alpha1.Progress("2/2")) +// assert.Equal(t, 1, int(status.Progress.N()/status.Progress.M())) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, ".hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *HooksSuite) TestTemplateLevelHooksWaitForTriggeredHook() { - s.Given(). - Workflow(` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: example-steps -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: job - template: argosay - hooks: - running: - expression: steps['job'].status == "Running" - template: argosay-sleep-2seconds - failed: - expression: steps['job'].status == "Failed" - template: argosay-sleep-2seconds +// func (s *HooksSuite) TestTemplateLevelHooksWaitForTriggeredHook() { +// s.Given(). +// Workflow(` +// apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: example-steps +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: job +// template: argosay +// hooks: +// running: +// expression: steps['job'].status == "Running" +// template: argosay-sleep-2seconds +// failed: +// expression: steps['job'].status == "Failed" +// template: argosay-sleep-2seconds - - name: argosay - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 5; /argosay"] - - name: argosay-sleep-2seconds - container: - image: argoproj/argosay:v2 - command: ["/bin/sh", "-c"] - args: ["/bin/sleep 2; /argosay"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - assert.Equal(t, status.Progress, v1alpha1.Progress("2/2")) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "job.hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 5; /argosay"] +// - name: argosay-sleep-2seconds +// container: +// image: argoproj/argosay:v2 +// command: ["/bin/sh", "-c"] +// args: ["/bin/sleep 2; /argosay"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// assert.Equal(t, status.Progress, v1alpha1.Progress("2/2")) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "job.hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -// Ref: https://github.com/argoproj/argo-workflows/issues/11117 -func (s *HooksSuite) TestTemplateLevelHooksWaitForTriggeredHookAndRespectSynchronization() { - s.Given(). - Workflow(` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: example-steps-simple-mutex -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: job - template: exit0 - hooks: - running: - expression: steps['job'].status == "Running" - template: sleep - succeed: - expression: steps['job'].status == "Succeeded" - template: sleep - - name: sleep - synchronization: - mutexes: - - name: job - script: - image: alpine:latest - command: [/bin/sh] - source: | - sleep 4 - - name: exit0 - script: - image: alpine:latest - command: [/bin/sh] - source: | - sleep 2 - exit 0 -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - assert.Equal(t, status.Progress, v1alpha1.Progress("3/3")) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "job.hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "job.hooks.succeed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// // Ref: https://github.com/argoproj/argo-workflows/issues/11117 +// func (s *HooksSuite) TestTemplateLevelHooksWaitForTriggeredHookAndRespectSynchronization() { +// s.Given(). +// Workflow(` +// apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: example-steps-simple-mutex +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: job +// template: exit0 +// hooks: +// running: +// expression: steps['job'].status == "Running" +// template: sleep +// succeed: +// expression: steps['job'].status == "Succeeded" +// template: sleep +// - name: sleep +// synchronization: +// mutexes: +// - name: job +// script: +// image: alpine:latest +// command: [/bin/sh] +// source: | +// sleep 4 +// - name: exit0 +// script: +// image: alpine:latest +// command: [/bin/sh] +// source: | +// sleep 2 +// exit 0 +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// assert.Equal(t, status.Progress, v1alpha1.Progress("3/3")) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "job.hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "job.hooks.succeed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *HooksSuite) TestWorkflowLevelHooksWithRetry() { - s.Given(). - Workflow(` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - name: test-workflow-level-hooks-with-retry -spec: - templates: - - name: argosay - container: - image: argoproj/argosay:v2 - command: - - /bin/sh - - '-c' - args: - - /bin/sleep 1; exit 1 - retryStrategy: - limit: 1 - - name: hook - container: - image: argoproj/argosay:v2 - command: - - /bin/sh - - '-c' - args: - - /argosay - entrypoint: argosay - hooks: - failed: - template: hook - expression: workflow.status == "Failed" - running: - template: hook - expression: workflow.status == "Running" -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) - assert.Equal(t, status.Progress, v1alpha1.Progress("2/4")) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "test-workflow-level-hooks-with-retry.hooks.running" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - assert.True(t, status.NodeFlag.Hooked) - assert.False(t, status.NodeFlag.Retried) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "test-workflow-level-hooks-with-retry.hooks.failed" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - assert.True(t, status.NodeFlag.Hooked) - assert.False(t, status.NodeFlag.Retried) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "test-workflow-level-hooks-with-retry" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeFailed, status.Phase) - assert.Equal(t, v1alpha1.NodeTypeRetry, status.Type) - assert.Nil(t, status.NodeFlag) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "test-workflow-level-hooks-with-retry(0)" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeFailed, status.Phase) - assert.False(t, status.NodeFlag.Hooked) - assert.True(t, status.NodeFlag.Retried) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "test-workflow-level-hooks-with-retry(1)" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeFailed, status.Phase) - assert.False(t, status.NodeFlag.Hooked) - assert.True(t, status.NodeFlag.Retried) - }) -} +// func (s *HooksSuite) TestWorkflowLevelHooksWithRetry() { +// s.Given(). +// Workflow(` +// apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// name: test-workflow-level-hooks-with-retry +// spec: +// templates: +// - name: argosay +// container: +// image: argoproj/argosay:v2 +// command: +// - /bin/sh +// - '-c' +// args: +// - /bin/sleep 1; exit 1 +// retryStrategy: +// limit: 1 +// - name: hook +// container: +// image: argoproj/argosay:v2 +// command: +// - /bin/sh +// - '-c' +// args: +// - /argosay +// entrypoint: argosay +// hooks: +// failed: +// template: hook +// expression: workflow.status == "Failed" +// running: +// template: hook +// expression: workflow.status == "Running" +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) +// assert.Equal(t, status.Progress, v1alpha1.Progress("2/4")) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "test-workflow-level-hooks-with-retry.hooks.running" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// assert.True(t, status.NodeFlag.Hooked) +// assert.False(t, status.NodeFlag.Retried) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "test-workflow-level-hooks-with-retry.hooks.failed" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// assert.True(t, status.NodeFlag.Hooked) +// assert.False(t, status.NodeFlag.Retried) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "test-workflow-level-hooks-with-retry" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) +// assert.Equal(t, v1alpha1.NodeTypeRetry, status.Type) +// assert.Nil(t, status.NodeFlag) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "test-workflow-level-hooks-with-retry(0)" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) +// assert.False(t, status.NodeFlag.Hooked) +// assert.True(t, status.NodeFlag.Retried) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "test-workflow-level-hooks-with-retry(1)" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) +// assert.False(t, status.NodeFlag.Hooked) +// assert.True(t, status.NodeFlag.Retried) +// }) +// } -func (s *HooksSuite) TestTemplateLevelHooksWithRetry() { - var children []string - (s.Given(). - Workflow(` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - name: retries-with-hooks-and-artifact - labels: - workflows.argoproj.io/test: "true" - annotations: - workflows.argoproj.io/description: | - when retries and hooks are both included, the workflow cannot resolve the artifact - workflows.argoproj.io/version: '>= 3.5.0' -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: build - template: output-artifact - hooks: - started: - expression: steps["build"].status == "Running" - template: started - success: - expression: steps["build"].status == "Succeeded" - template: success - failed: - expression: steps["build"].status == "Failed" || steps["build"].status == "Error" - template: failed - - - name: print - template: print-artifact - arguments: - artifacts: - - name: message - from: "{{steps.build.outputs.artifacts.result}}" +// func (s *HooksSuite) TestTemplateLevelHooksWithRetry() { +// var children []string +// (s.Given(). +// Workflow(` +// apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// name: retries-with-hooks-and-artifact +// labels: +// workflows.argoproj.io/test: "true" +// annotations: +// workflows.argoproj.io/description: | +// when retries and hooks are both included, the workflow cannot resolve the artifact +// workflows.argoproj.io/version: '>= 3.5.0' +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: build +// template: output-artifact +// hooks: +// started: +// expression: steps["build"].status == "Running" +// template: started +// success: +// expression: steps["build"].status == "Succeeded" +// template: success +// failed: +// expression: steps["build"].status == "Failed" || steps["build"].status == "Error" +// template: failed +// - - name: print +// template: print-artifact +// arguments: +// artifacts: +// - name: message +// from: "{{steps.build.outputs.artifacts.result}}" - - name: output-artifact - script: - image: python:alpine3.6 - command: [ python ] - source: | - import time - import random - import sys - time.sleep(1) # lifecycle hook for running won't trigger unless it runs for more than "a few seconds" - with open("result.txt", "w") as f: - f.write("Welcome") - if {{retries}} == 2: - sys.exit(0) - sys.exit(1) - retryStrategy: - limit: 2 - outputs: - artifacts: - - name: result - path: /result.txt +// - name: output-artifact +// script: +// image: python:alpine3.6 +// command: [ python ] +// source: | +// import time +// import random +// import sys +// time.sleep(1) # lifecycle hook for running won't trigger unless it runs for more than "a few seconds" +// with open("result.txt", "w") as f: +// f.write("Welcome") +// if {{retries}} == 2: +// sys.exit(0) +// sys.exit(1) +// retryStrategy: +// limit: 2 +// outputs: +// artifacts: +// - name: result +// path: /result.txt - - name: started - container: - image: python:alpine3.6 - command: [sh, -c] - args: ["echo STARTED!"] +// - name: started +// container: +// image: python:alpine3.6 +// command: [sh, -c] +// args: ["echo STARTED!"] - - name: success - container: - image: python:alpine3.6 - command: [sh, -c] - args: ["echo SUCCEEDED!"] +// - name: success +// container: +// image: python:alpine3.6 +// command: [sh, -c] +// args: ["echo SUCCEEDED!"] - - name: failed - container: - image: python:alpine3.6 - command: [sh, -c] - args: ["echo FAILED or ERROR!"] +// - name: failed +// container: +// image: python:alpine3.6 +// command: [sh, -c] +// args: ["echo FAILED or ERROR!"] - - name: print-artifact - inputs: - artifacts: - - name: message - path: /tmp/message - container: - image: python:alpine3.6 - command: [sh, -c] - args: ["cat /tmp/message"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeCompleted). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.True(t, status.Fulfilled()) - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - for _, node := range status.Nodes { - if node.Type == v1alpha1.NodeTypeRetry { - assert.Equal(t, v1alpha1.NodeSucceeded, node.Phase) - children = node.Children - } - } - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "retries-with-hooks-and-artifact[0].build(0)" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Contains(t, children, status.ID) - assert.False(t, status.NodeFlag.Hooked) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "retries-with-hooks-and-artifact[0].build.hooks.started" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Contains(t, children, status.ID) - assert.True(t, status.NodeFlag.Hooked) - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - })). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "retries-with-hooks-and-artifact[0].build.hooks.success" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Contains(t, children, status.ID) - assert.True(t, status.NodeFlag.Hooked) - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "retries-with-hooks-and-artifact[1].print" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// - name: print-artifact +// inputs: +// artifacts: +// - name: message +// path: /tmp/message +// container: +// image: python:alpine3.6 +// command: [sh, -c] +// args: ["cat /tmp/message"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeCompleted). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.True(t, status.Fulfilled()) +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// for _, node := range status.Nodes { +// if node.Type == v1alpha1.NodeTypeRetry { +// assert.Equal(t, v1alpha1.NodeSucceeded, node.Phase) +// children = node.Children +// } +// } +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "retries-with-hooks-and-artifact[0].build(0)" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Contains(t, children, status.ID) +// assert.False(t, status.NodeFlag.Hooked) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "retries-with-hooks-and-artifact[0].build.hooks.started" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Contains(t, children, status.ID) +// assert.True(t, status.NodeFlag.Hooked) +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// })). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "retries-with-hooks-and-artifact[0].build.hooks.success" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Contains(t, children, status.ID) +// assert.True(t, status.NodeFlag.Hooked) +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "retries-with-hooks-and-artifact[1].print" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *HooksSuite) TestExitHandlerWithWorkflowLevelDeadline() { - var onExitNodeName string - (s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - name: exit-handler-with-workflow-level-deadline -spec: - entrypoint: main - activeDeadlineSeconds: 1 - hooks: - exit: - template: exit-handler - templates: - - name: main - steps: - - - name: sleep - template: sleep - - name: exit-handler - steps: - - - name: sleep - template: sleep - - name: sleep - container: - image: argoproj/argosay:v2 - args: ["sleep", "5"] -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeCompleted). - WaitForWorkflow(fixtures.Condition(func(wf *v1alpha1.Workflow) (bool, string) { - onExitNodeName = common.GenerateOnExitNodeName(wf.ObjectMeta.Name) - onExitNode := wf.Status.Nodes.FindByDisplayName(onExitNodeName) - return onExitNode.Completed(), "exit handler completed" - })). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.DisplayName == onExitNodeName - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.True(t, status.NodeFlag.Hooked) - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - })) -} +// func (s *HooksSuite) TestExitHandlerWithWorkflowLevelDeadline() { +// var onExitNodeName string +// (s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// name: exit-handler-with-workflow-level-deadline +// spec: +// entrypoint: main +// activeDeadlineSeconds: 1 +// hooks: +// exit: +// template: exit-handler +// templates: +// - name: main +// steps: +// - - name: sleep +// template: sleep +// - name: exit-handler +// steps: +// - - name: sleep +// template: sleep +// - name: sleep +// container: +// image: argoproj/argosay:v2 +// args: ["sleep", "5"] +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeCompleted). +// WaitForWorkflow(fixtures.Condition(func(wf *v1alpha1.Workflow) (bool, string) { +// onExitNodeName = common.GenerateOnExitNodeName(wf.ObjectMeta.Name) +// onExitNode := wf.Status.Nodes.FindByDisplayName(onExitNodeName) +// return onExitNode.Completed(), "exit handler completed" +// })). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.DisplayName == onExitNodeName +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.True(t, status.NodeFlag.Hooked) +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// })) +// } -func (s *HooksSuite) TestHttpExitHandlerWithWorkflowLevelDeadline() { - var onExitNodeName string - (s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - name: http-exit-handler-with-workflow-level-deadline -spec: - entrypoint: main - activeDeadlineSeconds: 1 - hooks: - exit: - template: exit-handler - templates: - - name: main - steps: - - - name: sleep - template: sleep - - name: sleep - container: - image: argoproj/argosay:v2 - args: ["sleep", "5"] - - name: exit-handler - steps: - - - name: http - template: http - - name: http - http: - url: http://httpbin:9100/get -`).When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeCompleted). - WaitForWorkflow(fixtures.Condition(func(wf *v1alpha1.Workflow) (bool, string) { - onExitNodeName = common.GenerateOnExitNodeName(wf.ObjectMeta.Name) - onExitNode := wf.Status.Nodes.FindByDisplayName(onExitNodeName) - return onExitNode.Completed(), "exit handler completed" - })). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.DisplayName == onExitNodeName - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.True(t, status.NodeFlag.Hooked) - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - })) -} +// func (s *HooksSuite) TestHttpExitHandlerWithWorkflowLevelDeadline() { +// var onExitNodeName string +// (s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// name: http-exit-handler-with-workflow-level-deadline +// spec: +// entrypoint: main +// activeDeadlineSeconds: 1 +// hooks: +// exit: +// template: exit-handler +// templates: +// - name: main +// steps: +// - - name: sleep +// template: sleep +// - name: sleep +// container: +// image: argoproj/argosay:v2 +// args: ["sleep", "5"] +// - name: exit-handler +// steps: +// - - name: http +// template: http +// - name: http +// http: +// url: http://httpbin:9100/get +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeCompleted). +// WaitForWorkflow(fixtures.Condition(func(wf *v1alpha1.Workflow) (bool, string) { +// onExitNodeName = common.GenerateOnExitNodeName(wf.ObjectMeta.Name) +// onExitNode := wf.Status.Nodes.FindByDisplayName(onExitNodeName) +// return onExitNode.Completed(), "exit handler completed" +// })). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.DisplayName == onExitNodeName +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.True(t, status.NodeFlag.Hooked) +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// })) +// } -func TestHooksSuite(t *testing.T) { - suite.Run(t, new(HooksSuite)) -} +// func TestHooksSuite(t *testing.T) { +// suite.Run(t, new(HooksSuite)) +// } diff --git a/test/e2e/http_artifacts_test.go b/test/e2e/http_artifacts_test.go index e391560fc074..4f86bbe0fcdb 100644 --- a/test/e2e/http_artifacts_test.go +++ b/test/e2e/http_artifacts_test.go @@ -2,66 +2,66 @@ package e2e -import ( - "testing" +// import ( +// "testing" - "github.com/stretchr/testify/suite" +// "github.com/stretchr/testify/suite" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type HttpArtifactsSuite struct { - fixtures.E2ESuite -} +// type HttpArtifactsSuite struct { +// fixtures.E2ESuite +// } -func (s *HttpArtifactsSuite) TestInputArtifactHttp() { - s.Given(). - Workflow("@testdata/http/input-artifact-http.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *HttpArtifactsSuite) TestInputArtifactHttp() { +// s.Given(). +// Workflow("@testdata/http/input-artifact-http.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func (s *HttpArtifactsSuite) TestOutputArtifactHttp() { - s.Given(). - Workflow("@testdata/http/output-artifact-http.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *HttpArtifactsSuite) TestOutputArtifactHttp() { +// s.Given(). +// Workflow("@testdata/http/output-artifact-http.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func (s *HttpArtifactsSuite) TestBasicAuthArtifactHttp() { - s.Given(). - Workflow("@testdata/http/basic-auth-artifact-http.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *HttpArtifactsSuite) TestBasicAuthArtifactHttp() { +// s.Given(). +// Workflow("@testdata/http/basic-auth-artifact-http.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func (s *HttpArtifactsSuite) TestOAuthArtifactHttp() { - s.Given(). - Workflow("@testdata/http/oauth-artifact-http.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *HttpArtifactsSuite) TestOAuthArtifactHttp() { +// s.Given(). +// Workflow("@testdata/http/oauth-artifact-http.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func (s *HttpArtifactsSuite) TestClientCertAuthArtifactHttp() { - s.Given(). - Workflow("@testdata/http/clientcert-auth-artifact-http.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *HttpArtifactsSuite) TestClientCertAuthArtifactHttp() { +// s.Given(). +// Workflow("@testdata/http/clientcert-auth-artifact-http.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func (s *HttpArtifactsSuite) TestArtifactoryArtifacts() { - s.Given(). - Workflow("@testdata/http/artifactory-artifact.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded) -} +// func (s *HttpArtifactsSuite) TestArtifactoryArtifacts() { +// s.Given(). +// Workflow("@testdata/http/artifactory-artifact.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded) +// } -func TestHttpArtifactsSuite(t *testing.T) { - suite.Run(t, new(HttpArtifactsSuite)) -} +// func TestHttpArtifactsSuite(t *testing.T) { +// suite.Run(t, new(HttpArtifactsSuite)) +// } diff --git a/test/e2e/malformed_resources_test.go b/test/e2e/malformed_resources_test.go index 97f8f26ace73..8b5c5af2fd5f 100644 --- a/test/e2e/malformed_resources_test.go +++ b/test/e2e/malformed_resources_test.go @@ -2,92 +2,92 @@ package e2e -import ( - "testing" - "time" +// import ( +// "testing" +// "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type MalformedResourcesSuite struct { - fixtures.E2ESuite -} +// type MalformedResourcesSuite struct { +// fixtures.E2ESuite +// } -func (s *MalformedResourcesSuite) TestMalformedWorkflow() { - s.Given(). - Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflow.yaml"}, fixtures.NoError). - WorkflowName("malformed"). - When(). - // it is not possible to wait for this to finish, because it is malformed - Wait(3 * time.Second). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "malformed", metadata.Name) - assert.Equal(t, wfv1.WorkflowFailed, status.Phase) - }) -} +// func (s *MalformedResourcesSuite) TestMalformedWorkflow() { +// s.Given(). +// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflow.yaml"}, fixtures.NoError). +// WorkflowName("malformed"). +// When(). +// // it is not possible to wait for this to finish, because it is malformed +// Wait(3 * time.Second). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "malformed", metadata.Name) +// assert.Equal(t, wfv1.WorkflowFailed, status.Phase) +// }) +// } -func (s *MalformedResourcesSuite) TestMalformedWorkflowTemplate() { - s.Given(). - Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflowtemplate.yaml"}, fixtures.NoError). - Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflowtemplate.yaml"}, fixtures.NoError). - Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-workflow-template-ref.yaml"}, fixtures.NoError). - When(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "wellformed", metadata.Name) - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *MalformedResourcesSuite) TestMalformedWorkflowTemplate() { +// s.Given(). +// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflowtemplate.yaml"}, fixtures.NoError). +// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflowtemplate.yaml"}, fixtures.NoError). +// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-workflow-template-ref.yaml"}, fixtures.NoError). +// When(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "wellformed", metadata.Name) +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *MalformedResourcesSuite) TestMalformedWorkflowTemplateRef() { - s.Given(). - Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflowtemplate.yaml"}, fixtures.NoError). - Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-malformed-workflow-template-ref.yaml"}, fixtures.NoError). - When(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "wellformed", metadata.Name) - assert.Equal(t, wfv1.WorkflowError, status.Phase) - assert.Contains(t, status.Message, "malformed workflow template") - }) -} +// func (s *MalformedResourcesSuite) TestMalformedWorkflowTemplateRef() { +// s.Given(). +// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflowtemplate.yaml"}, fixtures.NoError). +// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-malformed-workflow-template-ref.yaml"}, fixtures.NoError). +// When(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "wellformed", metadata.Name) +// assert.Equal(t, wfv1.WorkflowError, status.Phase) +// assert.Contains(t, status.Message, "malformed workflow template") +// }) +// } -func (s *MalformedResourcesSuite) TestMalformedClusterWorkflowTemplate() { - s.Given(). - Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). - Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). - Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-cluster-workflow-template-ref.yaml"}, fixtures.NoError). - When(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "wellformed", metadata.Name) - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *MalformedResourcesSuite) TestMalformedClusterWorkflowTemplate() { +// s.Given(). +// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). +// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). +// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-cluster-workflow-template-ref.yaml"}, fixtures.NoError). +// When(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "wellformed", metadata.Name) +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *MalformedResourcesSuite) TestMalformedClusterWorkflowTemplateRef() { - s.Given(). - Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). - Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-malformed-cluster-workflow-template-ref.yaml"}, fixtures.NoError). - When(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "wellformed", metadata.Name) - assert.Equal(t, wfv1.WorkflowError, status.Phase) - assert.Contains(t, status.Message, "malformed cluster workflow template") - }) -} +// func (s *MalformedResourcesSuite) TestMalformedClusterWorkflowTemplateRef() { +// s.Given(). +// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). +// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-malformed-cluster-workflow-template-ref.yaml"}, fixtures.NoError). +// When(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "wellformed", metadata.Name) +// assert.Equal(t, wfv1.WorkflowError, status.Phase) +// assert.Contains(t, status.Message, "malformed cluster workflow template") +// }) +// } -func TestMalformedResourcesSuite(t *testing.T) { - suite.Run(t, new(MalformedResourcesSuite)) -} +// func TestMalformedResourcesSuite(t *testing.T) { +// suite.Run(t, new(MalformedResourcesSuite)) +// } diff --git a/test/e2e/pod_cleanup_test.go b/test/e2e/pod_cleanup_test.go index bf5f91cbac65..5a05f3f5a219 100644 --- a/test/e2e/pod_cleanup_test.go +++ b/test/e2e/pod_cleanup_test.go @@ -2,390 +2,390 @@ package e2e -import ( - "testing" +// import ( +// "testing" - "github.com/stretchr/testify/suite" +// "github.com/stretchr/testify/suite" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type PodCleanupSuite struct { - fixtures.E2ESuite -} +// type PodCleanupSuite struct { +// fixtures.E2ESuite +// } -func (s *PodCleanupSuite) TestNone() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup- -spec: - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodCompleted) -} +// func (s *PodCleanupSuite) TestNone() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup- +// spec: +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodCompleted) +// } -func (s *PodCleanupSuite) TestOnPodCompletion() { - s.Run("FailedPod", func() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-completion- -spec: - podGC: - strategy: OnPodCompletion - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - args: [exit, 1] -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) - }) - s.Run("SucceededPod", func() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-completion- -spec: - podGC: - strategy: OnPodCompletion - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) - }) -} +// func (s *PodCleanupSuite) TestOnPodCompletion() { +// s.Run("FailedPod", func() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-completion- +// spec: +// podGC: +// strategy: OnPodCompletion +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// args: [exit, 1] +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// }) +// s.Run("SucceededPod", func() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-completion- +// spec: +// podGC: +// strategy: OnPodCompletion +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// }) +// } -func (s *PodCleanupSuite) TestOnPodCompletionLabelSelected() { - s.Run("FailedPod", func() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-completion-label-selected- -spec: - podGC: - strategy: OnPodCompletion - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - args: [exit, 1] - metadata: - labels: - evicted: true -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) - }) - s.Run("SucceededPod", func() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-completion-label-selected- -spec: - podGC: - strategy: OnPodCompletion - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodCompleted) - }) -} +// func (s *PodCleanupSuite) TestOnPodCompletionLabelSelected() { +// s.Run("FailedPod", func() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-completion-label-selected- +// spec: +// podGC: +// strategy: OnPodCompletion +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// args: [exit, 1] +// metadata: +// labels: +// evicted: true +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// }) +// s.Run("SucceededPod", func() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-completion-label-selected- +// spec: +// podGC: +// strategy: OnPodCompletion +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodCompleted) +// }) +// } -func (s *PodCleanupSuite) TestOnPodSuccess() { - s.Run("FailedPod", func() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-success- -spec: - podGC: - strategy: OnPodSuccess - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - args: [exit, 1] -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodCompleted) - }) - s.Run("SucceededPod", func() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-success- -spec: - podGC: - strategy: OnPodSuccess - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) - }) -} +// func (s *PodCleanupSuite) TestOnPodSuccess() { +// s.Run("FailedPod", func() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-success- +// spec: +// podGC: +// strategy: OnPodSuccess +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// args: [exit, 1] +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodCompleted) +// }) +// s.Run("SucceededPod", func() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-success- +// spec: +// podGC: +// strategy: OnPodSuccess +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// }) +// } -func (s *PodCleanupSuite) TestOnPodSuccessLabelNotMatch() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-success-label-not-match- -spec: - podGC: - strategy: OnPodSuccess - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodCompleted) -} +// func (s *PodCleanupSuite) TestOnPodSuccessLabelNotMatch() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-success-label-not-match- +// spec: +// podGC: +// strategy: OnPodSuccess +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodCompleted) +// } -func (s *PodCleanupSuite) TestOnPodSuccessLabelMatch() { - s.Run("FailedPod", func() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-success-label-match- -spec: - podGC: - strategy: OnPodSuccess - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - args: [exit, 1] -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodCompleted) - }) - s.Run("SucceededPod", func() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-pod-success-label-match- -spec: - podGC: - strategy: OnPodSuccess - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - metadata: - labels: - evicted: true -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) - }) -} +// func (s *PodCleanupSuite) TestOnPodSuccessLabelMatch() { +// s.Run("FailedPod", func() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-success-label-match- +// spec: +// podGC: +// strategy: OnPodSuccess +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// args: [exit, 1] +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodCompleted) +// }) +// s.Run("SucceededPod", func() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-pod-success-label-match- +// spec: +// podGC: +// strategy: OnPodSuccess +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// metadata: +// labels: +// evicted: true +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// }) +// } -func (s *PodCleanupSuite) TestOnWorkflowCompletion() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-workflow-completion- -spec: - podGC: - strategy: OnWorkflowCompletion - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - args: [exit, 1] -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) -} +// func (s *PodCleanupSuite) TestOnWorkflowCompletion() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-workflow-completion- +// spec: +// podGC: +// strategy: OnWorkflowCompletion +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// args: [exit, 1] +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// } -func (s *PodCleanupSuite) TestOnWorkflowCompletionLabelNotMatch() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-workflow-completion-label-not-match- -spec: - podGC: - strategy: OnWorkflowCompletion - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - args: [exit, 1] -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodCompleted) -} +// func (s *PodCleanupSuite) TestOnWorkflowCompletionLabelNotMatch() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-workflow-completion-label-not-match- +// spec: +// podGC: +// strategy: OnWorkflowCompletion +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// args: [exit, 1] +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodCompleted) +// } -func (s *PodCleanupSuite) TestOnWorkflowCompletionLabelMatch() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-workflow-completion-label-match- -spec: - podGC: - strategy: OnWorkflowCompletion - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - args: [exit, 1] - metadata: - labels: - evicted: true -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) -} +// func (s *PodCleanupSuite) TestOnWorkflowCompletionLabelMatch() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-workflow-completion-label-match- +// spec: +// podGC: +// strategy: OnWorkflowCompletion +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// args: [exit, 1] +// metadata: +// labels: +// evicted: true +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// } -func (s *PodCleanupSuite) TestOnWorkflowSuccess() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-workflow-success- -spec: - podGC: - strategy: OnWorkflowSuccess - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) -} +// func (s *PodCleanupSuite) TestOnWorkflowSuccess() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-workflow-success- +// spec: +// podGC: +// strategy: OnWorkflowSuccess +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// } -func (s *PodCleanupSuite) TestOnWorkflowSuccessLabelNotMatch() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-workflow-success-label-not-match- -spec: - podGC: - strategy: OnWorkflowSuccess - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodCompleted) -} +// func (s *PodCleanupSuite) TestOnWorkflowSuccessLabelNotMatch() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-workflow-success-label-not-match- +// spec: +// podGC: +// strategy: OnWorkflowSuccess +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodCompleted) +// } -func (s *PodCleanupSuite) TestOnWorkflowSuccessLabelMatch() { - s.Given(). - Workflow(` -metadata: - generateName: test-pod-cleanup-on-workflow-success-label-match- -spec: - podGC: - strategy: OnWorkflowSuccess - labelSelector: - matchLabels: - evicted: true - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 - metadata: - labels: - evicted: true -`). - When(). - SubmitWorkflow(). - WaitForPod(fixtures.PodDeleted) -} +// func (s *PodCleanupSuite) TestOnWorkflowSuccessLabelMatch() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-pod-cleanup-on-workflow-success-label-match- +// spec: +// podGC: +// strategy: OnWorkflowSuccess +// labelSelector: +// matchLabels: +// evicted: true +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// metadata: +// labels: +// evicted: true +// `). +// When(). +// SubmitWorkflow(). +// WaitForPod(fixtures.PodDeleted) +// } -func (s *PodCleanupSuite) TestOnWorkflowTemplate() { - s.Given(). - WorkflowTemplate(` -metadata: - name: test-pod-cleanup -spec: - podGC: - strategy: OnWorkflowCompletion - entrypoint: main - templates: - - name: main - container: - image: argoproj/argosay:v2 -`). - When(). - CreateWorkflowTemplates(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForPod(fixtures.PodDeleted) -} +// func (s *PodCleanupSuite) TestOnWorkflowTemplate() { +// s.Given(). +// WorkflowTemplate(` +// metadata: +// name: test-pod-cleanup +// spec: +// podGC: +// strategy: OnWorkflowCompletion +// entrypoint: main +// templates: +// - name: main +// container: +// image: argoproj/argosay:v2 +// `). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForPod(fixtures.PodDeleted) +// } -func TestPodCleanupSuite(t *testing.T) { - suite.Run(t, new(PodCleanupSuite)) -} +// func TestPodCleanupSuite(t *testing.T) { +// suite.Run(t, new(PodCleanupSuite)) +// } diff --git a/test/e2e/progress_test.go b/test/e2e/progress_test.go index d277042de3d6..2893d4deda42 100644 --- a/test/e2e/progress_test.go +++ b/test/e2e/progress_test.go @@ -2,58 +2,58 @@ package e2e -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) - -type ProgressSuite struct { - fixtures.E2ESuite -} - -func (s *ProgressSuite) TestDefaultProgress() { - s.Given(). - Workflow("@testdata/basic-workflow.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.Progress("1/1"), status.Progress) - assert.Equal(t, wfv1.Progress("1/1"), status.Nodes[metadata.Name].Progress) - }) -} - -func (s *ProgressSuite) TestLoggedProgress() { - toHaveProgress := func(p wfv1.Progress) fixtures.Condition { - return func(wf *wfv1.Workflow) (bool, string) { - return wf.Status.Nodes[wf.Name].Progress == p && - wf.Status.Nodes.FindByDisplayName("progress").Progress == p, fmt.Sprintf("progress is %s", p) - } - } - - s.Given(). - Workflow("@testdata/progress-workflow.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeRunning). - WaitForWorkflow(toHaveProgress("0/100"), time.Minute). - WaitForWorkflow(toHaveProgress("50/100"), time.Minute). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.Progress("100/100"), status.Nodes[metadata.Name].Progress) - }) -} - -func TestProgressSuite(t *testing.T) { - suite.Run(t, new(ProgressSuite)) -} +// import ( +// "fmt" +// "testing" +// "time" + +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) + +// type ProgressSuite struct { +// fixtures.E2ESuite +// } + +// func (s *ProgressSuite) TestDefaultProgress() { +// s.Given(). +// Workflow("@testdata/basic-workflow.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.Progress("1/1"), status.Progress) +// assert.Equal(t, wfv1.Progress("1/1"), status.Nodes[metadata.Name].Progress) +// }) +// } + +// func (s *ProgressSuite) TestLoggedProgress() { +// toHaveProgress := func(p wfv1.Progress) fixtures.Condition { +// return func(wf *wfv1.Workflow) (bool, string) { +// return wf.Status.Nodes[wf.Name].Progress == p && +// wf.Status.Nodes.FindByDisplayName("progress").Progress == p, fmt.Sprintf("progress is %s", p) +// } +// } + +// s.Given(). +// Workflow("@testdata/progress-workflow.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeRunning). +// WaitForWorkflow(toHaveProgress("0/100"), time.Minute). +// WaitForWorkflow(toHaveProgress("50/100"), time.Minute). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.Progress("100/100"), status.Nodes[metadata.Name].Progress) +// }) +// } + +// func TestProgressSuite(t *testing.T) { +// suite.Run(t, new(ProgressSuite)) +// } diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index c834ae208410..364b41b8be46 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -3,234 +3,234 @@ package e2e import ( - "context" - "io" - "strings" + // "context" + // "io" + // "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" - apiv1 "k8s.io/api/core/v1" + // apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + // "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" ) -type RetryTestSuite struct { - fixtures.E2ESuite -} +// type RetryTestSuite struct { +// fixtures.E2ESuite +// } -func (s *RetryTestSuite) TestRetryLimit() { - s.Given(). - Workflow(` -metadata: - name: test-retry-limit -spec: - entrypoint: main - templates: - - name: main - retryStrategy: - limit: 0 - backoff: - duration: 2s - factor: 2 - maxDuration: 5m - container: - name: main - image: 'argoproj/argosay:v2' - args: [ exit, "1" ] -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed). - Then(). - ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) - assert.Equal(t, "No more retries left", status.Message) - assert.Equal(t, v1alpha1.Progress("0/1"), status.Progress) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "test-retry-limit" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeFailed, status.Phase) - assert.Equal(t, v1alpha1.NodeTypeRetry, status.Type) - assert.Nil(t, status.NodeFlag) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "test-retry-limit(0)" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeFailed, status.Phase) - assert.True(t, status.NodeFlag.Retried) - }) -} +// func (s *RetryTestSuite) TestRetryLimit() { +// s.Given(). +// Workflow(` +// metadata: +// name: test-retry-limit +// spec: +// entrypoint: main +// templates: +// - name: main +// retryStrategy: +// limit: 0 +// backoff: +// duration: 2s +// factor: 2 +// maxDuration: 5m +// container: +// name: main +// image: 'argoproj/argosay:v2' +// args: [ exit, "1" ] +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed). +// Then(). +// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) +// assert.Equal(t, "No more retries left", status.Message) +// assert.Equal(t, v1alpha1.Progress("0/1"), status.Progress) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "test-retry-limit" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) +// assert.Equal(t, v1alpha1.NodeTypeRetry, status.Type) +// assert.Nil(t, status.NodeFlag) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "test-retry-limit(0)" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) +// assert.True(t, status.NodeFlag.Retried) +// }) +// } -func (s *RetryTestSuite) TestRetryBackoff() { - s.Given(). - Workflow(` -metadata: - generateName: test-backoff-strategy- -spec: - entrypoint: main - templates: - - name: main - retryStrategy: - limit: '10' - backoff: - duration: 10s - maxDuration: 1m - container: - name: main - image: 'argoproj/argosay:v2' - args: [ exit, "1" ] -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(time.Second * 90). - Then(). - ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) - assert.LessOrEqual(t, len(status.Nodes), 10) - }) - s.Given(). - Workflow(` -metadata: - generateName: test-backoff-strategy- -spec: - entrypoint: main - templates: - - name: main - retryStrategy: - limit: 10 - backoff: - duration: 10s - maxDuration: 1m - container: - name: main - image: 'argoproj/argosay:v2' - args: [ exit, "1" ] -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(time.Second * 90). - Then(). - ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) - assert.LessOrEqual(t, len(status.Nodes), 10) - }) -} +// func (s *RetryTestSuite) TestRetryBackoff() { +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-backoff-strategy- +// spec: +// entrypoint: main +// templates: +// - name: main +// retryStrategy: +// limit: '10' +// backoff: +// duration: 10s +// maxDuration: 1m +// container: +// name: main +// image: 'argoproj/argosay:v2' +// args: [ exit, "1" ] +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(time.Second * 90). +// Then(). +// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) +// assert.LessOrEqual(t, len(status.Nodes), 10) +// }) +// s.Given(). +// Workflow(` +// metadata: +// generateName: test-backoff-strategy- +// spec: +// entrypoint: main +// templates: +// - name: main +// retryStrategy: +// limit: 10 +// backoff: +// duration: 10s +// maxDuration: 1m +// container: +// name: main +// image: 'argoproj/argosay:v2' +// args: [ exit, "1" ] +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(time.Second * 90). +// Then(). +// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) +// assert.LessOrEqual(t, len(status.Nodes), 10) +// }) +// } -func (s *RetryTestSuite) TestWorkflowTemplateWithRetryStrategyInContainerSet() { - var name string - var ns string - s.Given(). - WorkflowTemplate("@testdata/workflow-template-with-containerset.yaml"). - Workflow(` -metadata: - name: workflow-template-containerset -spec: - workflowTemplateRef: - name: containerset-with-retrystrategy -`). - When(). - CreateWorkflowTemplates(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowFailed, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return status.Name == "workflow-template-containerset" - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - name = pod.GetName() - ns = pod.GetNamespace() - }) - // Success, no need retry - s.Run("ContainerLogs", func() { - ctx := context.Background() - podLogOptions := &apiv1.PodLogOptions{Container: "c1"} - stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) - s.Require().NoError(err) - defer stream.Close() - logBytes, err := io.ReadAll(stream) - s.Require().NoError(err) - output := string(logBytes) - count := strings.Count(output, "capturing logs") - s.Equal(1, count) - s.Contains(output, "hi") - }) - // Command err. No retry logic is entered. - s.Run("ContainerLogs", func() { - ctx := context.Background() - podLogOptions := &apiv1.PodLogOptions{Container: "c2"} - stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) - s.Require().NoError(err) - defer stream.Close() - logBytes, err := io.ReadAll(stream) - s.Require().NoError(err) - output := string(logBytes) - count := strings.Count(output, "capturing logs") - s.Equal(0, count) - s.Contains(output, "executable file not found in $PATH") - }) - // Retry when err. - s.Run("ContainerLogs", func() { - ctx := context.Background() - podLogOptions := &apiv1.PodLogOptions{Container: "c3"} - stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) - s.Require().NoError(err) - defer stream.Close() - logBytes, err := io.ReadAll(stream) - s.Require().NoError(err) - output := string(logBytes) - count := strings.Count(output, "capturing logs") - s.Equal(2, count) - countFailureInfo := strings.Count(output, "intentional failure") - s.Equal(2, countFailureInfo) - }) -} +// func (s *RetryTestSuite) TestWorkflowTemplateWithRetryStrategyInContainerSet() { +// var name string +// var ns string +// s.Given(). +// WorkflowTemplate("@testdata/workflow-template-with-containerset.yaml"). +// Workflow(` +// metadata: +// name: workflow-template-containerset +// spec: +// workflowTemplateRef: +// name: containerset-with-retrystrategy +// `). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowFailed, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return status.Name == "workflow-template-containerset" +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// name = pod.GetName() +// ns = pod.GetNamespace() +// }) +// // Success, no need retry +// s.Run("ContainerLogs", func() { +// ctx := context.Background() +// podLogOptions := &apiv1.PodLogOptions{Container: "c1"} +// stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) +// s.Require().NoError(err) +// defer stream.Close() +// logBytes, err := io.ReadAll(stream) +// s.Require().NoError(err) +// output := string(logBytes) +// count := strings.Count(output, "capturing logs") +// s.Equal(1, count) +// s.Contains(output, "hi") +// }) +// // Command err. No retry logic is entered. +// s.Run("ContainerLogs", func() { +// ctx := context.Background() +// podLogOptions := &apiv1.PodLogOptions{Container: "c2"} +// stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) +// s.Require().NoError(err) +// defer stream.Close() +// logBytes, err := io.ReadAll(stream) +// s.Require().NoError(err) +// output := string(logBytes) +// count := strings.Count(output, "capturing logs") +// s.Equal(0, count) +// s.Contains(output, "executable file not found in $PATH") +// }) +// // Retry when err. +// s.Run("ContainerLogs", func() { +// ctx := context.Background() +// podLogOptions := &apiv1.PodLogOptions{Container: "c3"} +// stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) +// s.Require().NoError(err) +// defer stream.Close() +// logBytes, err := io.ReadAll(stream) +// s.Require().NoError(err) +// output := string(logBytes) +// count := strings.Count(output, "capturing logs") +// s.Equal(2, count) +// countFailureInfo := strings.Count(output, "intentional failure") +// s.Equal(2, countFailureInfo) +// }) +// } -func (s *RetryTestSuite) TestRetryNodeAntiAffinity() { - s.Given(). - Workflow(` -metadata: - name: test-nodeantiaffinity-strategy -spec: - entrypoint: main - templates: - - name: main - retryStrategy: - limit: '1' - retryPolicy: "Always" - affinity: - nodeAntiAffinity: {} - container: - name: main - image: 'argoproj/argosay:v2' - args: [ exit, "1" ] -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToHaveFailedPod). - Wait(5 * time.Second). - Then(). - ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - if status.Phase == wfv1.WorkflowFailed { - nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") - nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") - assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) - } - if status.Phase == wfv1.WorkflowRunning { - nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") - nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") - assert.Contains(t, nodeStatusRetry.Message, "didn't match Pod's node affinity/selector") - assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) - } - }) -} +// func (s *RetryTestSuite) TestRetryNodeAntiAffinity() { +// s.Given(). +// Workflow(` +// metadata: +// name: test-nodeantiaffinity-strategy +// spec: +// entrypoint: main +// templates: +// - name: main +// retryStrategy: +// limit: '1' +// retryPolicy: "Always" +// affinity: +// nodeAntiAffinity: {} +// container: +// name: main +// image: 'argoproj/argosay:v2' +// args: [ exit, "1" ] +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToHaveFailedPod). +// Wait(5 * time.Second). +// Then(). +// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// if status.Phase == wfv1.WorkflowFailed { +// nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") +// nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") +// assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) +// } +// if status.Phase == wfv1.WorkflowRunning { +// nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") +// nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") +// assert.Contains(t, nodeStatusRetry.Message, "didn't match Pod's node affinity/selector") +// assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) +// } +// }) +// } func (s *RetryTestSuite) TestRetryDaemonContainer() { s.Given(). diff --git a/test/e2e/semaphore_test.go b/test/e2e/semaphore_test.go index d1e41cdf3938..7ca3a9ee10fc 100644 --- a/test/e2e/semaphore_test.go +++ b/test/e2e/semaphore_test.go @@ -2,104 +2,104 @@ package e2e -import ( - "strings" - "testing" - "time" +// import ( +// "strings" +// "testing" +// "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type SemaphoreSuite struct { - fixtures.E2ESuite -} +// type SemaphoreSuite struct { +// fixtures.E2ESuite +// } -func (s *SemaphoreSuite) TestSynchronizationWfLevelMutex() { - s.Given(). - Workflow("@functional/synchronization-mutex-wf-level-1.yaml"). - When(). - SubmitWorkflow(). - Given(). - Workflow("@functional/synchronization-mutex-wf-level.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeWaitingOnAMutex, 90*time.Second). - WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -} +// func (s *SemaphoreSuite) TestSynchronizationWfLevelMutex() { +// s.Given(). +// Workflow("@functional/synchronization-mutex-wf-level-1.yaml"). +// When(). +// SubmitWorkflow(). +// Given(). +// Workflow("@functional/synchronization-mutex-wf-level.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeWaitingOnAMutex, 90*time.Second). +// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +// } -func (s *SemaphoreSuite) TestTemplateLevelMutex() { - s.Given(). - Workflow("@functional/synchronization-mutex-tmpl-level.yaml"). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeWaitingOnAMutex, 90*time.Second). - WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -} +// func (s *SemaphoreSuite) TestTemplateLevelMutex() { +// s.Given(). +// Workflow("@functional/synchronization-mutex-tmpl-level.yaml"). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeWaitingOnAMutex, 90*time.Second). +// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +// } -func (s *SemaphoreSuite) TestWorkflowLevelSemaphore() { - s.Given(). - Workflow("@testdata/semaphore-wf-level.yaml"). - When(). - CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToHavePhase(wfv1.WorkflowUnknown), 90*time.Second). - WaitForWorkflow(). - DeleteConfigMap("my-config"). - Then(). - When(). - WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -} +// func (s *SemaphoreSuite) TestWorkflowLevelSemaphore() { +// s.Given(). +// Workflow("@testdata/semaphore-wf-level.yaml"). +// When(). +// CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToHavePhase(wfv1.WorkflowUnknown), 90*time.Second). +// WaitForWorkflow(). +// DeleteConfigMap("my-config"). +// Then(). +// When(). +// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +// } -func (s *SemaphoreSuite) TestTemplateLevelSemaphore() { - s.Given(). - Workflow("@testdata/semaphore-tmpl-level.yaml"). - When(). - CreateConfigMap("my-config", map[string]string{"template": "1"}, map[string]string{}). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeRunning, 90*time.Second). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.True(t, status.Nodes.Any(func(n wfv1.NodeStatus) bool { - return strings.Contains(n.Message, "Waiting for") - })) - }). - When(). - WaitForWorkflow(time.Second * 90) -} +// func (s *SemaphoreSuite) TestTemplateLevelSemaphore() { +// s.Given(). +// Workflow("@testdata/semaphore-tmpl-level.yaml"). +// When(). +// CreateConfigMap("my-config", map[string]string{"template": "1"}, map[string]string{}). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeRunning, 90*time.Second). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.True(t, status.Nodes.Any(func(n wfv1.NodeStatus) bool { +// return strings.Contains(n.Message, "Waiting for") +// })) +// }). +// When(). +// WaitForWorkflow(time.Second * 90) +// } -func (s *SemaphoreSuite) TestSynchronizationTmplLevelMutexAndSemaphore() { - s.Given(). - Workflow("@functional/synchronization-tmpl-level-mutex-semaphore.yaml"). - When(). - CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -} +// func (s *SemaphoreSuite) TestSynchronizationTmplLevelMutexAndSemaphore() { +// s.Given(). +// Workflow("@functional/synchronization-tmpl-level-mutex-semaphore.yaml"). +// When(). +// CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +// } -func (s *SemaphoreSuite) TestSynchronizationMultiple() { - s.Given(). - Workflow("@functional/synchronization-multiple.yaml"). - When(). - CreateConfigMap("my-config", map[string]string{"workflow": "2"}, map[string]string{}). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -} +// func (s *SemaphoreSuite) TestSynchronizationMultiple() { +// s.Given(). +// Workflow("@functional/synchronization-multiple.yaml"). +// When(). +// CreateConfigMap("my-config", map[string]string{"workflow": "2"}, map[string]string{}). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +// } -// Legacy CRD entries: mutex and semaphore -func (s *SemaphoreSuite) TestSynchronizationLegacyMutexAndSemaphore() { - s.Given(). - Workflow("@functional/synchronization-legacy-mutex-semaphore.yaml"). - When(). - CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -} +// // Legacy CRD entries: mutex and semaphore +// func (s *SemaphoreSuite) TestSynchronizationLegacyMutexAndSemaphore() { +// s.Given(). +// Workflow("@functional/synchronization-legacy-mutex-semaphore.yaml"). +// When(). +// CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +// } -func TestSemaphoreSuite(t *testing.T) { - suite.Run(t, new(SemaphoreSuite)) -} +// func TestSemaphoreSuite(t *testing.T) { +// suite.Run(t, new(SemaphoreSuite)) +// } diff --git a/test/e2e/workflow_configmap_substitution_test.go b/test/e2e/workflow_configmap_substitution_test.go index 25655bebfbde..a7fd0e286992 100644 --- a/test/e2e/workflow_configmap_substitution_test.go +++ b/test/e2e/workflow_configmap_substitution_test.go @@ -2,229 +2,229 @@ package e2e -import ( - "testing" - "time" +// import ( +// "testing" +// "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type WorkflowConfigMapSelectorSubstitutionSuite struct { - fixtures.E2ESuite -} +// type WorkflowConfigMapSelectorSubstitutionSuite struct { +// fixtures.E2ESuite +// } -func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestKeySubstitution() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-template-configmapkeyselector-wf- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: msg - templates: - - name: whalesay - inputs: - parameters: - - name: message - valueFrom: - configMapKeyRef: - name: cmref-parameters - key: '{{ workflow.parameters.message }}' - container: - image: argoproj/argosay:v2 - args: - - echo - - "{{inputs.parameters.message}}" -`). - When(). - CreateConfigMap( - "cmref-parameters", - map[string]string{"msg": "hello world"}, - map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). - Wait(1 * time.Second). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - DeleteConfigMap("cmref-parameters"). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestKeySubstitution() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-template-configmapkeyselector-wf- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: message +// value: msg +// templates: +// - name: whalesay +// inputs: +// parameters: +// - name: message +// valueFrom: +// configMapKeyRef: +// name: cmref-parameters +// key: '{{ workflow.parameters.message }}' +// container: +// image: argoproj/argosay:v2 +// args: +// - echo +// - "{{inputs.parameters.message}}" +// `). +// When(). +// CreateConfigMap( +// "cmref-parameters", +// map[string]string{"msg": "hello world"}, +// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). +// Wait(1 * time.Second). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// DeleteConfigMap("cmref-parameters"). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestNameSubstitution() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-template-configmapkeyselector-wf- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: cm-name - value: cmref-parameters - templates: - - name: whalesay - inputs: - parameters: - - name: message - valueFrom: - configMapKeyRef: - name: '{{ workflow.parameters.cm-name}}' - key: msg - container: - image: argoproj/argosay:v2 - args: - - echo - - "{{inputs.parameters.message}}" -`). - When(). - CreateConfigMap( - "cmref-parameters", - map[string]string{"msg": "hello world"}, - map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). - Wait(1 * time.Second). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - DeleteConfigMap("cmref-parameters"). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestNameSubstitution() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-template-configmapkeyselector-wf- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: cm-name +// value: cmref-parameters +// templates: +// - name: whalesay +// inputs: +// parameters: +// - name: message +// valueFrom: +// configMapKeyRef: +// name: '{{ workflow.parameters.cm-name}}' +// key: msg +// container: +// image: argoproj/argosay:v2 +// args: +// - echo +// - "{{inputs.parameters.message}}" +// `). +// When(). +// CreateConfigMap( +// "cmref-parameters", +// map[string]string{"msg": "hello world"}, +// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). +// Wait(1 * time.Second). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// DeleteConfigMap("cmref-parameters"). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestInvalidNameParameterSubstitution() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-template-configmapkeyselector-wf- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: cm-name - value: cmref-parameters - templates: - - name: whalesay - inputs: - parameters: - - name: message - valueFrom: - configMapKeyRef: - name: '{{ workflow.parameters.cm-name }}' - key: msg - container: - image: argoproj/argosay:v2 - args: - - echo - - "{{inputs.parameters.message}}" -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeErrored) -} +// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestInvalidNameParameterSubstitution() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-template-configmapkeyselector-wf- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: cm-name +// value: cmref-parameters +// templates: +// - name: whalesay +// inputs: +// parameters: +// - name: message +// valueFrom: +// configMapKeyRef: +// name: '{{ workflow.parameters.cm-name }}' +// key: msg +// container: +// image: argoproj/argosay:v2 +// args: +// - echo +// - "{{inputs.parameters.message}}" +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeErrored) +// } -func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestDefaultParamValueWhenNotFound() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-template-configmapkeyselector-wf-default-param- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: msg - templates: - - name: whalesay - inputs: - parameters: - - name: message - valueFrom: - default: "default-val" - configMapKeyRef: - name: cmref-parameters - key: not-existing-key - container: - image: argoproj/argosay:v2 - args: - - echo - - "{{inputs.parameters.message}}" -`). - When(). - CreateConfigMap( - "cmref-parameters", - map[string]string{"msg": "hello world"}, - map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). - Wait(1 * time.Second). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - DeleteConfigMap("cmref-parameters"). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestDefaultParamValueWhenNotFound() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-template-configmapkeyselector-wf-default-param- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: message +// value: msg +// templates: +// - name: whalesay +// inputs: +// parameters: +// - name: message +// valueFrom: +// default: "default-val" +// configMapKeyRef: +// name: cmref-parameters +// key: not-existing-key +// container: +// image: argoproj/argosay:v2 +// args: +// - echo +// - "{{inputs.parameters.message}}" +// `). +// When(). +// CreateConfigMap( +// "cmref-parameters", +// map[string]string{"msg": "hello world"}, +// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). +// Wait(1 * time.Second). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// DeleteConfigMap("cmref-parameters"). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestGlobalArgDefaultCMParamValueWhenNotFound() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-template-cmkeyselector-wf-global-arg-default-param- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: simple-global-param - valueFrom: - default: "default value" - configMapKeyRef: - name: not-existing-cm - key: not-existing-key - templates: - - name: whalesay - container: - image: argoproj/argosay:v2 - command: [sh, -c] - args: ["sleep 1; echo -n {{workflow.parameters.simple-global-param}} > /tmp/message.txt"] - outputs: - parameters: - - name: message - valueFrom: - path: /tmp/message.txt -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "default value", status.Nodes[metadata.Name].Outputs.Parameters[0].Value.String()) - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestGlobalArgDefaultCMParamValueWhenNotFound() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-template-cmkeyselector-wf-global-arg-default-param- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: simple-global-param +// valueFrom: +// default: "default value" +// configMapKeyRef: +// name: not-existing-cm +// key: not-existing-key +// templates: +// - name: whalesay +// container: +// image: argoproj/argosay:v2 +// command: [sh, -c] +// args: ["sleep 1; echo -n {{workflow.parameters.simple-global-param}} > /tmp/message.txt"] +// outputs: +// parameters: +// - name: message +// valueFrom: +// path: /tmp/message.txt +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "default value", status.Nodes[metadata.Name].Outputs.Parameters[0].Value.String()) +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func TestConfigMapKeySelectorSubstitutionSuite(t *testing.T) { - suite.Run(t, new(WorkflowConfigMapSelectorSubstitutionSuite)) -} +// func TestConfigMapKeySelectorSubstitutionSuite(t *testing.T) { +// suite.Run(t, new(WorkflowConfigMapSelectorSubstitutionSuite)) +// } diff --git a/test/e2e/workflow_inputs_orverridable_test.go b/test/e2e/workflow_inputs_orverridable_test.go index a5a99f9505ab..49321219978d 100644 --- a/test/e2e/workflow_inputs_orverridable_test.go +++ b/test/e2e/workflow_inputs_orverridable_test.go @@ -2,201 +2,201 @@ package e2e -import ( - "testing" - "time" +// import ( +// "testing" +// "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type WorkflowInputsOverridableSuite struct { - fixtures.E2ESuite -} +// type WorkflowInputsOverridableSuite struct { +// fixtures.E2ESuite +// } -func (s *WorkflowInputsOverridableSuite) TestArgsValueParamsOverrideInputParamsValueFrom() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-inputs-overridable-wf- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: arg-value - templates: - - name: whalesay - container: - image: argoproj/argosay:v2 - args: - - echo - - "{{inputs.parameters.message}}" - inputs: - parameters: - - name: message - valueFrom: - configMapKeyRef: - name: cmref-parameters - key: cmref-key -`). - When(). - CreateConfigMap( - "cmref-parameters", - map[string]string{"cmref-key": "input-value"}, - map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). - Wait(1 * time.Second). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - DeleteConfigMap("cmref-parameters"). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowInputsOverridableSuite) TestArgsValueParamsOverrideInputParamsValueFrom() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-inputs-overridable-wf- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: message +// value: arg-value +// templates: +// - name: whalesay +// container: +// image: argoproj/argosay:v2 +// args: +// - echo +// - "{{inputs.parameters.message}}" +// inputs: +// parameters: +// - name: message +// valueFrom: +// configMapKeyRef: +// name: cmref-parameters +// key: cmref-key +// `). +// When(). +// CreateConfigMap( +// "cmref-parameters", +// map[string]string{"cmref-key": "input-value"}, +// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). +// Wait(1 * time.Second). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// DeleteConfigMap("cmref-parameters"). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowInputsOverridableSuite) TestArgsValueFromParamsOverrideInputParamsValueFrom() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-inputs-overridable-wf- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - valueFrom: - configMapKeyRef: - name: new-cmref-parameters - key: cmref-key - templates: - - name: whalesay - container: - image: argoproj/argosay:v2 - args: - - echo - - "{{inputs.parameters.message}}" - inputs: - parameters: - - name: message - valueFrom: - configMapKeyRef: - name: cmref-parameters - key: cmref-key -`). - When(). - CreateConfigMap( - "cmref-parameters", - map[string]string{"cmref-key": "input-value"}, - map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). - CreateConfigMap( - "new-cmref-parameters", - map[string]string{"cmref-key": "arg-value"}, - map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). - Wait(1 * time.Second). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - DeleteConfigMap("cmref-parameters"). - DeleteConfigMap("new-cmref-parameters"). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowInputsOverridableSuite) TestArgsValueFromParamsOverrideInputParamsValueFrom() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-inputs-overridable-wf- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: message +// valueFrom: +// configMapKeyRef: +// name: new-cmref-parameters +// key: cmref-key +// templates: +// - name: whalesay +// container: +// image: argoproj/argosay:v2 +// args: +// - echo +// - "{{inputs.parameters.message}}" +// inputs: +// parameters: +// - name: message +// valueFrom: +// configMapKeyRef: +// name: cmref-parameters +// key: cmref-key +// `). +// When(). +// CreateConfigMap( +// "cmref-parameters", +// map[string]string{"cmref-key": "input-value"}, +// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). +// CreateConfigMap( +// "new-cmref-parameters", +// map[string]string{"cmref-key": "arg-value"}, +// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). +// Wait(1 * time.Second). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// DeleteConfigMap("cmref-parameters"). +// DeleteConfigMap("new-cmref-parameters"). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowInputsOverridableSuite) TestArgsValueParamsOverrideInputParamsValue() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-template-wf- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - value: arg-value - templates: - - name: whalesay - container: - image: argoproj/argosay:v2 - args: - - echo - - "{{inputs.parameters.message}}" - inputs: - parameters: - - name: message - value: input-value -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowInputsOverridableSuite) TestArgsValueParamsOverrideInputParamsValue() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-template-wf- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: message +// value: arg-value +// templates: +// - name: whalesay +// container: +// image: argoproj/argosay:v2 +// args: +// - echo +// - "{{inputs.parameters.message}}" +// inputs: +// parameters: +// - name: message +// value: input-value +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowInputsOverridableSuite) TestArgsValueFromParamsOverrideInputParamsValue() { - s.Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-inputs-overridable-wf- - label: - workflows.argoproj.io/test: "true" -spec: - entrypoint: whalesay - arguments: - parameters: - - name: message - valueFrom: - configMapKeyRef: - name: cmref-parameters - key: cmref-key - templates: - - name: whalesay - container: - image: argoproj/argosay:v2 - args: - - echo - - "{{inputs.parameters.message}}" - inputs: - parameters: - - name: message - value: input-value -`). - When(). - CreateConfigMap( - "cmref-parameters", - map[string]string{"cmref-key": "arg-value"}, - map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). - Wait(1 * time.Second). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - DeleteConfigMap("cmref-parameters"). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowInputsOverridableSuite) TestArgsValueFromParamsOverrideInputParamsValue() { +// s.Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-inputs-overridable-wf- +// label: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: whalesay +// arguments: +// parameters: +// - name: message +// valueFrom: +// configMapKeyRef: +// name: cmref-parameters +// key: cmref-key +// templates: +// - name: whalesay +// container: +// image: argoproj/argosay:v2 +// args: +// - echo +// - "{{inputs.parameters.message}}" +// inputs: +// parameters: +// - name: message +// value: input-value +// `). +// When(). +// CreateConfigMap( +// "cmref-parameters", +// map[string]string{"cmref-key": "arg-value"}, +// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). +// Wait(1 * time.Second). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// DeleteConfigMap("cmref-parameters"). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func TestWorkflowInputsOverridableSuiteSuite(t *testing.T) { - suite.Run(t, new(WorkflowInputsOverridableSuite)) -} +// func TestWorkflowInputsOverridableSuiteSuite(t *testing.T) { +// suite.Run(t, new(WorkflowInputsOverridableSuite)) +// } diff --git a/test/e2e/workflow_template_test.go b/test/e2e/workflow_template_test.go index e4ba111dc1fe..373f6268cd48 100644 --- a/test/e2e/workflow_template_test.go +++ b/test/e2e/workflow_template_test.go @@ -2,183 +2,183 @@ package e2e -import ( - "strings" - "testing" +// import ( +// "strings" +// "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// apiv1 "k8s.io/api/core/v1" +// v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -) +// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// ) -type WorkflowTemplateSuite struct { - fixtures.E2ESuite -} +// type WorkflowTemplateSuite struct { +// fixtures.E2ESuite +// } -func (s *WorkflowTemplateSuite) TestNestedWorkflowTemplate() { - s.Given(). - WorkflowTemplate("@testdata/workflow-template-nested-template.yaml"). - WorkflowTemplate("@smoke/workflow-template-whalesay-template.yaml"). - When(). - CreateWorkflowTemplates(). - Given(). - Workflow(`apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: workflow-template-nested- -spec: - entrypoint: whalesay - templates: - - name: whalesay - steps: - - - name: call-whalesay-template - templateRef: - name: workflow-template-nested-template - template: whalesay-template - arguments: - parameters: - - name: message - value: "hello from nested" -`).When(). - SubmitWorkflow(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowTemplateSuite) TestNestedWorkflowTemplate() { +// s.Given(). +// WorkflowTemplate("@testdata/workflow-template-nested-template.yaml"). +// WorkflowTemplate("@smoke/workflow-template-whalesay-template.yaml"). +// When(). +// CreateWorkflowTemplates(). +// Given(). +// Workflow(`apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: workflow-template-nested- +// spec: +// entrypoint: whalesay +// templates: +// - name: whalesay +// steps: +// - - name: call-whalesay-template +// templateRef: +// name: workflow-template-nested-template +// template: whalesay-template +// arguments: +// parameters: +// - name: message +// value: "hello from nested" +// `).When(). +// SubmitWorkflow(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithEnum() { - s.Given(). - WorkflowTemplate("@testdata/workflow-template-with-enum-values.yaml"). - When(). - CreateWorkflowTemplates(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithEnum() { +// s.Given(). +// WorkflowTemplate("@testdata/workflow-template-with-enum-values.yaml"). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWorkflowMetadataSubstitution() { - s.Given(). - WorkflowTemplate("@testdata/workflow-template-sub-test.yaml"). - When(). - CreateWorkflowTemplates(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWorkflowMetadataSubstitution() { +// s.Given(). +// WorkflowTemplate("@testdata/workflow-template-sub-test.yaml"). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateResourceUnquotedExpressions() { - s.Given(). - WorkflowTemplate("@testdata/workflow-template-with-resource-expr.yaml"). - When(). - CreateWorkflowTemplates(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateResourceUnquotedExpressions() { +// s.Given(). +// WorkflowTemplate("@testdata/workflow-template-with-resource-expr.yaml"). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithParallelStepsRequiringPVC() { - s.Given(). - WorkflowTemplate("@testdata/loops-steps-limited-parallelism-pvc.yaml"). - When(). - CreateWorkflowTemplates(). - SubmitWorkflowsFromWorkflowTemplates(). - WaitForWorkflow(). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }). - ExpectPVCDeleted() -} +// func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithParallelStepsRequiringPVC() { +// s.Given(). +// WorkflowTemplate("@testdata/loops-steps-limited-parallelism-pvc.yaml"). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflowsFromWorkflowTemplates(). +// WaitForWorkflow(). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }). +// ExpectPVCDeleted() +// } -func (s *WorkflowTemplateSuite) TestWorkflowTemplateInvalidOnExit() { - s.Given(). - WorkflowTemplate("@testdata/workflow-template-invalid-onexit.yaml"). - Workflow(` -metadata: - generateName: workflow-template-invalid-onexit- -spec: - workflowTemplateRef: - name: workflow-template-invalid-onexit -`). - When(). - CreateWorkflowTemplates(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeErrored). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowError, status.Phase) - assert.Contains(t, status.Message, "error in exit template execution") - }). - ExpectPVCDeleted() -} +// func (s *WorkflowTemplateSuite) TestWorkflowTemplateInvalidOnExit() { +// s.Given(). +// WorkflowTemplate("@testdata/workflow-template-invalid-onexit.yaml"). +// Workflow(` +// metadata: +// generateName: workflow-template-invalid-onexit- +// spec: +// workflowTemplateRef: +// name: workflow-template-invalid-onexit +// `). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeErrored). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowError, status.Phase) +// assert.Contains(t, status.Message, "error in exit template execution") +// }). +// ExpectPVCDeleted() +// } -func (s *WorkflowTemplateSuite) TestWorkflowTemplateWithHook() { - s.Given(). - WorkflowTemplate("@testdata/workflow-templates/success-hook.yaml"). - Workflow(` -metadata: - generateName: workflow-template-hook- -spec: - workflowTemplateRef: - name: hook -`). - When(). - CreateWorkflowTemplates(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "hooks.running") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "hooks.succeed") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) - }) -} +// func (s *WorkflowTemplateSuite) TestWorkflowTemplateWithHook() { +// s.Given(). +// WorkflowTemplate("@testdata/workflow-templates/success-hook.yaml"). +// Workflow(` +// metadata: +// generateName: workflow-template-hook- +// spec: +// workflowTemplateRef: +// name: hook +// `). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "hooks.running") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "hooks.succeed") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) +// }) +// } -func (s *WorkflowTemplateSuite) TestWorkflowTemplateInvalidEntryPoint() { - s.Given(). - WorkflowTemplate("@testdata/workflow-template-invalid-entrypoint.yaml"). - Workflow(` -metadata: - generateName: workflow-template-invalid-entrypoint- -spec: - workflowTemplateRef: - name: workflow-template-invalid-entrypoint -`). - When(). - CreateWorkflowTemplates(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeErrored). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { - assert.Equal(t, v1alpha1.WorkflowError, status.Phase) - assert.Contains(t, status.Message, "error in entry template execution") - }). - ExpectPVCDeleted() -} +// func (s *WorkflowTemplateSuite) TestWorkflowTemplateInvalidEntryPoint() { +// s.Given(). +// WorkflowTemplate("@testdata/workflow-template-invalid-entrypoint.yaml"). +// Workflow(` +// metadata: +// generateName: workflow-template-invalid-entrypoint- +// spec: +// workflowTemplateRef: +// name: workflow-template-invalid-entrypoint +// `). +// When(). +// CreateWorkflowTemplates(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeErrored). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { +// assert.Equal(t, v1alpha1.WorkflowError, status.Phase) +// assert.Contains(t, status.Message, "error in entry template execution") +// }). +// ExpectPVCDeleted() +// } -func TestWorkflowTemplateSuite(t *testing.T) { - suite.Run(t, new(WorkflowTemplateSuite)) -} +// func TestWorkflowTemplateSuite(t *testing.T) { +// suite.Run(t, new(WorkflowTemplateSuite)) +// } diff --git a/test/e2e/workflow_test.go b/test/e2e/workflow_test.go index 76a0b6dc6a2b..00c856af552c 100644 --- a/test/e2e/workflow_test.go +++ b/test/e2e/workflow_test.go @@ -2,227 +2,227 @@ package e2e -import ( - "strings" - "testing" - "time" +// import ( +// "strings" +// "testing" +// "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// apiv1 "k8s.io/api/core/v1" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" - "github.com/argoproj/argo-workflows/v3/workflow/common" -) +// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +// "github.com/argoproj/argo-workflows/v3/workflow/common" +// ) -type WorkflowSuite struct { - fixtures.E2ESuite -} +// type WorkflowSuite struct { +// fixtures.E2ESuite +// } -func (s *WorkflowSuite) TestContainerTemplateAutomountServiceAccountTokenDisabled() { - s.Given().Workflow(` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: get-resources-via-container-template- - namespace: argo -spec: - serviceAccountName: argo - automountServiceAccountToken: false - executor: - serviceAccountName: get-cm - entrypoint: main - templates: - - name: main - container: - name: main - image: bitnami/kubectl - command: - - sh - args: - - -c - - | - kubectl get cm -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded, time.Minute*11). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowSuite) TestContainerTemplateAutomountServiceAccountTokenDisabled() { +// s.Given().Workflow(` +// apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: get-resources-via-container-template- +// namespace: argo +// spec: +// serviceAccountName: argo +// automountServiceAccountToken: false +// executor: +// serviceAccountName: get-cm +// entrypoint: main +// templates: +// - name: main +// container: +// name: main +// image: bitnami/kubectl +// command: +// - sh +// args: +// - -c +// - | +// kubectl get cm +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded, time.Minute*11). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowSuite) TestScriptTemplateAutomountServiceAccountTokenDisabled() { - s.Given().Workflow(` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: get-resources-via-script-template- - namespace: argo -spec: - serviceAccountName: argo - automountServiceAccountToken: false - executor: - serviceAccountName: get-cm - entrypoint: main - templates: - - name: main - script: - name: main - image: bitnami/kubectl - command: - - sh - source: - kubectl get cm -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeSucceeded, time.Minute*11). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }) -} +// func (s *WorkflowSuite) TestScriptTemplateAutomountServiceAccountTokenDisabled() { +// s.Given().Workflow(` +// apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: get-resources-via-script-template- +// namespace: argo +// spec: +// serviceAccountName: argo +// automountServiceAccountToken: false +// executor: +// serviceAccountName: get-cm +// entrypoint: main +// templates: +// - name: main +// script: +// name: main +// image: bitnami/kubectl +// command: +// - sh +// source: +// kubectl get cm +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeSucceeded, time.Minute*11). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }) +// } -func (s *WorkflowSuite) TestWorkflowFailedWhenAllPodSetFailedFromPending() { - (s.Given().Workflow(` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: active-deadline-fanout-template-level- - namespace: argo -spec: - entrypoint: entrypoint - templates: - - name: entrypoint - steps: - - - name: fanout - template: echo - arguments: - parameters: - - name: item - value: "{{item}}" - withItems: - - 1 - - 2 - - 3 - - 4 - - name: echo - inputs: - parameters: - - name: item - container: - image: centos:latest - imagePullPolicy: Always - command: - - sh - - '-c' - args: - - echo - - 'workflow number {{inputs.parameters.item}}' - - sleep - - '20' - activeDeadlineSeconds: 2 # defined on template level, not workflow level ! -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeFailed, time.Minute*11). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowFailed, status.Phase) - for _, node := range status.Nodes { - if node.Type == wfv1.NodeTypePod { - assert.Equal(t, wfv1.NodeFailed, node.Phase) - assert.Contains(t, node.Message, "Pod was active on the node longer than the specified deadline") - } - } - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "fanout(0:1)") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - for _, c := range pod.Status.ContainerStatuses { - if c.Name == common.WaitContainerName && c.State.Terminated == nil { - assert.NotNil(t, c.State.Waiting) - assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") - assert.Nil(t, c.State.Running) - } - } - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "fanout(1:2)") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - for _, c := range pod.Status.ContainerStatuses { - if c.Name == common.WaitContainerName && c.State.Terminated == nil { - assert.NotNil(t, c.State.Waiting) - assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") - assert.Nil(t, c.State.Running) - } - } - })). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "fanout(2:3)") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - for _, c := range pod.Status.ContainerStatuses { - if c.Name == common.WaitContainerName && c.State.Terminated == nil { - assert.NotNil(t, c.State.Waiting) - assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") - assert.Nil(t, c.State.Running) - } - } - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "fanout(3:4)") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - for _, c := range pod.Status.ContainerStatuses { - if c.Name == common.WaitContainerName && c.State.Terminated == nil { - assert.NotNil(t, c.State.Waiting) - assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") - assert.Nil(t, c.State.Running) - } - } - }) -} +// func (s *WorkflowSuite) TestWorkflowFailedWhenAllPodSetFailedFromPending() { +// (s.Given().Workflow(` +// apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: active-deadline-fanout-template-level- +// namespace: argo +// spec: +// entrypoint: entrypoint +// templates: +// - name: entrypoint +// steps: +// - - name: fanout +// template: echo +// arguments: +// parameters: +// - name: item +// value: "{{item}}" +// withItems: +// - 1 +// - 2 +// - 3 +// - 4 +// - name: echo +// inputs: +// parameters: +// - name: item +// container: +// image: centos:latest +// imagePullPolicy: Always +// command: +// - sh +// - '-c' +// args: +// - echo +// - 'workflow number {{inputs.parameters.item}}' +// - sleep +// - '20' +// activeDeadlineSeconds: 2 # defined on template level, not workflow level ! +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeFailed, time.Minute*11). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowFailed, status.Phase) +// for _, node := range status.Nodes { +// if node.Type == wfv1.NodeTypePod { +// assert.Equal(t, wfv1.NodeFailed, node.Phase) +// assert.Contains(t, node.Message, "Pod was active on the node longer than the specified deadline") +// } +// } +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "fanout(0:1)") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// for _, c := range pod.Status.ContainerStatuses { +// if c.Name == common.WaitContainerName && c.State.Terminated == nil { +// assert.NotNil(t, c.State.Waiting) +// assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") +// assert.Nil(t, c.State.Running) +// } +// } +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "fanout(1:2)") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// for _, c := range pod.Status.ContainerStatuses { +// if c.Name == common.WaitContainerName && c.State.Terminated == nil { +// assert.NotNil(t, c.State.Waiting) +// assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") +// assert.Nil(t, c.State.Running) +// } +// } +// })). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "fanout(2:3)") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// for _, c := range pod.Status.ContainerStatuses { +// if c.Name == common.WaitContainerName && c.State.Terminated == nil { +// assert.NotNil(t, c.State.Waiting) +// assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") +// assert.Nil(t, c.State.Running) +// } +// } +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "fanout(3:4)") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// for _, c := range pod.Status.ContainerStatuses { +// if c.Name == common.WaitContainerName && c.State.Terminated == nil { +// assert.NotNil(t, c.State.Waiting) +// assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") +// assert.Nil(t, c.State.Running) +// } +// } +// }) +// } -func (s *WorkflowSuite) TestWorkflowInlinePodName() { - s.Given().Workflow(` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: steps-inline- - labels: - workflows.argoproj.io/test: "true" -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: a - inline: - container: - image: argoproj/argosay:v2 - command: - - cowsay - args: - - "foo" -`). - When(). - SubmitWorkflow(). - WaitForWorkflow(fixtures.ToBeCompleted, time.Minute*1). - Then(). - ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) - }). - ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { - return strings.Contains(status.Name, "a") - }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { - assert.NotContains(t, pod.Name, "--") - }) -} +// func (s *WorkflowSuite) TestWorkflowInlinePodName() { +// s.Given().Workflow(` +// apiVersion: argoproj.io/v1alpha1 +// kind: Workflow +// metadata: +// generateName: steps-inline- +// labels: +// workflows.argoproj.io/test: "true" +// spec: +// entrypoint: main +// templates: +// - name: main +// steps: +// - - name: a +// inline: +// container: +// image: argoproj/argosay:v2 +// command: +// - cowsay +// args: +// - "foo" +// `). +// When(). +// SubmitWorkflow(). +// WaitForWorkflow(fixtures.ToBeCompleted, time.Minute*1). +// Then(). +// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { +// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) +// }). +// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { +// return strings.Contains(status.Name, "a") +// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { +// assert.NotContains(t, pod.Name, "--") +// }) +// } -func TestWorkflowSuite(t *testing.T) { - suite.Run(t, new(WorkflowSuite)) -} +// func TestWorkflowSuite(t *testing.T) { +// suite.Run(t, new(WorkflowSuite)) +// } From ea105b840fbde56fb18bfba90ccf34fe5b754096 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 14:48:45 +0300 Subject: [PATCH 37/50] fix(tests): debuggin daemon E2E Signed-off-by: MenD32 --- test/e2e/retry_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index 364b41b8be46..40d3bf22bb1a 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -19,9 +19,9 @@ import ( "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" ) -// type RetryTestSuite struct { -// fixtures.E2ESuite -// } +type RetryTestSuite struct { + fixtures.E2ESuite +} // func (s *RetryTestSuite) TestRetryLimit() { // s.Given(). From c60697bec12635a5ae2781114768fcd9d47cd230 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 15:10:49 +0300 Subject: [PATCH 38/50] fix(tests): debuggin daemon E2E Signed-off-by: MenD32 --- test/e2e/agent_test.go | 300 +-- test/e2e/cluster_workflow_template_test.go | 78 +- test/e2e/daemon_pod_test.go | 338 ++-- test/e2e/estimated_duration_test.go | 58 +- test/e2e/expr_lang.go | 118 +- test/e2e/failed_main_test.go | 56 +- test/e2e/hooks_test.go | 1686 ++++++++--------- test/e2e/http_artifacts_test.go | 106 +- test/e2e/malformed_resources_test.go | 160 +- test/e2e/pod_cleanup_test.go | 740 ++++---- test/e2e/progress_test.go | 110 +- test/e2e/retry_test.go | 420 ++-- test/e2e/semaphore_test.go | 180 +- .../workflow_configmap_substitution_test.go | 434 ++--- test/e2e/workflow_inputs_orverridable_test.go | 380 ++-- test/e2e/workflow_template_test.go | 336 ++-- test/e2e/workflow_test.go | 432 ++--- 17 files changed, 2966 insertions(+), 2966 deletions(-) diff --git a/test/e2e/agent_test.go b/test/e2e/agent_test.go index f3ce39efd3ae..2a071f4b4f4f 100644 --- a/test/e2e/agent_test.go +++ b/test/e2e/agent_test.go @@ -2,166 +2,166 @@ package e2e -// import ( -// "sort" -// "testing" -// "time" +import ( + "sort" + "testing" + "time" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/require" -// "github.com/stretchr/testify/suite" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type AgentSuite struct { -// fixtures.E2ESuite -// } +type AgentSuite struct { + fixtures.E2ESuite +} -// func (s *AgentSuite) TestParallel() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: http-template-par- -// workflowMetadata: -// labels: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: one -// template: http -// arguments: -// parameters: [{name: url, value: "https://argoproj.github.io"}] -// - name: two -// template: http -// arguments: -// parameters: [{name: url, value: "https://argoproj.github.io"}] -// - name: three -// template: http -// arguments: -// parameters: [{name: url, value: "https://argoproj.github.io"}] -// - name: four -// template: http -// arguments: -// parameters: [{name: url, value: "https://argoproj.github.io"}] -// - name: http -// inputs: -// parameters: -// - name: url -// http: -// url: "{{inputs.parameters.url}}" -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeCompleted). -// Then(). -// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// // Ensure that the workflow ran for less than 10 seconds -// assert.Less(t, status.FinishedAt.Sub(status.StartedAt.Time), time.Duration(10*fixtures.EnvFactor)*time.Second) +func (s *AgentSuite) TestParallel() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: http-template-par- + workflowMetadata: + labels: + workflows.argoproj.io/test: "true" +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: one + template: http + arguments: + parameters: [{name: url, value: "https://argoproj.github.io"}] + - name: two + template: http + arguments: + parameters: [{name: url, value: "https://argoproj.github.io"}] + - name: three + template: http + arguments: + parameters: [{name: url, value: "https://argoproj.github.io"}] + - name: four + template: http + arguments: + parameters: [{name: url, value: "https://argoproj.github.io"}] + - name: http + inputs: + parameters: + - name: url + http: + url: "{{inputs.parameters.url}}" +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeCompleted). + Then(). + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + // Ensure that the workflow ran for less than 10 seconds + assert.Less(t, status.FinishedAt.Sub(status.StartedAt.Time), time.Duration(10*fixtures.EnvFactor)*time.Second) -// var finishedTimes []time.Time -// var startTimes []time.Time -// for _, node := range status.Nodes { -// if node.Type != wfv1.NodeTypeHTTP { -// continue -// } -// startTimes = append(startTimes, node.StartedAt.Time) -// finishedTimes = append(finishedTimes, node.FinishedAt.Time) -// } + var finishedTimes []time.Time + var startTimes []time.Time + for _, node := range status.Nodes { + if node.Type != wfv1.NodeTypeHTTP { + continue + } + startTimes = append(startTimes, node.StartedAt.Time) + finishedTimes = append(finishedTimes, node.FinishedAt.Time) + } -// require.Len(t, finishedTimes, 4) -// sort.Slice(finishedTimes, func(i, j int) bool { -// return finishedTimes[i].Before(finishedTimes[j]) -// }) -// // Everything finished with a two second tolerance window -// assert.Less(t, finishedTimes[3].Sub(finishedTimes[0]), time.Duration(2)*time.Second) + require.Len(t, finishedTimes, 4) + sort.Slice(finishedTimes, func(i, j int) bool { + return finishedTimes[i].Before(finishedTimes[j]) + }) + // Everything finished with a two second tolerance window + assert.Less(t, finishedTimes[3].Sub(finishedTimes[0]), time.Duration(2)*time.Second) -// require.Len(t, startTimes, 4) -// sort.Slice(startTimes, func(i, j int) bool { -// return startTimes[i].Before(startTimes[j]) -// }) -// // Everything started with same time -// assert.Equal(t, time.Duration(0), startTimes[3].Sub(startTimes[0])) -// }) -// } + require.Len(t, startTimes, 4) + sort.Slice(startTimes, func(i, j int) bool { + return startTimes[i].Before(startTimes[j]) + }) + // Everything started with same time + assert.Equal(t, time.Duration(0), startTimes[3].Sub(startTimes[0])) + }) +} -// func (s *AgentSuite) TestStatusCondition() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: http-template-condition- -// workflowMetadata: -// labels: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: http-status-is-201-fails -// template: http-status-is-201 -// arguments: -// parameters: [{name: url, value: "http://httpbin:9100/status/200"}] -// - name: http-status-is-201-succeeds -// template: http-status-is-201 -// arguments: -// parameters: [{name: url, value: "http://httpbin:9100/status/201"}] -// - name: http-body-contains-google-fails -// template: http-body-contains-google -// arguments: -// parameters: [{name: url, value: "http://httpbin:9100/status/200"}] -// - name: http-body-contains-google-succeeds -// template: http-body-contains-google -// arguments: -// parameters: [{name: url, value: "https://google.com"}] -// - name: http-status-is-201 -// inputs: -// parameters: -// - name: url -// http: -// successCondition: "response.statusCode == 201" -// url: "{{inputs.parameters.url}}" -// - name: http-body-contains-google -// inputs: -// parameters: -// - name: url -// http: -// successCondition: "response.body contains \"google\"" -// url: "{{inputs.parameters.url}}" -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(2 * time.Minute). -// Then(). -// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowFailed, status.Phase) +func (s *AgentSuite) TestStatusCondition() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: http-template-condition- + workflowMetadata: + labels: + workflows.argoproj.io/test: "true" +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: http-status-is-201-fails + template: http-status-is-201 + arguments: + parameters: [{name: url, value: "http://httpbin:9100/status/200"}] + - name: http-status-is-201-succeeds + template: http-status-is-201 + arguments: + parameters: [{name: url, value: "http://httpbin:9100/status/201"}] + - name: http-body-contains-google-fails + template: http-body-contains-google + arguments: + parameters: [{name: url, value: "http://httpbin:9100/status/200"}] + - name: http-body-contains-google-succeeds + template: http-body-contains-google + arguments: + parameters: [{name: url, value: "https://google.com"}] + - name: http-status-is-201 + inputs: + parameters: + - name: url + http: + successCondition: "response.statusCode == 201" + url: "{{inputs.parameters.url}}" + - name: http-body-contains-google + inputs: + parameters: + - name: url + http: + successCondition: "response.body contains \"google\"" + url: "{{inputs.parameters.url}}" +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(2 * time.Minute). + Then(). + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowFailed, status.Phase) -// containsFails := status.Nodes.FindByDisplayName("http-body-contains-google-fails") -// require.NotNil(t, containsFails) -// assert.Equal(t, wfv1.NodeFailed, containsFails.Phase) + containsFails := status.Nodes.FindByDisplayName("http-body-contains-google-fails") + require.NotNil(t, containsFails) + assert.Equal(t, wfv1.NodeFailed, containsFails.Phase) -// containsSucceeds := status.Nodes.FindByDisplayName("http-body-contains-google-succeeds") -// require.NotNil(t, containsFails) -// assert.Equal(t, wfv1.NodeSucceeded, containsSucceeds.Phase) + containsSucceeds := status.Nodes.FindByDisplayName("http-body-contains-google-succeeds") + require.NotNil(t, containsFails) + assert.Equal(t, wfv1.NodeSucceeded, containsSucceeds.Phase) -// statusFails := status.Nodes.FindByDisplayName("http-status-is-201-fails") -// require.NotNil(t, statusFails) -// assert.Equal(t, wfv1.NodeFailed, statusFails.Phase) + statusFails := status.Nodes.FindByDisplayName("http-status-is-201-fails") + require.NotNil(t, statusFails) + assert.Equal(t, wfv1.NodeFailed, statusFails.Phase) -// statusSucceeds := status.Nodes.FindByDisplayName("http-status-is-201-succeeds") -// require.NotNil(t, statusFails) -// assert.Equal(t, wfv1.NodeSucceeded, statusSucceeds.Phase) -// }) -// } + statusSucceeds := status.Nodes.FindByDisplayName("http-status-is-201-succeeds") + require.NotNil(t, statusFails) + assert.Equal(t, wfv1.NodeSucceeded, statusSucceeds.Phase) + }) +} -// func TestAgentSuite(t *testing.T) { -// suite.Run(t, new(AgentSuite)) -// } +func TestAgentSuite(t *testing.T) { + suite.Run(t, new(AgentSuite)) +} diff --git a/test/e2e/cluster_workflow_template_test.go b/test/e2e/cluster_workflow_template_test.go index bd8a690dcd91..132700d71461 100644 --- a/test/e2e/cluster_workflow_template_test.go +++ b/test/e2e/cluster_workflow_template_test.go @@ -2,47 +2,47 @@ package e2e -// import ( -// "testing" +import ( + "testing" -// "github.com/stretchr/testify/suite" + "github.com/stretchr/testify/suite" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type ClusterWorkflowTemplateSuite struct { -// fixtures.E2ESuite -// } +type ClusterWorkflowTemplateSuite struct { + fixtures.E2ESuite +} -// func (s *ClusterWorkflowTemplateSuite) TestNestedClusterWorkflowTemplate() { -// s.Given(). -// ClusterWorkflowTemplate("@testdata/cluster-workflow-template-nested-template.yaml"). -// When().Given(). -// ClusterWorkflowTemplate("@smoke/cluster-workflow-template-whalesay-template.yaml"). -// When().CreateClusterWorkflowTemplates(). -// Given(). -// Workflow(` -// metadata: -// generateName: cwft-wf- -// spec: -// entrypoint: whalesay -// templates: -// - name: whalesay -// steps: -// - - name: call-whalesay-template -// templateRef: -// name: cluster-workflow-template-nested-template -// template: whalesay-template -// clusterScope: true -// arguments: -// parameters: -// - name: message -// value: hello from nested -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *ClusterWorkflowTemplateSuite) TestNestedClusterWorkflowTemplate() { + s.Given(). + ClusterWorkflowTemplate("@testdata/cluster-workflow-template-nested-template.yaml"). + When().Given(). + ClusterWorkflowTemplate("@smoke/cluster-workflow-template-whalesay-template.yaml"). + When().CreateClusterWorkflowTemplates(). + Given(). + Workflow(` +metadata: + generateName: cwft-wf- +spec: + entrypoint: whalesay + templates: + - name: whalesay + steps: + - - name: call-whalesay-template + templateRef: + name: cluster-workflow-template-nested-template + template: whalesay-template + clusterScope: true + arguments: + parameters: + - name: message + value: hello from nested +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func TestClusterWorkflowTemplateSuite(t *testing.T) { -// suite.Run(t, new(ClusterWorkflowTemplateSuite)) -// } +func TestClusterWorkflowTemplateSuite(t *testing.T) { + suite.Run(t, new(ClusterWorkflowTemplateSuite)) +} diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index dd2941715812..c92bcd29bd51 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -2,181 +2,181 @@ package e2e -// import ( -// "testing" +import ( + "testing" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/require" -// "github.com/stretchr/testify/suite" -// v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type DaemonPodSuite struct { -// fixtures.E2ESuite -// } +type DaemonPodSuite struct { + fixtures.E2ESuite +} -// func (s *DaemonPodSuite) TestWorkflowCompletesIfContainsDaemonPod() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: whalesay- -// spec: -// entrypoint: whalesay -// templates: -// - name: whalesay -// dag: -// tasks: -// - name: redis -// template: redis-tmpl -// - name: whale -// dependencies: [redis] -// template: whale-tmpl -// - name: redis-tmpl -// daemon: true -// container: -// image: argoproj/argosay:v2 -// args: ["sleep", "100s"] -// - name: whale-tmpl -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeCompleted). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.False(t, status.FinishedAt.IsZero()) -// }) -// } +func (s *DaemonPodSuite) TestWorkflowCompletesIfContainsDaemonPod() { + s.Given(). + Workflow(` +metadata: + generateName: whalesay- +spec: + entrypoint: whalesay + templates: + - name: whalesay + dag: + tasks: + - name: redis + template: redis-tmpl + - name: whale + dependencies: [redis] + template: whale-tmpl + - name: redis-tmpl + daemon: true + container: + image: argoproj/argosay:v2 + args: ["sleep", "100s"] + - name: whale-tmpl + container: + image: argoproj/argosay:v2 +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeCompleted). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.False(t, status.FinishedAt.IsZero()) + }) +} -// func (s *DaemonPodSuite) TestDaemonFromWorkflowTemplate() { -// s.Given(). -// WorkflowTemplate(` -// metadata: -// name: daemon -// spec: -// entrypoint: main -// templates: -// - name: main -// dag: -// tasks: -// - name: redis -// template: redis-tmpl -// - name: whale -// dependencies: [redis] -// template: whale-tmpl -// - name: redis-tmpl -// daemon: true -// container: -// image: argoproj/argosay:v2 -// args: ["sleep", "100s"] -// - name: whale-tmpl -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *DaemonPodSuite) TestDaemonFromWorkflowTemplate() { + s.Given(). + WorkflowTemplate(` +metadata: + name: daemon +spec: + entrypoint: main + templates: + - name: main + dag: + tasks: + - name: redis + template: redis-tmpl + - name: whale + dependencies: [redis] + template: whale-tmpl + - name: redis-tmpl + daemon: true + container: + image: argoproj/argosay:v2 + args: ["sleep", "100s"] + - name: whale-tmpl + container: + image: argoproj/argosay:v2 +`). + When(). + CreateWorkflowTemplates(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func (s *DaemonPodSuite) TestDaemonFromClusterWorkflowTemplate() { -// s.Given(). -// ClusterWorkflowTemplate(` -// metadata: -// name: daemon -// spec: -// entrypoint: main -// templates: -// - name: main -// dag: -// tasks: -// - name: redis -// template: redis-tmpl -// - name: whale -// dependencies: [redis] -// template: whale-tmpl -// - name: redis-tmpl -// daemon: true -// container: -// image: argoproj/argosay:v2 -// args: ["sleep", "100s"] -// - name: whale-tmpl -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// CreateClusterWorkflowTemplates(). -// SubmitWorkflowsFromClusterWorkflowTemplates(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *DaemonPodSuite) TestDaemonFromClusterWorkflowTemplate() { + s.Given(). + ClusterWorkflowTemplate(` +metadata: + name: daemon +spec: + entrypoint: main + templates: + - name: main + dag: + tasks: + - name: redis + template: redis-tmpl + - name: whale + dependencies: [redis] + template: whale-tmpl + - name: redis-tmpl + daemon: true + container: + image: argoproj/argosay:v2 + args: ["sleep", "100s"] + - name: whale-tmpl + container: + image: argoproj/argosay:v2 +`). + When(). + CreateClusterWorkflowTemplates(). + SubmitWorkflowsFromClusterWorkflowTemplates(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func (s *DaemonPodSuite) TestDaemonTemplateRef() { -// s.Given(). -// WorkflowTemplate(` -// metadata: -// name: broken-pipeline -// spec: -// entrypoint: main -// templates: -// - name: do-something -// container: -// image: argoproj/argosay:v2 -// - name: main -// dag: -// tasks: -// - name: do-something -// template: do-something -// - name: run-tests-broken -// depends: "do-something" -// templateRef: -// name: run-tests-broken -// template: main -// `). -// WorkflowTemplate(` -// metadata: -// name: run-tests-broken -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: postgres -// template: postgres -// - - name: run-tests-broken -// template: run-tests-broken -// - name: run-tests-broken -// container: -// image: argoproj/argosay:v2 -// - name: postgres -// daemon: true -// container: -// image: argoproj/argosay:v2 -// args: ["sleep", "100s"] -// name: database`). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *DaemonPodSuite) TestDaemonTemplateRef() { + s.Given(). + WorkflowTemplate(` +metadata: + name: broken-pipeline +spec: + entrypoint: main + templates: + - name: do-something + container: + image: argoproj/argosay:v2 + - name: main + dag: + tasks: + - name: do-something + template: do-something + - name: run-tests-broken + depends: "do-something" + templateRef: + name: run-tests-broken + template: main +`). + WorkflowTemplate(` +metadata: + name: run-tests-broken +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: postgres + template: postgres + - - name: run-tests-broken + template: run-tests-broken + - name: run-tests-broken + container: + image: argoproj/argosay:v2 + - name: postgres + daemon: true + container: + image: argoproj/argosay:v2 + args: ["sleep", "100s"] + name: database`). + When(). + CreateWorkflowTemplates(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func (s *DaemonPodSuite) TestMarkDaemonedPodSucceeded() { -// s.Given(). -// Workflow("@testdata/daemoned-pod-completed.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// node := status.Nodes.FindByDisplayName("daemoned") -// require.NotNil(t, node) -// assert.Equal(t, v1alpha1.NodeSucceeded, node.Phase) -// }) -// } +func (s *DaemonPodSuite) TestMarkDaemonedPodSucceeded() { + s.Given(). + Workflow("@testdata/daemoned-pod-completed.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + node := status.Nodes.FindByDisplayName("daemoned") + require.NotNil(t, node) + assert.Equal(t, v1alpha1.NodeSucceeded, node.Phase) + }) +} -// func TestDaemonPodSuite(t *testing.T) { -// suite.Run(t, new(DaemonPodSuite)) -// } +func TestDaemonPodSuite(t *testing.T) { + suite.Run(t, new(DaemonPodSuite)) +} diff --git a/test/e2e/estimated_duration_test.go b/test/e2e/estimated_duration_test.go index 7d87b42acb48..a510d83d68f3 100644 --- a/test/e2e/estimated_duration_test.go +++ b/test/e2e/estimated_duration_test.go @@ -2,37 +2,37 @@ package e2e -// import ( -// "testing" +import ( + "testing" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type EstimatedDurationSuite struct { -// fixtures.E2ESuite -// } +type EstimatedDurationSuite struct { + fixtures.E2ESuite +} -// func (s *EstimatedDurationSuite) TestWorkflowTemplate() { -// s.Given(). -// WorkflowTemplate("@testdata/basic-workflowtemplate.yaml"). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForWorkflow(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.NotEmpty(t, status.EstimatedDuration) -// assert.NotEmpty(t, status.Nodes[metadata.Name].EstimatedDuration) -// }) -// } +func (s *EstimatedDurationSuite) TestWorkflowTemplate() { + s.Given(). + WorkflowTemplate("@testdata/basic-workflowtemplate.yaml"). + When(). + CreateWorkflowTemplates(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForWorkflow(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.NotEmpty(t, status.EstimatedDuration) + assert.NotEmpty(t, status.Nodes[metadata.Name].EstimatedDuration) + }) +} -// func TestEstimatedDurationSuite(t *testing.T) { -// suite.Run(t, new(EstimatedDurationSuite)) -// } +func TestEstimatedDurationSuite(t *testing.T) { + suite.Run(t, new(EstimatedDurationSuite)) +} diff --git a/test/e2e/expr_lang.go b/test/e2e/expr_lang.go index f9b5590045e9..d6fb31d60261 100644 --- a/test/e2e/expr_lang.go +++ b/test/e2e/expr_lang.go @@ -2,68 +2,68 @@ package e2e -// import ( -// "strings" -// "testing" +import ( + "strings" + "testing" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// apiv1 "k8s.io/api/core/v1" -// v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + apiv1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type ExprSuite struct { -// fixtures.E2ESuite -// } +type ExprSuite struct { + fixtures.E2ESuite +} -// func (s *ExprSuite) TestRegression12037() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: broken- -// spec: -// entrypoint: main -// templates: -// - name: main -// dag: -// tasks: -// - name: split -// template: foo -// - name: map -// template: foo -// depends: split +func (s *ExprSuite) TestRegression12037() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: broken- +spec: + entrypoint: main + templates: + - name: main + dag: + tasks: + - name: split + template: foo + - name: map + template: foo + depends: split -// - name: foo -// container: -// image: alpine -// command: -// - sh -// - -c -// - | -// echo "foo" -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, ".split") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, ".map") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } + - name: foo + container: + image: alpine + command: + - sh + - -c + - | + echo "foo" +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, ".split") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, ".map") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func TestExprLangSSuite(t *testing.T) { -// suite.Run(t, new(ExprSuite)) -// } +func TestExprLangSSuite(t *testing.T) { + suite.Run(t, new(ExprSuite)) +} diff --git a/test/e2e/failed_main_test.go b/test/e2e/failed_main_test.go index 9021334d4247..820d4f42ba38 100644 --- a/test/e2e/failed_main_test.go +++ b/test/e2e/failed_main_test.go @@ -2,36 +2,36 @@ package e2e -// import ( -// "testing" +import ( + "testing" -// "github.com/stretchr/testify/suite" + "github.com/stretchr/testify/suite" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type FailedMainSuite struct { -// fixtures.E2ESuite -// } +type FailedMainSuite struct { + fixtures.E2ESuite +} -// func (s *FailedMainSuite) TestFailedMain() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: failed-main- -// spec: -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// args: [ exit, "1" ] -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed) -// } +func (s *FailedMainSuite) TestFailedMain() { + s.Given(). + Workflow(` +metadata: + generateName: failed-main- +spec: + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + args: [ exit, "1" ] +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed) +} -// func TestFailedMainSuite(t *testing.T) { -// suite.Run(t, new(FailedMainSuite)) -// } +func TestFailedMainSuite(t *testing.T) { + suite.Run(t, new(FailedMainSuite)) +} diff --git a/test/e2e/hooks_test.go b/test/e2e/hooks_test.go index aa0339469abf..d10e06cb5491 100644 --- a/test/e2e/hooks_test.go +++ b/test/e2e/hooks_test.go @@ -2,876 +2,876 @@ package e2e -// import ( -// "strings" -// "testing" +import ( + "strings" + "testing" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// apiv1 "k8s.io/api/core/v1" -// v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + apiv1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// "github.com/argoproj/argo-workflows/v3/workflow/common" -// ) + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" + "github.com/argoproj/argo-workflows/v3/workflow/common" +) -// type HooksSuite struct { -// fixtures.E2ESuite -// } +type HooksSuite struct { + fixtures.E2ESuite +} -// func (s *HooksSuite) TestWorkflowLevelHooksSuccessVersion() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: lifecycle-hook- -// spec: -// entrypoint: main -// hooks: -// running: -// expression: workflow.status == "Running" -// template: argosay -// succeed: -// expression: workflow.status == "Succeeded" -// template: argosay +func (s *HooksSuite) TestWorkflowLevelHooksSuccessVersion() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: lifecycle-hook- +spec: + entrypoint: main + hooks: + running: + expression: workflow.status == "Running" + template: argosay + succeed: + expression: workflow.status == "Succeeded" + template: argosay -// templates: -// - name: main -// steps: -// - - name: step1 -// template: argosay + templates: + - name: main + steps: + - - name: step1 + template: argosay -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, ".hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, ".hooks.succeed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } + - name: argosay + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, ".hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, ".hooks.succeed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *HooksSuite) TestWorkflowLevelHooksFailVersion() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: lifecycle-hook- -// spec: -// entrypoint: main -// hooks: -// running: -// expression: workflow.status == "Running" -// template: hook -// failed: -// expression: workflow.status == "Failed" -// template: hook +func (s *HooksSuite) TestWorkflowLevelHooksFailVersion() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: lifecycle-hook- +spec: + entrypoint: main + hooks: + running: + expression: workflow.status == "Running" + template: hook + failed: + expression: workflow.status == "Failed" + template: hook -// templates: -// - name: main -// steps: -// - - name: step1 -// template: argosay + templates: + - name: main + steps: + - - name: step1 + template: argosay -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay; exit 1"] + - name: argosay + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay; exit 1"] -// - name: hook -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, ".hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, ".hooks.failed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } + - name: hook + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, ".hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, ".hooks.failed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *HooksSuite) TestTemplateLevelHooksStepSuccessVersion() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: lifecycle-hook-tmpl-level- -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: step-1 -// hooks: -// running: -// expression: steps["step-1"].status == "Running" -// template: argosay -// succeed: -// expression: steps["step-1"].status == "Succeeded" -// template: argosay -// template: argosay -// - - name: step-2 -// hooks: -// running: -// expression: steps["step-2"].status == "Running" -// template: argosay -// succeed: -// expression: steps["step-2"].status == "Succeeded" -// template: argosay -// template: argosay -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-1.hooks.succeed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-1.hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-2.hooks.succeed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// // TODO: Temporarily comment out this assertion since it's flaky: -// // The running hook is occasionally not triggered. Possibly because the step finishes too quickly -// // while the controller did not get a chance to trigger this hook. -// //.ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// // return strings.Contains(status.Name, "step-2.hooks.running") -// //}, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// // assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// //}) -// } +func (s *HooksSuite) TestTemplateLevelHooksStepSuccessVersion() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: lifecycle-hook-tmpl-level- +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: step-1 + hooks: + running: + expression: steps["step-1"].status == "Running" + template: argosay + succeed: + expression: steps["step-1"].status == "Succeeded" + template: argosay + template: argosay + - - name: step-2 + hooks: + running: + expression: steps["step-2"].status == "Running" + template: argosay + succeed: + expression: steps["step-2"].status == "Succeeded" + template: argosay + template: argosay + - name: argosay + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-1.hooks.succeed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-1.hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-2.hooks.succeed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) + // TODO: Temporarily comment out this assertion since it's flaky: + // The running hook is occasionally not triggered. Possibly because the step finishes too quickly + // while the controller did not get a chance to trigger this hook. + //.ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + // return strings.Contains(status.Name, "step-2.hooks.running") + //}, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + // assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + //}) +} -// func (s *HooksSuite) TestTemplateLevelHooksStepFailVersion() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: lifecycle-hook-tmpl-level- -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: step-1 -// hooks: -// running: -// expression: steps["step-1"].status == "Running" -// template: hook -// failed: -// expression: steps["step-1"].status == "Failed" -// template: hook -// template: argosay -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay; exit 1"] -// - name: hook -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-1.hooks.failed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-1.hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } +func (s *HooksSuite) TestTemplateLevelHooksStepFailVersion() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: lifecycle-hook-tmpl-level- +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: step-1 + hooks: + running: + expression: steps["step-1"].status == "Running" + template: hook + failed: + expression: steps["step-1"].status == "Failed" + template: hook + template: argosay + - name: argosay + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay; exit 1"] + - name: hook + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-1.hooks.failed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-1.hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *HooksSuite) TestTemplateLevelHooksDagSuccessVersion() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: lifecycle-hook-tmpl-level- -// spec: -// entrypoint: main -// templates: -// - name: main -// dag: -// tasks: -// - name: step-1 -// hooks: -// running: -// expression: tasks["step-1"].status == "Running" -// template: argosay -// succeed: -// expression: tasks["step-1"].status == "Succeeded" -// template: argosay -// template: argosay -// - name: step-2 -// hooks: -// running: -// expression: tasks["step-2"].status == "Running" -// template: argosay -// succeed: -// expression: tasks["step-2"].status == "Succeeded" -// template: argosay -// template: argosay -// dependencies: [step-1] -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-1.hooks.succeed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-1.hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-2.hooks.succeed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-2.hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// // TODO: Temporarily comment out this assertion since it's flaky: -// // The running hook is occasionally not triggered. Possibly because the step finishes too quickly -// // while the controller did not get a chance to trigger this hook. -// //assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } +func (s *HooksSuite) TestTemplateLevelHooksDagSuccessVersion() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: lifecycle-hook-tmpl-level- +spec: + entrypoint: main + templates: + - name: main + dag: + tasks: + - name: step-1 + hooks: + running: + expression: tasks["step-1"].status == "Running" + template: argosay + succeed: + expression: tasks["step-1"].status == "Succeeded" + template: argosay + template: argosay + - name: step-2 + hooks: + running: + expression: tasks["step-2"].status == "Running" + template: argosay + succeed: + expression: tasks["step-2"].status == "Succeeded" + template: argosay + template: argosay + dependencies: [step-1] + - name: argosay + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-1.hooks.succeed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-1.hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-2.hooks.succeed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-2.hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + // TODO: Temporarily comment out this assertion since it's flaky: + // The running hook is occasionally not triggered. Possibly because the step finishes too quickly + // while the controller did not get a chance to trigger this hook. + //assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *HooksSuite) TestTemplateLevelHooksDagFailVersion() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: lifecycle-hook-tmpl-level- -// spec: -// entrypoint: main -// templates: -// - name: main -// dag: -// tasks: -// - name: step-1 -// hooks: -// running: -// expression: tasks["step-1"].status == "Running" -// template: hook -// failed: -// expression: tasks["step-1"].status == "Failed" -// template: hook -// template: argosay -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay; exit 1"] -// - name: hook -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-1.hooks.failed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "step-1.hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } +func (s *HooksSuite) TestTemplateLevelHooksDagFailVersion() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: lifecycle-hook-tmpl-level- +spec: + entrypoint: main + templates: + - name: main + dag: + tasks: + - name: step-1 + hooks: + running: + expression: tasks["step-1"].status == "Running" + template: hook + failed: + expression: tasks["step-1"].status == "Failed" + template: hook + template: argosay + - name: argosay + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay; exit 1"] + - name: hook + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-1.hooks.failed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }).ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "step-1.hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *HooksSuite) TestTemplateLevelHooksDagHasDependencyVersion() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: lifecycle-hook-tmpl-level- -// spec: -// templates: -// - name: main -// dag: -// tasks: -// - name: A -// template: fail -// hooks: -// running: -// template: hook -// expression: tasks.A.status == "Running" -// success: -// template: hook -// expression: tasks.A.status == "Succeeded" -// - name: B -// template: success -// dependencies: -// - A -// hooks: -// running: -// template: hook -// expression: tasks.B.status == "Running" -// success: -// template: hook -// expression: tasks.B.status == "Succeeded" -// - name: success -// container: -// name: '' -// image: argoproj/argosay:v2 -// command: -// - /bin/sh -// - '-c' -// args: -// - /bin/sleep 1; /argosay; exit 0 -// - name: fail -// container: -// name: '' -// image: argoproj/argosay:v2 -// command: -// - /bin/sh -// - '-c' -// args: -// - /bin/sleep 1; /argosay; exit 1 -// - name: hook -// container: -// name: '' -// image: argoproj/argosay:v2 -// command: -// - /bin/sh -// - '-c' -// args: -// - /bin/sleep 1; /argosay -// entrypoint: main -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) -// // Make sure unnecessary hooks are not triggered -// assert.Equal(t, status.Progress, v1alpha1.Progress("1/2")) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "A.hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "B") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeOmitted, status.Phase) -// }) -// } +func (s *HooksSuite) TestTemplateLevelHooksDagHasDependencyVersion() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: lifecycle-hook-tmpl-level- +spec: + templates: + - name: main + dag: + tasks: + - name: A + template: fail + hooks: + running: + template: hook + expression: tasks.A.status == "Running" + success: + template: hook + expression: tasks.A.status == "Succeeded" + - name: B + template: success + dependencies: + - A + hooks: + running: + template: hook + expression: tasks.B.status == "Running" + success: + template: hook + expression: tasks.B.status == "Succeeded" + - name: success + container: + name: '' + image: argoproj/argosay:v2 + command: + - /bin/sh + - '-c' + args: + - /bin/sleep 1; /argosay; exit 0 + - name: fail + container: + name: '' + image: argoproj/argosay:v2 + command: + - /bin/sh + - '-c' + args: + - /bin/sleep 1; /argosay; exit 1 + - name: hook + container: + name: '' + image: argoproj/argosay:v2 + command: + - /bin/sh + - '-c' + args: + - /bin/sleep 1; /argosay + entrypoint: main +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) + // Make sure unnecessary hooks are not triggered + assert.Equal(t, status.Progress, v1alpha1.Progress("1/2")) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "A.hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "B") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeOmitted, status.Phase) + }) +} -// func (s *HooksSuite) TestWorkflowLevelHooksWaitForTriggeredHook() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: lifecycle-hook- -// spec: -// entrypoint: main -// hooks: -// running: -// expression: workflow.status == "Running" -// template: argosay-sleep-2seconds -// # This hook never triggered by following test. -// # To guarantee workflow does not wait forever for untriggered hooks. -// failed: -// expression: workflow.status == "Failed" -// template: argosay-sleep-2seconds -// templates: -// - name: main -// steps: -// - - name: step1 -// template: argosay +func (s *HooksSuite) TestWorkflowLevelHooksWaitForTriggeredHook() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: lifecycle-hook- +spec: + entrypoint: main + hooks: + running: + expression: workflow.status == "Running" + template: argosay-sleep-2seconds + # This hook never triggered by following test. + # To guarantee workflow does not wait forever for untriggered hooks. + failed: + expression: workflow.status == "Failed" + template: argosay-sleep-2seconds + templates: + - name: main + steps: + - - name: step1 + template: argosay -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 1; /argosay"] -// - name: argosay-sleep-2seconds -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 2; /argosay"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// assert.Equal(t, status.Progress, v1alpha1.Progress("2/2")) -// assert.Equal(t, 1, int(status.Progress.N()/status.Progress.M())) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, ".hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } + - name: argosay + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 1; /argosay"] + - name: argosay-sleep-2seconds + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 2; /argosay"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + assert.Equal(t, status.Progress, v1alpha1.Progress("2/2")) + assert.Equal(t, 1, int(status.Progress.N()/status.Progress.M())) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, ".hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *HooksSuite) TestTemplateLevelHooksWaitForTriggeredHook() { -// s.Given(). -// Workflow(` -// apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: example-steps -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: job -// template: argosay -// hooks: -// running: -// expression: steps['job'].status == "Running" -// template: argosay-sleep-2seconds -// failed: -// expression: steps['job'].status == "Failed" -// template: argosay-sleep-2seconds +func (s *HooksSuite) TestTemplateLevelHooksWaitForTriggeredHook() { + s.Given(). + Workflow(` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: example-steps +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: job + template: argosay + hooks: + running: + expression: steps['job'].status == "Running" + template: argosay-sleep-2seconds + failed: + expression: steps['job'].status == "Failed" + template: argosay-sleep-2seconds -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 5; /argosay"] -// - name: argosay-sleep-2seconds -// container: -// image: argoproj/argosay:v2 -// command: ["/bin/sh", "-c"] -// args: ["/bin/sleep 2; /argosay"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// assert.Equal(t, status.Progress, v1alpha1.Progress("2/2")) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "job.hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } + - name: argosay + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 5; /argosay"] + - name: argosay-sleep-2seconds + container: + image: argoproj/argosay:v2 + command: ["/bin/sh", "-c"] + args: ["/bin/sleep 2; /argosay"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + assert.Equal(t, status.Progress, v1alpha1.Progress("2/2")) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "job.hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// // Ref: https://github.com/argoproj/argo-workflows/issues/11117 -// func (s *HooksSuite) TestTemplateLevelHooksWaitForTriggeredHookAndRespectSynchronization() { -// s.Given(). -// Workflow(` -// apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: example-steps-simple-mutex -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: job -// template: exit0 -// hooks: -// running: -// expression: steps['job'].status == "Running" -// template: sleep -// succeed: -// expression: steps['job'].status == "Succeeded" -// template: sleep -// - name: sleep -// synchronization: -// mutexes: -// - name: job -// script: -// image: alpine:latest -// command: [/bin/sh] -// source: | -// sleep 4 -// - name: exit0 -// script: -// image: alpine:latest -// command: [/bin/sh] -// source: | -// sleep 2 -// exit 0 -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// assert.Equal(t, status.Progress, v1alpha1.Progress("3/3")) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "job.hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "job.hooks.succeed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } +// Ref: https://github.com/argoproj/argo-workflows/issues/11117 +func (s *HooksSuite) TestTemplateLevelHooksWaitForTriggeredHookAndRespectSynchronization() { + s.Given(). + Workflow(` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: example-steps-simple-mutex +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: job + template: exit0 + hooks: + running: + expression: steps['job'].status == "Running" + template: sleep + succeed: + expression: steps['job'].status == "Succeeded" + template: sleep + - name: sleep + synchronization: + mutexes: + - name: job + script: + image: alpine:latest + command: [/bin/sh] + source: | + sleep 4 + - name: exit0 + script: + image: alpine:latest + command: [/bin/sh] + source: | + sleep 2 + exit 0 +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + assert.Equal(t, status.Progress, v1alpha1.Progress("3/3")) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "job.hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "job.hooks.succeed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *HooksSuite) TestWorkflowLevelHooksWithRetry() { -// s.Given(). -// Workflow(` -// apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// name: test-workflow-level-hooks-with-retry -// spec: -// templates: -// - name: argosay -// container: -// image: argoproj/argosay:v2 -// command: -// - /bin/sh -// - '-c' -// args: -// - /bin/sleep 1; exit 1 -// retryStrategy: -// limit: 1 -// - name: hook -// container: -// image: argoproj/argosay:v2 -// command: -// - /bin/sh -// - '-c' -// args: -// - /argosay -// entrypoint: argosay -// hooks: -// failed: -// template: hook -// expression: workflow.status == "Failed" -// running: -// template: hook -// expression: workflow.status == "Running" -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) -// assert.Equal(t, status.Progress, v1alpha1.Progress("2/4")) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "test-workflow-level-hooks-with-retry.hooks.running" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// assert.True(t, status.NodeFlag.Hooked) -// assert.False(t, status.NodeFlag.Retried) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "test-workflow-level-hooks-with-retry.hooks.failed" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// assert.True(t, status.NodeFlag.Hooked) -// assert.False(t, status.NodeFlag.Retried) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "test-workflow-level-hooks-with-retry" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) -// assert.Equal(t, v1alpha1.NodeTypeRetry, status.Type) -// assert.Nil(t, status.NodeFlag) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "test-workflow-level-hooks-with-retry(0)" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) -// assert.False(t, status.NodeFlag.Hooked) -// assert.True(t, status.NodeFlag.Retried) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "test-workflow-level-hooks-with-retry(1)" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) -// assert.False(t, status.NodeFlag.Hooked) -// assert.True(t, status.NodeFlag.Retried) -// }) -// } +func (s *HooksSuite) TestWorkflowLevelHooksWithRetry() { + s.Given(). + Workflow(` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: test-workflow-level-hooks-with-retry +spec: + templates: + - name: argosay + container: + image: argoproj/argosay:v2 + command: + - /bin/sh + - '-c' + args: + - /bin/sleep 1; exit 1 + retryStrategy: + limit: 1 + - name: hook + container: + image: argoproj/argosay:v2 + command: + - /bin/sh + - '-c' + args: + - /argosay + entrypoint: argosay + hooks: + failed: + template: hook + expression: workflow.status == "Failed" + running: + template: hook + expression: workflow.status == "Running" +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) + assert.Equal(t, status.Progress, v1alpha1.Progress("2/4")) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "test-workflow-level-hooks-with-retry.hooks.running" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + assert.True(t, status.NodeFlag.Hooked) + assert.False(t, status.NodeFlag.Retried) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "test-workflow-level-hooks-with-retry.hooks.failed" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + assert.True(t, status.NodeFlag.Hooked) + assert.False(t, status.NodeFlag.Retried) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "test-workflow-level-hooks-with-retry" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeFailed, status.Phase) + assert.Equal(t, v1alpha1.NodeTypeRetry, status.Type) + assert.Nil(t, status.NodeFlag) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "test-workflow-level-hooks-with-retry(0)" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeFailed, status.Phase) + assert.False(t, status.NodeFlag.Hooked) + assert.True(t, status.NodeFlag.Retried) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "test-workflow-level-hooks-with-retry(1)" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeFailed, status.Phase) + assert.False(t, status.NodeFlag.Hooked) + assert.True(t, status.NodeFlag.Retried) + }) +} -// func (s *HooksSuite) TestTemplateLevelHooksWithRetry() { -// var children []string -// (s.Given(). -// Workflow(` -// apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// name: retries-with-hooks-and-artifact -// labels: -// workflows.argoproj.io/test: "true" -// annotations: -// workflows.argoproj.io/description: | -// when retries and hooks are both included, the workflow cannot resolve the artifact -// workflows.argoproj.io/version: '>= 3.5.0' -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: build -// template: output-artifact -// hooks: -// started: -// expression: steps["build"].status == "Running" -// template: started -// success: -// expression: steps["build"].status == "Succeeded" -// template: success -// failed: -// expression: steps["build"].status == "Failed" || steps["build"].status == "Error" -// template: failed -// - - name: print -// template: print-artifact -// arguments: -// artifacts: -// - name: message -// from: "{{steps.build.outputs.artifacts.result}}" +func (s *HooksSuite) TestTemplateLevelHooksWithRetry() { + var children []string + (s.Given(). + Workflow(` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: retries-with-hooks-and-artifact + labels: + workflows.argoproj.io/test: "true" + annotations: + workflows.argoproj.io/description: | + when retries and hooks are both included, the workflow cannot resolve the artifact + workflows.argoproj.io/version: '>= 3.5.0' +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: build + template: output-artifact + hooks: + started: + expression: steps["build"].status == "Running" + template: started + success: + expression: steps["build"].status == "Succeeded" + template: success + failed: + expression: steps["build"].status == "Failed" || steps["build"].status == "Error" + template: failed + - - name: print + template: print-artifact + arguments: + artifacts: + - name: message + from: "{{steps.build.outputs.artifacts.result}}" -// - name: output-artifact -// script: -// image: python:alpine3.6 -// command: [ python ] -// source: | -// import time -// import random -// import sys -// time.sleep(1) # lifecycle hook for running won't trigger unless it runs for more than "a few seconds" -// with open("result.txt", "w") as f: -// f.write("Welcome") -// if {{retries}} == 2: -// sys.exit(0) -// sys.exit(1) -// retryStrategy: -// limit: 2 -// outputs: -// artifacts: -// - name: result -// path: /result.txt + - name: output-artifact + script: + image: python:alpine3.6 + command: [ python ] + source: | + import time + import random + import sys + time.sleep(1) # lifecycle hook for running won't trigger unless it runs for more than "a few seconds" + with open("result.txt", "w") as f: + f.write("Welcome") + if {{retries}} == 2: + sys.exit(0) + sys.exit(1) + retryStrategy: + limit: 2 + outputs: + artifacts: + - name: result + path: /result.txt -// - name: started -// container: -// image: python:alpine3.6 -// command: [sh, -c] -// args: ["echo STARTED!"] + - name: started + container: + image: python:alpine3.6 + command: [sh, -c] + args: ["echo STARTED!"] -// - name: success -// container: -// image: python:alpine3.6 -// command: [sh, -c] -// args: ["echo SUCCEEDED!"] + - name: success + container: + image: python:alpine3.6 + command: [sh, -c] + args: ["echo SUCCEEDED!"] -// - name: failed -// container: -// image: python:alpine3.6 -// command: [sh, -c] -// args: ["echo FAILED or ERROR!"] + - name: failed + container: + image: python:alpine3.6 + command: [sh, -c] + args: ["echo FAILED or ERROR!"] -// - name: print-artifact -// inputs: -// artifacts: -// - name: message -// path: /tmp/message -// container: -// image: python:alpine3.6 -// command: [sh, -c] -// args: ["cat /tmp/message"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeCompleted). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.True(t, status.Fulfilled()) -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// for _, node := range status.Nodes { -// if node.Type == v1alpha1.NodeTypeRetry { -// assert.Equal(t, v1alpha1.NodeSucceeded, node.Phase) -// children = node.Children -// } -// } -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "retries-with-hooks-and-artifact[0].build(0)" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Contains(t, children, status.ID) -// assert.False(t, status.NodeFlag.Hooked) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "retries-with-hooks-and-artifact[0].build.hooks.started" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Contains(t, children, status.ID) -// assert.True(t, status.NodeFlag.Hooked) -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// })). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "retries-with-hooks-and-artifact[0].build.hooks.success" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Contains(t, children, status.ID) -// assert.True(t, status.NodeFlag.Hooked) -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "retries-with-hooks-and-artifact[1].print" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } + - name: print-artifact + inputs: + artifacts: + - name: message + path: /tmp/message + container: + image: python:alpine3.6 + command: [sh, -c] + args: ["cat /tmp/message"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeCompleted). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.True(t, status.Fulfilled()) + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + for _, node := range status.Nodes { + if node.Type == v1alpha1.NodeTypeRetry { + assert.Equal(t, v1alpha1.NodeSucceeded, node.Phase) + children = node.Children + } + } + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "retries-with-hooks-and-artifact[0].build(0)" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Contains(t, children, status.ID) + assert.False(t, status.NodeFlag.Hooked) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "retries-with-hooks-and-artifact[0].build.hooks.started" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Contains(t, children, status.ID) + assert.True(t, status.NodeFlag.Hooked) + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + })). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "retries-with-hooks-and-artifact[0].build.hooks.success" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Contains(t, children, status.ID) + assert.True(t, status.NodeFlag.Hooked) + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "retries-with-hooks-and-artifact[1].print" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *HooksSuite) TestExitHandlerWithWorkflowLevelDeadline() { -// var onExitNodeName string -// (s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// name: exit-handler-with-workflow-level-deadline -// spec: -// entrypoint: main -// activeDeadlineSeconds: 1 -// hooks: -// exit: -// template: exit-handler -// templates: -// - name: main -// steps: -// - - name: sleep -// template: sleep -// - name: exit-handler -// steps: -// - - name: sleep -// template: sleep -// - name: sleep -// container: -// image: argoproj/argosay:v2 -// args: ["sleep", "5"] -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeCompleted). -// WaitForWorkflow(fixtures.Condition(func(wf *v1alpha1.Workflow) (bool, string) { -// onExitNodeName = common.GenerateOnExitNodeName(wf.ObjectMeta.Name) -// onExitNode := wf.Status.Nodes.FindByDisplayName(onExitNodeName) -// return onExitNode.Completed(), "exit handler completed" -// })). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.DisplayName == onExitNodeName -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.True(t, status.NodeFlag.Hooked) -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// })) -// } +func (s *HooksSuite) TestExitHandlerWithWorkflowLevelDeadline() { + var onExitNodeName string + (s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: exit-handler-with-workflow-level-deadline +spec: + entrypoint: main + activeDeadlineSeconds: 1 + hooks: + exit: + template: exit-handler + templates: + - name: main + steps: + - - name: sleep + template: sleep + - name: exit-handler + steps: + - - name: sleep + template: sleep + - name: sleep + container: + image: argoproj/argosay:v2 + args: ["sleep", "5"] +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeCompleted). + WaitForWorkflow(fixtures.Condition(func(wf *v1alpha1.Workflow) (bool, string) { + onExitNodeName = common.GenerateOnExitNodeName(wf.ObjectMeta.Name) + onExitNode := wf.Status.Nodes.FindByDisplayName(onExitNodeName) + return onExitNode.Completed(), "exit handler completed" + })). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.DisplayName == onExitNodeName + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.True(t, status.NodeFlag.Hooked) + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + })) +} -// func (s *HooksSuite) TestHttpExitHandlerWithWorkflowLevelDeadline() { -// var onExitNodeName string -// (s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// name: http-exit-handler-with-workflow-level-deadline -// spec: -// entrypoint: main -// activeDeadlineSeconds: 1 -// hooks: -// exit: -// template: exit-handler -// templates: -// - name: main -// steps: -// - - name: sleep -// template: sleep -// - name: sleep -// container: -// image: argoproj/argosay:v2 -// args: ["sleep", "5"] -// - name: exit-handler -// steps: -// - - name: http -// template: http -// - name: http -// http: -// url: http://httpbin:9100/get -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeCompleted). -// WaitForWorkflow(fixtures.Condition(func(wf *v1alpha1.Workflow) (bool, string) { -// onExitNodeName = common.GenerateOnExitNodeName(wf.ObjectMeta.Name) -// onExitNode := wf.Status.Nodes.FindByDisplayName(onExitNodeName) -// return onExitNode.Completed(), "exit handler completed" -// })). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.DisplayName == onExitNodeName -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.True(t, status.NodeFlag.Hooked) -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// })) -// } +func (s *HooksSuite) TestHttpExitHandlerWithWorkflowLevelDeadline() { + var onExitNodeName string + (s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: http-exit-handler-with-workflow-level-deadline +spec: + entrypoint: main + activeDeadlineSeconds: 1 + hooks: + exit: + template: exit-handler + templates: + - name: main + steps: + - - name: sleep + template: sleep + - name: sleep + container: + image: argoproj/argosay:v2 + args: ["sleep", "5"] + - name: exit-handler + steps: + - - name: http + template: http + - name: http + http: + url: http://httpbin:9100/get +`).When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeCompleted). + WaitForWorkflow(fixtures.Condition(func(wf *v1alpha1.Workflow) (bool, string) { + onExitNodeName = common.GenerateOnExitNodeName(wf.ObjectMeta.Name) + onExitNode := wf.Status.Nodes.FindByDisplayName(onExitNodeName) + return onExitNode.Completed(), "exit handler completed" + })). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowFailed, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.DisplayName == onExitNodeName + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.True(t, status.NodeFlag.Hooked) + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + })) +} -// func TestHooksSuite(t *testing.T) { -// suite.Run(t, new(HooksSuite)) -// } +func TestHooksSuite(t *testing.T) { + suite.Run(t, new(HooksSuite)) +} diff --git a/test/e2e/http_artifacts_test.go b/test/e2e/http_artifacts_test.go index 4f86bbe0fcdb..e391560fc074 100644 --- a/test/e2e/http_artifacts_test.go +++ b/test/e2e/http_artifacts_test.go @@ -2,66 +2,66 @@ package e2e -// import ( -// "testing" +import ( + "testing" -// "github.com/stretchr/testify/suite" + "github.com/stretchr/testify/suite" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type HttpArtifactsSuite struct { -// fixtures.E2ESuite -// } +type HttpArtifactsSuite struct { + fixtures.E2ESuite +} -// func (s *HttpArtifactsSuite) TestInputArtifactHttp() { -// s.Given(). -// Workflow("@testdata/http/input-artifact-http.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *HttpArtifactsSuite) TestInputArtifactHttp() { + s.Given(). + Workflow("@testdata/http/input-artifact-http.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func (s *HttpArtifactsSuite) TestOutputArtifactHttp() { -// s.Given(). -// Workflow("@testdata/http/output-artifact-http.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *HttpArtifactsSuite) TestOutputArtifactHttp() { + s.Given(). + Workflow("@testdata/http/output-artifact-http.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func (s *HttpArtifactsSuite) TestBasicAuthArtifactHttp() { -// s.Given(). -// Workflow("@testdata/http/basic-auth-artifact-http.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *HttpArtifactsSuite) TestBasicAuthArtifactHttp() { + s.Given(). + Workflow("@testdata/http/basic-auth-artifact-http.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func (s *HttpArtifactsSuite) TestOAuthArtifactHttp() { -// s.Given(). -// Workflow("@testdata/http/oauth-artifact-http.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *HttpArtifactsSuite) TestOAuthArtifactHttp() { + s.Given(). + Workflow("@testdata/http/oauth-artifact-http.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func (s *HttpArtifactsSuite) TestClientCertAuthArtifactHttp() { -// s.Given(). -// Workflow("@testdata/http/clientcert-auth-artifact-http.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *HttpArtifactsSuite) TestClientCertAuthArtifactHttp() { + s.Given(). + Workflow("@testdata/http/clientcert-auth-artifact-http.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func (s *HttpArtifactsSuite) TestArtifactoryArtifacts() { -// s.Given(). -// Workflow("@testdata/http/artifactory-artifact.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded) -// } +func (s *HttpArtifactsSuite) TestArtifactoryArtifacts() { + s.Given(). + Workflow("@testdata/http/artifactory-artifact.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded) +} -// func TestHttpArtifactsSuite(t *testing.T) { -// suite.Run(t, new(HttpArtifactsSuite)) -// } +func TestHttpArtifactsSuite(t *testing.T) { + suite.Run(t, new(HttpArtifactsSuite)) +} diff --git a/test/e2e/malformed_resources_test.go b/test/e2e/malformed_resources_test.go index 8b5c5af2fd5f..97f8f26ace73 100644 --- a/test/e2e/malformed_resources_test.go +++ b/test/e2e/malformed_resources_test.go @@ -2,92 +2,92 @@ package e2e -// import ( -// "testing" -// "time" +import ( + "testing" + "time" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type MalformedResourcesSuite struct { -// fixtures.E2ESuite -// } +type MalformedResourcesSuite struct { + fixtures.E2ESuite +} -// func (s *MalformedResourcesSuite) TestMalformedWorkflow() { -// s.Given(). -// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflow.yaml"}, fixtures.NoError). -// WorkflowName("malformed"). -// When(). -// // it is not possible to wait for this to finish, because it is malformed -// Wait(3 * time.Second). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "malformed", metadata.Name) -// assert.Equal(t, wfv1.WorkflowFailed, status.Phase) -// }) -// } +func (s *MalformedResourcesSuite) TestMalformedWorkflow() { + s.Given(). + Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflow.yaml"}, fixtures.NoError). + WorkflowName("malformed"). + When(). + // it is not possible to wait for this to finish, because it is malformed + Wait(3 * time.Second). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "malformed", metadata.Name) + assert.Equal(t, wfv1.WorkflowFailed, status.Phase) + }) +} -// func (s *MalformedResourcesSuite) TestMalformedWorkflowTemplate() { -// s.Given(). -// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflowtemplate.yaml"}, fixtures.NoError). -// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflowtemplate.yaml"}, fixtures.NoError). -// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-workflow-template-ref.yaml"}, fixtures.NoError). -// When(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "wellformed", metadata.Name) -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *MalformedResourcesSuite) TestMalformedWorkflowTemplate() { + s.Given(). + Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflowtemplate.yaml"}, fixtures.NoError). + Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflowtemplate.yaml"}, fixtures.NoError). + Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-workflow-template-ref.yaml"}, fixtures.NoError). + When(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "wellformed", metadata.Name) + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *MalformedResourcesSuite) TestMalformedWorkflowTemplateRef() { -// s.Given(). -// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflowtemplate.yaml"}, fixtures.NoError). -// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-malformed-workflow-template-ref.yaml"}, fixtures.NoError). -// When(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "wellformed", metadata.Name) -// assert.Equal(t, wfv1.WorkflowError, status.Phase) -// assert.Contains(t, status.Message, "malformed workflow template") -// }) -// } +func (s *MalformedResourcesSuite) TestMalformedWorkflowTemplateRef() { + s.Given(). + Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflowtemplate.yaml"}, fixtures.NoError). + Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-malformed-workflow-template-ref.yaml"}, fixtures.NoError). + When(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "wellformed", metadata.Name) + assert.Equal(t, wfv1.WorkflowError, status.Phase) + assert.Contains(t, status.Message, "malformed workflow template") + }) +} -// func (s *MalformedResourcesSuite) TestMalformedClusterWorkflowTemplate() { -// s.Given(). -// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). -// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). -// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-cluster-workflow-template-ref.yaml"}, fixtures.NoError). -// When(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "wellformed", metadata.Name) -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *MalformedResourcesSuite) TestMalformedClusterWorkflowTemplate() { + s.Given(). + Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). + Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). + Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-cluster-workflow-template-ref.yaml"}, fixtures.NoError). + When(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "wellformed", metadata.Name) + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *MalformedResourcesSuite) TestMalformedClusterWorkflowTemplateRef() { -// s.Given(). -// Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). -// Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-malformed-cluster-workflow-template-ref.yaml"}, fixtures.NoError). -// When(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "wellformed", metadata.Name) -// assert.Equal(t, wfv1.WorkflowError, status.Phase) -// assert.Contains(t, status.Message, "malformed cluster workflow template") -// }) -// } +func (s *MalformedResourcesSuite) TestMalformedClusterWorkflowTemplateRef() { + s.Given(). + Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-clusterworkflowtemplate.yaml"}, fixtures.NoError). + Exec("kubectl", []string{"apply", "-f", "testdata/wellformed/wellformed-workflow-with-malformed-cluster-workflow-template-ref.yaml"}, fixtures.NoError). + When(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "wellformed", metadata.Name) + assert.Equal(t, wfv1.WorkflowError, status.Phase) + assert.Contains(t, status.Message, "malformed cluster workflow template") + }) +} -// func TestMalformedResourcesSuite(t *testing.T) { -// suite.Run(t, new(MalformedResourcesSuite)) -// } +func TestMalformedResourcesSuite(t *testing.T) { + suite.Run(t, new(MalformedResourcesSuite)) +} diff --git a/test/e2e/pod_cleanup_test.go b/test/e2e/pod_cleanup_test.go index 5a05f3f5a219..bf5f91cbac65 100644 --- a/test/e2e/pod_cleanup_test.go +++ b/test/e2e/pod_cleanup_test.go @@ -2,390 +2,390 @@ package e2e -// import ( -// "testing" +import ( + "testing" -// "github.com/stretchr/testify/suite" + "github.com/stretchr/testify/suite" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type PodCleanupSuite struct { -// fixtures.E2ESuite -// } +type PodCleanupSuite struct { + fixtures.E2ESuite +} -// func (s *PodCleanupSuite) TestNone() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup- -// spec: -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodCompleted) -// } +func (s *PodCleanupSuite) TestNone() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup- +spec: + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodCompleted) +} -// func (s *PodCleanupSuite) TestOnPodCompletion() { -// s.Run("FailedPod", func() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-completion- -// spec: -// podGC: -// strategy: OnPodCompletion -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// args: [exit, 1] -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// }) -// s.Run("SucceededPod", func() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-completion- -// spec: -// podGC: -// strategy: OnPodCompletion -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// }) -// } +func (s *PodCleanupSuite) TestOnPodCompletion() { + s.Run("FailedPod", func() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-completion- +spec: + podGC: + strategy: OnPodCompletion + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + args: [exit, 1] +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) + }) + s.Run("SucceededPod", func() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-completion- +spec: + podGC: + strategy: OnPodCompletion + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) + }) +} -// func (s *PodCleanupSuite) TestOnPodCompletionLabelSelected() { -// s.Run("FailedPod", func() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-completion-label-selected- -// spec: -// podGC: -// strategy: OnPodCompletion -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// args: [exit, 1] -// metadata: -// labels: -// evicted: true -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// }) -// s.Run("SucceededPod", func() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-completion-label-selected- -// spec: -// podGC: -// strategy: OnPodCompletion -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodCompleted) -// }) -// } +func (s *PodCleanupSuite) TestOnPodCompletionLabelSelected() { + s.Run("FailedPod", func() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-completion-label-selected- +spec: + podGC: + strategy: OnPodCompletion + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + args: [exit, 1] + metadata: + labels: + evicted: true +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) + }) + s.Run("SucceededPod", func() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-completion-label-selected- +spec: + podGC: + strategy: OnPodCompletion + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodCompleted) + }) +} -// func (s *PodCleanupSuite) TestOnPodSuccess() { -// s.Run("FailedPod", func() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-success- -// spec: -// podGC: -// strategy: OnPodSuccess -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// args: [exit, 1] -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodCompleted) -// }) -// s.Run("SucceededPod", func() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-success- -// spec: -// podGC: -// strategy: OnPodSuccess -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// }) -// } +func (s *PodCleanupSuite) TestOnPodSuccess() { + s.Run("FailedPod", func() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-success- +spec: + podGC: + strategy: OnPodSuccess + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + args: [exit, 1] +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodCompleted) + }) + s.Run("SucceededPod", func() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-success- +spec: + podGC: + strategy: OnPodSuccess + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) + }) +} -// func (s *PodCleanupSuite) TestOnPodSuccessLabelNotMatch() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-success-label-not-match- -// spec: -// podGC: -// strategy: OnPodSuccess -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodCompleted) -// } +func (s *PodCleanupSuite) TestOnPodSuccessLabelNotMatch() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-success-label-not-match- +spec: + podGC: + strategy: OnPodSuccess + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodCompleted) +} -// func (s *PodCleanupSuite) TestOnPodSuccessLabelMatch() { -// s.Run("FailedPod", func() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-success-label-match- -// spec: -// podGC: -// strategy: OnPodSuccess -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// args: [exit, 1] -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodCompleted) -// }) -// s.Run("SucceededPod", func() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-pod-success-label-match- -// spec: -// podGC: -// strategy: OnPodSuccess -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// metadata: -// labels: -// evicted: true -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// }) -// } +func (s *PodCleanupSuite) TestOnPodSuccessLabelMatch() { + s.Run("FailedPod", func() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-success-label-match- +spec: + podGC: + strategy: OnPodSuccess + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + args: [exit, 1] +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodCompleted) + }) + s.Run("SucceededPod", func() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-pod-success-label-match- +spec: + podGC: + strategy: OnPodSuccess + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + metadata: + labels: + evicted: true +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) + }) +} -// func (s *PodCleanupSuite) TestOnWorkflowCompletion() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-workflow-completion- -// spec: -// podGC: -// strategy: OnWorkflowCompletion -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// args: [exit, 1] -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// } +func (s *PodCleanupSuite) TestOnWorkflowCompletion() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-workflow-completion- +spec: + podGC: + strategy: OnWorkflowCompletion + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + args: [exit, 1] +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) +} -// func (s *PodCleanupSuite) TestOnWorkflowCompletionLabelNotMatch() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-workflow-completion-label-not-match- -// spec: -// podGC: -// strategy: OnWorkflowCompletion -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// args: [exit, 1] -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodCompleted) -// } +func (s *PodCleanupSuite) TestOnWorkflowCompletionLabelNotMatch() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-workflow-completion-label-not-match- +spec: + podGC: + strategy: OnWorkflowCompletion + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + args: [exit, 1] +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodCompleted) +} -// func (s *PodCleanupSuite) TestOnWorkflowCompletionLabelMatch() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-workflow-completion-label-match- -// spec: -// podGC: -// strategy: OnWorkflowCompletion -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// args: [exit, 1] -// metadata: -// labels: -// evicted: true -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// } +func (s *PodCleanupSuite) TestOnWorkflowCompletionLabelMatch() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-workflow-completion-label-match- +spec: + podGC: + strategy: OnWorkflowCompletion + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + args: [exit, 1] + metadata: + labels: + evicted: true +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) +} -// func (s *PodCleanupSuite) TestOnWorkflowSuccess() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-workflow-success- -// spec: -// podGC: -// strategy: OnWorkflowSuccess -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// } +func (s *PodCleanupSuite) TestOnWorkflowSuccess() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-workflow-success- +spec: + podGC: + strategy: OnWorkflowSuccess + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) +} -// func (s *PodCleanupSuite) TestOnWorkflowSuccessLabelNotMatch() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-workflow-success-label-not-match- -// spec: -// podGC: -// strategy: OnWorkflowSuccess -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodCompleted) -// } +func (s *PodCleanupSuite) TestOnWorkflowSuccessLabelNotMatch() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-workflow-success-label-not-match- +spec: + podGC: + strategy: OnWorkflowSuccess + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodCompleted) +} -// func (s *PodCleanupSuite) TestOnWorkflowSuccessLabelMatch() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-pod-cleanup-on-workflow-success-label-match- -// spec: -// podGC: -// strategy: OnWorkflowSuccess -// labelSelector: -// matchLabels: -// evicted: true -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// metadata: -// labels: -// evicted: true -// `). -// When(). -// SubmitWorkflow(). -// WaitForPod(fixtures.PodDeleted) -// } +func (s *PodCleanupSuite) TestOnWorkflowSuccessLabelMatch() { + s.Given(). + Workflow(` +metadata: + generateName: test-pod-cleanup-on-workflow-success-label-match- +spec: + podGC: + strategy: OnWorkflowSuccess + labelSelector: + matchLabels: + evicted: true + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 + metadata: + labels: + evicted: true +`). + When(). + SubmitWorkflow(). + WaitForPod(fixtures.PodDeleted) +} -// func (s *PodCleanupSuite) TestOnWorkflowTemplate() { -// s.Given(). -// WorkflowTemplate(` -// metadata: -// name: test-pod-cleanup -// spec: -// podGC: -// strategy: OnWorkflowCompletion -// entrypoint: main -// templates: -// - name: main -// container: -// image: argoproj/argosay:v2 -// `). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForPod(fixtures.PodDeleted) -// } +func (s *PodCleanupSuite) TestOnWorkflowTemplate() { + s.Given(). + WorkflowTemplate(` +metadata: + name: test-pod-cleanup +spec: + podGC: + strategy: OnWorkflowCompletion + entrypoint: main + templates: + - name: main + container: + image: argoproj/argosay:v2 +`). + When(). + CreateWorkflowTemplates(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForPod(fixtures.PodDeleted) +} -// func TestPodCleanupSuite(t *testing.T) { -// suite.Run(t, new(PodCleanupSuite)) -// } +func TestPodCleanupSuite(t *testing.T) { + suite.Run(t, new(PodCleanupSuite)) +} diff --git a/test/e2e/progress_test.go b/test/e2e/progress_test.go index 2893d4deda42..d277042de3d6 100644 --- a/test/e2e/progress_test.go +++ b/test/e2e/progress_test.go @@ -2,58 +2,58 @@ package e2e -// import ( -// "fmt" -// "testing" -// "time" - -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) - -// type ProgressSuite struct { -// fixtures.E2ESuite -// } - -// func (s *ProgressSuite) TestDefaultProgress() { -// s.Given(). -// Workflow("@testdata/basic-workflow.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.Progress("1/1"), status.Progress) -// assert.Equal(t, wfv1.Progress("1/1"), status.Nodes[metadata.Name].Progress) -// }) -// } - -// func (s *ProgressSuite) TestLoggedProgress() { -// toHaveProgress := func(p wfv1.Progress) fixtures.Condition { -// return func(wf *wfv1.Workflow) (bool, string) { -// return wf.Status.Nodes[wf.Name].Progress == p && -// wf.Status.Nodes.FindByDisplayName("progress").Progress == p, fmt.Sprintf("progress is %s", p) -// } -// } - -// s.Given(). -// Workflow("@testdata/progress-workflow.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeRunning). -// WaitForWorkflow(toHaveProgress("0/100"), time.Minute). -// WaitForWorkflow(toHaveProgress("50/100"), time.Minute). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.Progress("100/100"), status.Nodes[metadata.Name].Progress) -// }) -// } - -// func TestProgressSuite(t *testing.T) { -// suite.Run(t, new(ProgressSuite)) -// } +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) + +type ProgressSuite struct { + fixtures.E2ESuite +} + +func (s *ProgressSuite) TestDefaultProgress() { + s.Given(). + Workflow("@testdata/basic-workflow.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.Progress("1/1"), status.Progress) + assert.Equal(t, wfv1.Progress("1/1"), status.Nodes[metadata.Name].Progress) + }) +} + +func (s *ProgressSuite) TestLoggedProgress() { + toHaveProgress := func(p wfv1.Progress) fixtures.Condition { + return func(wf *wfv1.Workflow) (bool, string) { + return wf.Status.Nodes[wf.Name].Progress == p && + wf.Status.Nodes.FindByDisplayName("progress").Progress == p, fmt.Sprintf("progress is %s", p) + } + } + + s.Given(). + Workflow("@testdata/progress-workflow.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeRunning). + WaitForWorkflow(toHaveProgress("0/100"), time.Minute). + WaitForWorkflow(toHaveProgress("50/100"), time.Minute). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.Progress("100/100"), status.Nodes[metadata.Name].Progress) + }) +} + +func TestProgressSuite(t *testing.T) { + suite.Run(t, new(ProgressSuite)) +} diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index 40d3bf22bb1a..c834ae208410 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -3,18 +3,18 @@ package e2e import ( - // "context" - // "io" - // "strings" + "context" + "io" + "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" - // apiv1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - // "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" ) @@ -23,214 +23,214 @@ type RetryTestSuite struct { fixtures.E2ESuite } -// func (s *RetryTestSuite) TestRetryLimit() { -// s.Given(). -// Workflow(` -// metadata: -// name: test-retry-limit -// spec: -// entrypoint: main -// templates: -// - name: main -// retryStrategy: -// limit: 0 -// backoff: -// duration: 2s -// factor: 2 -// maxDuration: 5m -// container: -// name: main -// image: 'argoproj/argosay:v2' -// args: [ exit, "1" ] -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed). -// Then(). -// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) -// assert.Equal(t, "No more retries left", status.Message) -// assert.Equal(t, v1alpha1.Progress("0/1"), status.Progress) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "test-retry-limit" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) -// assert.Equal(t, v1alpha1.NodeTypeRetry, status.Type) -// assert.Nil(t, status.NodeFlag) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "test-retry-limit(0)" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeFailed, status.Phase) -// assert.True(t, status.NodeFlag.Retried) -// }) -// } +func (s *RetryTestSuite) TestRetryLimit() { + s.Given(). + Workflow(` +metadata: + name: test-retry-limit +spec: + entrypoint: main + templates: + - name: main + retryStrategy: + limit: 0 + backoff: + duration: 2s + factor: 2 + maxDuration: 5m + container: + name: main + image: 'argoproj/argosay:v2' + args: [ exit, "1" ] +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) + assert.Equal(t, "No more retries left", status.Message) + assert.Equal(t, v1alpha1.Progress("0/1"), status.Progress) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "test-retry-limit" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeFailed, status.Phase) + assert.Equal(t, v1alpha1.NodeTypeRetry, status.Type) + assert.Nil(t, status.NodeFlag) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "test-retry-limit(0)" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeFailed, status.Phase) + assert.True(t, status.NodeFlag.Retried) + }) +} -// func (s *RetryTestSuite) TestRetryBackoff() { -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-backoff-strategy- -// spec: -// entrypoint: main -// templates: -// - name: main -// retryStrategy: -// limit: '10' -// backoff: -// duration: 10s -// maxDuration: 1m -// container: -// name: main -// image: 'argoproj/argosay:v2' -// args: [ exit, "1" ] -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(time.Second * 90). -// Then(). -// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) -// assert.LessOrEqual(t, len(status.Nodes), 10) -// }) -// s.Given(). -// Workflow(` -// metadata: -// generateName: test-backoff-strategy- -// spec: -// entrypoint: main -// templates: -// - name: main -// retryStrategy: -// limit: 10 -// backoff: -// duration: 10s -// maxDuration: 1m -// container: -// name: main -// image: 'argoproj/argosay:v2' -// args: [ exit, "1" ] -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(time.Second * 90). -// Then(). -// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) -// assert.LessOrEqual(t, len(status.Nodes), 10) -// }) -// } +func (s *RetryTestSuite) TestRetryBackoff() { + s.Given(). + Workflow(` +metadata: + generateName: test-backoff-strategy- +spec: + entrypoint: main + templates: + - name: main + retryStrategy: + limit: '10' + backoff: + duration: 10s + maxDuration: 1m + container: + name: main + image: 'argoproj/argosay:v2' + args: [ exit, "1" ] +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(time.Second * 90). + Then(). + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) + assert.LessOrEqual(t, len(status.Nodes), 10) + }) + s.Given(). + Workflow(` +metadata: + generateName: test-backoff-strategy- +spec: + entrypoint: main + templates: + - name: main + retryStrategy: + limit: 10 + backoff: + duration: 10s + maxDuration: 1m + container: + name: main + image: 'argoproj/argosay:v2' + args: [ exit, "1" ] +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(time.Second * 90). + Then(). + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowPhase("Failed"), status.Phase) + assert.LessOrEqual(t, len(status.Nodes), 10) + }) +} -// func (s *RetryTestSuite) TestWorkflowTemplateWithRetryStrategyInContainerSet() { -// var name string -// var ns string -// s.Given(). -// WorkflowTemplate("@testdata/workflow-template-with-containerset.yaml"). -// Workflow(` -// metadata: -// name: workflow-template-containerset -// spec: -// workflowTemplateRef: -// name: containerset-with-retrystrategy -// `). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowFailed, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return status.Name == "workflow-template-containerset" -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// name = pod.GetName() -// ns = pod.GetNamespace() -// }) -// // Success, no need retry -// s.Run("ContainerLogs", func() { -// ctx := context.Background() -// podLogOptions := &apiv1.PodLogOptions{Container: "c1"} -// stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) -// s.Require().NoError(err) -// defer stream.Close() -// logBytes, err := io.ReadAll(stream) -// s.Require().NoError(err) -// output := string(logBytes) -// count := strings.Count(output, "capturing logs") -// s.Equal(1, count) -// s.Contains(output, "hi") -// }) -// // Command err. No retry logic is entered. -// s.Run("ContainerLogs", func() { -// ctx := context.Background() -// podLogOptions := &apiv1.PodLogOptions{Container: "c2"} -// stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) -// s.Require().NoError(err) -// defer stream.Close() -// logBytes, err := io.ReadAll(stream) -// s.Require().NoError(err) -// output := string(logBytes) -// count := strings.Count(output, "capturing logs") -// s.Equal(0, count) -// s.Contains(output, "executable file not found in $PATH") -// }) -// // Retry when err. -// s.Run("ContainerLogs", func() { -// ctx := context.Background() -// podLogOptions := &apiv1.PodLogOptions{Container: "c3"} -// stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) -// s.Require().NoError(err) -// defer stream.Close() -// logBytes, err := io.ReadAll(stream) -// s.Require().NoError(err) -// output := string(logBytes) -// count := strings.Count(output, "capturing logs") -// s.Equal(2, count) -// countFailureInfo := strings.Count(output, "intentional failure") -// s.Equal(2, countFailureInfo) -// }) -// } +func (s *RetryTestSuite) TestWorkflowTemplateWithRetryStrategyInContainerSet() { + var name string + var ns string + s.Given(). + WorkflowTemplate("@testdata/workflow-template-with-containerset.yaml"). + Workflow(` +metadata: + name: workflow-template-containerset +spec: + workflowTemplateRef: + name: containerset-with-retrystrategy +`). + When(). + CreateWorkflowTemplates(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowFailed, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return status.Name == "workflow-template-containerset" + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + name = pod.GetName() + ns = pod.GetNamespace() + }) + // Success, no need retry + s.Run("ContainerLogs", func() { + ctx := context.Background() + podLogOptions := &apiv1.PodLogOptions{Container: "c1"} + stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) + s.Require().NoError(err) + defer stream.Close() + logBytes, err := io.ReadAll(stream) + s.Require().NoError(err) + output := string(logBytes) + count := strings.Count(output, "capturing logs") + s.Equal(1, count) + s.Contains(output, "hi") + }) + // Command err. No retry logic is entered. + s.Run("ContainerLogs", func() { + ctx := context.Background() + podLogOptions := &apiv1.PodLogOptions{Container: "c2"} + stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) + s.Require().NoError(err) + defer stream.Close() + logBytes, err := io.ReadAll(stream) + s.Require().NoError(err) + output := string(logBytes) + count := strings.Count(output, "capturing logs") + s.Equal(0, count) + s.Contains(output, "executable file not found in $PATH") + }) + // Retry when err. + s.Run("ContainerLogs", func() { + ctx := context.Background() + podLogOptions := &apiv1.PodLogOptions{Container: "c3"} + stream, err := s.KubeClient.CoreV1().Pods(ns).GetLogs(name, podLogOptions).Stream(ctx) + s.Require().NoError(err) + defer stream.Close() + logBytes, err := io.ReadAll(stream) + s.Require().NoError(err) + output := string(logBytes) + count := strings.Count(output, "capturing logs") + s.Equal(2, count) + countFailureInfo := strings.Count(output, "intentional failure") + s.Equal(2, countFailureInfo) + }) +} -// func (s *RetryTestSuite) TestRetryNodeAntiAffinity() { -// s.Given(). -// Workflow(` -// metadata: -// name: test-nodeantiaffinity-strategy -// spec: -// entrypoint: main -// templates: -// - name: main -// retryStrategy: -// limit: '1' -// retryPolicy: "Always" -// affinity: -// nodeAntiAffinity: {} -// container: -// name: main -// image: 'argoproj/argosay:v2' -// args: [ exit, "1" ] -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToHaveFailedPod). -// Wait(5 * time.Second). -// Then(). -// ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// if status.Phase == wfv1.WorkflowFailed { -// nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") -// nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") -// assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) -// } -// if status.Phase == wfv1.WorkflowRunning { -// nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") -// nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") -// assert.Contains(t, nodeStatusRetry.Message, "didn't match Pod's node affinity/selector") -// assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) -// } -// }) -// } +func (s *RetryTestSuite) TestRetryNodeAntiAffinity() { + s.Given(). + Workflow(` +metadata: + name: test-nodeantiaffinity-strategy +spec: + entrypoint: main + templates: + - name: main + retryStrategy: + limit: '1' + retryPolicy: "Always" + affinity: + nodeAntiAffinity: {} + container: + name: main + image: 'argoproj/argosay:v2' + args: [ exit, "1" ] +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToHaveFailedPod). + Wait(5 * time.Second). + Then(). + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + if status.Phase == wfv1.WorkflowFailed { + nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") + nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") + assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) + } + if status.Phase == wfv1.WorkflowRunning { + nodeStatus := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(0)") + nodeStatusRetry := status.Nodes.FindByDisplayName("test-nodeantiaffinity-strategy(1)") + assert.Contains(t, nodeStatusRetry.Message, "didn't match Pod's node affinity/selector") + assert.NotEqual(t, nodeStatus.HostNodeName, nodeStatusRetry.HostNodeName) + } + }) +} func (s *RetryTestSuite) TestRetryDaemonContainer() { s.Given(). diff --git a/test/e2e/semaphore_test.go b/test/e2e/semaphore_test.go index 7ca3a9ee10fc..d1e41cdf3938 100644 --- a/test/e2e/semaphore_test.go +++ b/test/e2e/semaphore_test.go @@ -2,104 +2,104 @@ package e2e -// import ( -// "strings" -// "testing" -// "time" +import ( + "strings" + "testing" + "time" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type SemaphoreSuite struct { -// fixtures.E2ESuite -// } +type SemaphoreSuite struct { + fixtures.E2ESuite +} -// func (s *SemaphoreSuite) TestSynchronizationWfLevelMutex() { -// s.Given(). -// Workflow("@functional/synchronization-mutex-wf-level-1.yaml"). -// When(). -// SubmitWorkflow(). -// Given(). -// Workflow("@functional/synchronization-mutex-wf-level.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeWaitingOnAMutex, 90*time.Second). -// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -// } +func (s *SemaphoreSuite) TestSynchronizationWfLevelMutex() { + s.Given(). + Workflow("@functional/synchronization-mutex-wf-level-1.yaml"). + When(). + SubmitWorkflow(). + Given(). + Workflow("@functional/synchronization-mutex-wf-level.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeWaitingOnAMutex, 90*time.Second). + WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +} -// func (s *SemaphoreSuite) TestTemplateLevelMutex() { -// s.Given(). -// Workflow("@functional/synchronization-mutex-tmpl-level.yaml"). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeWaitingOnAMutex, 90*time.Second). -// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -// } +func (s *SemaphoreSuite) TestTemplateLevelMutex() { + s.Given(). + Workflow("@functional/synchronization-mutex-tmpl-level.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeWaitingOnAMutex, 90*time.Second). + WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +} -// func (s *SemaphoreSuite) TestWorkflowLevelSemaphore() { -// s.Given(). -// Workflow("@testdata/semaphore-wf-level.yaml"). -// When(). -// CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToHavePhase(wfv1.WorkflowUnknown), 90*time.Second). -// WaitForWorkflow(). -// DeleteConfigMap("my-config"). -// Then(). -// When(). -// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -// } +func (s *SemaphoreSuite) TestWorkflowLevelSemaphore() { + s.Given(). + Workflow("@testdata/semaphore-wf-level.yaml"). + When(). + CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToHavePhase(wfv1.WorkflowUnknown), 90*time.Second). + WaitForWorkflow(). + DeleteConfigMap("my-config"). + Then(). + When(). + WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +} -// func (s *SemaphoreSuite) TestTemplateLevelSemaphore() { -// s.Given(). -// Workflow("@testdata/semaphore-tmpl-level.yaml"). -// When(). -// CreateConfigMap("my-config", map[string]string{"template": "1"}, map[string]string{}). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeRunning, 90*time.Second). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.True(t, status.Nodes.Any(func(n wfv1.NodeStatus) bool { -// return strings.Contains(n.Message, "Waiting for") -// })) -// }). -// When(). -// WaitForWorkflow(time.Second * 90) -// } +func (s *SemaphoreSuite) TestTemplateLevelSemaphore() { + s.Given(). + Workflow("@testdata/semaphore-tmpl-level.yaml"). + When(). + CreateConfigMap("my-config", map[string]string{"template": "1"}, map[string]string{}). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeRunning, 90*time.Second). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.True(t, status.Nodes.Any(func(n wfv1.NodeStatus) bool { + return strings.Contains(n.Message, "Waiting for") + })) + }). + When(). + WaitForWorkflow(time.Second * 90) +} -// func (s *SemaphoreSuite) TestSynchronizationTmplLevelMutexAndSemaphore() { -// s.Given(). -// Workflow("@functional/synchronization-tmpl-level-mutex-semaphore.yaml"). -// When(). -// CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -// } +func (s *SemaphoreSuite) TestSynchronizationTmplLevelMutexAndSemaphore() { + s.Given(). + Workflow("@functional/synchronization-tmpl-level-mutex-semaphore.yaml"). + When(). + CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +} -// func (s *SemaphoreSuite) TestSynchronizationMultiple() { -// s.Given(). -// Workflow("@functional/synchronization-multiple.yaml"). -// When(). -// CreateConfigMap("my-config", map[string]string{"workflow": "2"}, map[string]string{}). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -// } +func (s *SemaphoreSuite) TestSynchronizationMultiple() { + s.Given(). + Workflow("@functional/synchronization-multiple.yaml"). + When(). + CreateConfigMap("my-config", map[string]string{"workflow": "2"}, map[string]string{}). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +} -// // Legacy CRD entries: mutex and semaphore -// func (s *SemaphoreSuite) TestSynchronizationLegacyMutexAndSemaphore() { -// s.Given(). -// Workflow("@functional/synchronization-legacy-mutex-semaphore.yaml"). -// When(). -// CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) -// } +// Legacy CRD entries: mutex and semaphore +func (s *SemaphoreSuite) TestSynchronizationLegacyMutexAndSemaphore() { + s.Given(). + Workflow("@functional/synchronization-legacy-mutex-semaphore.yaml"). + When(). + CreateConfigMap("my-config", map[string]string{"workflow": "1"}, map[string]string{}). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded, 90*time.Second) +} -// func TestSemaphoreSuite(t *testing.T) { -// suite.Run(t, new(SemaphoreSuite)) -// } +func TestSemaphoreSuite(t *testing.T) { + suite.Run(t, new(SemaphoreSuite)) +} diff --git a/test/e2e/workflow_configmap_substitution_test.go b/test/e2e/workflow_configmap_substitution_test.go index a7fd0e286992..25655bebfbde 100644 --- a/test/e2e/workflow_configmap_substitution_test.go +++ b/test/e2e/workflow_configmap_substitution_test.go @@ -2,229 +2,229 @@ package e2e -// import ( -// "testing" -// "time" +import ( + "testing" + "time" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type WorkflowConfigMapSelectorSubstitutionSuite struct { -// fixtures.E2ESuite -// } +type WorkflowConfigMapSelectorSubstitutionSuite struct { + fixtures.E2ESuite +} -// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestKeySubstitution() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-template-configmapkeyselector-wf- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: message -// value: msg -// templates: -// - name: whalesay -// inputs: -// parameters: -// - name: message -// valueFrom: -// configMapKeyRef: -// name: cmref-parameters -// key: '{{ workflow.parameters.message }}' -// container: -// image: argoproj/argosay:v2 -// args: -// - echo -// - "{{inputs.parameters.message}}" -// `). -// When(). -// CreateConfigMap( -// "cmref-parameters", -// map[string]string{"msg": "hello world"}, -// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). -// Wait(1 * time.Second). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// DeleteConfigMap("cmref-parameters"). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestKeySubstitution() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-configmapkeyselector-wf- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + value: msg + templates: + - name: whalesay + inputs: + parameters: + - name: message + valueFrom: + configMapKeyRef: + name: cmref-parameters + key: '{{ workflow.parameters.message }}' + container: + image: argoproj/argosay:v2 + args: + - echo + - "{{inputs.parameters.message}}" +`). + When(). + CreateConfigMap( + "cmref-parameters", + map[string]string{"msg": "hello world"}, + map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). + Wait(1 * time.Second). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + DeleteConfigMap("cmref-parameters"). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestNameSubstitution() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-template-configmapkeyselector-wf- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: cm-name -// value: cmref-parameters -// templates: -// - name: whalesay -// inputs: -// parameters: -// - name: message -// valueFrom: -// configMapKeyRef: -// name: '{{ workflow.parameters.cm-name}}' -// key: msg -// container: -// image: argoproj/argosay:v2 -// args: -// - echo -// - "{{inputs.parameters.message}}" -// `). -// When(). -// CreateConfigMap( -// "cmref-parameters", -// map[string]string{"msg": "hello world"}, -// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). -// Wait(1 * time.Second). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// DeleteConfigMap("cmref-parameters"). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestNameSubstitution() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-configmapkeyselector-wf- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: cm-name + value: cmref-parameters + templates: + - name: whalesay + inputs: + parameters: + - name: message + valueFrom: + configMapKeyRef: + name: '{{ workflow.parameters.cm-name}}' + key: msg + container: + image: argoproj/argosay:v2 + args: + - echo + - "{{inputs.parameters.message}}" +`). + When(). + CreateConfigMap( + "cmref-parameters", + map[string]string{"msg": "hello world"}, + map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). + Wait(1 * time.Second). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + DeleteConfigMap("cmref-parameters"). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestInvalidNameParameterSubstitution() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-template-configmapkeyselector-wf- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: cm-name -// value: cmref-parameters -// templates: -// - name: whalesay -// inputs: -// parameters: -// - name: message -// valueFrom: -// configMapKeyRef: -// name: '{{ workflow.parameters.cm-name }}' -// key: msg -// container: -// image: argoproj/argosay:v2 -// args: -// - echo -// - "{{inputs.parameters.message}}" -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeErrored) -// } +func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestInvalidNameParameterSubstitution() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-configmapkeyselector-wf- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: cm-name + value: cmref-parameters + templates: + - name: whalesay + inputs: + parameters: + - name: message + valueFrom: + configMapKeyRef: + name: '{{ workflow.parameters.cm-name }}' + key: msg + container: + image: argoproj/argosay:v2 + args: + - echo + - "{{inputs.parameters.message}}" +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeErrored) +} -// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestDefaultParamValueWhenNotFound() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-template-configmapkeyselector-wf-default-param- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: message -// value: msg -// templates: -// - name: whalesay -// inputs: -// parameters: -// - name: message -// valueFrom: -// default: "default-val" -// configMapKeyRef: -// name: cmref-parameters -// key: not-existing-key -// container: -// image: argoproj/argosay:v2 -// args: -// - echo -// - "{{inputs.parameters.message}}" -// `). -// When(). -// CreateConfigMap( -// "cmref-parameters", -// map[string]string{"msg": "hello world"}, -// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). -// Wait(1 * time.Second). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// DeleteConfigMap("cmref-parameters"). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestDefaultParamValueWhenNotFound() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-configmapkeyselector-wf-default-param- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + value: msg + templates: + - name: whalesay + inputs: + parameters: + - name: message + valueFrom: + default: "default-val" + configMapKeyRef: + name: cmref-parameters + key: not-existing-key + container: + image: argoproj/argosay:v2 + args: + - echo + - "{{inputs.parameters.message}}" +`). + When(). + CreateConfigMap( + "cmref-parameters", + map[string]string{"msg": "hello world"}, + map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). + Wait(1 * time.Second). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + DeleteConfigMap("cmref-parameters"). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestGlobalArgDefaultCMParamValueWhenNotFound() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-template-cmkeyselector-wf-global-arg-default-param- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: simple-global-param -// valueFrom: -// default: "default value" -// configMapKeyRef: -// name: not-existing-cm -// key: not-existing-key -// templates: -// - name: whalesay -// container: -// image: argoproj/argosay:v2 -// command: [sh, -c] -// args: ["sleep 1; echo -n {{workflow.parameters.simple-global-param}} > /tmp/message.txt"] -// outputs: -// parameters: -// - name: message -// valueFrom: -// path: /tmp/message.txt -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "default value", status.Nodes[metadata.Name].Outputs.Parameters[0].Value.String()) -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowConfigMapSelectorSubstitutionSuite) TestGlobalArgDefaultCMParamValueWhenNotFound() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-cmkeyselector-wf-global-arg-default-param- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: simple-global-param + valueFrom: + default: "default value" + configMapKeyRef: + name: not-existing-cm + key: not-existing-key + templates: + - name: whalesay + container: + image: argoproj/argosay:v2 + command: [sh, -c] + args: ["sleep 1; echo -n {{workflow.parameters.simple-global-param}} > /tmp/message.txt"] + outputs: + parameters: + - name: message + valueFrom: + path: /tmp/message.txt +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "default value", status.Nodes[metadata.Name].Outputs.Parameters[0].Value.String()) + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func TestConfigMapKeySelectorSubstitutionSuite(t *testing.T) { -// suite.Run(t, new(WorkflowConfigMapSelectorSubstitutionSuite)) -// } +func TestConfigMapKeySelectorSubstitutionSuite(t *testing.T) { + suite.Run(t, new(WorkflowConfigMapSelectorSubstitutionSuite)) +} diff --git a/test/e2e/workflow_inputs_orverridable_test.go b/test/e2e/workflow_inputs_orverridable_test.go index 49321219978d..a5a99f9505ab 100644 --- a/test/e2e/workflow_inputs_orverridable_test.go +++ b/test/e2e/workflow_inputs_orverridable_test.go @@ -2,201 +2,201 @@ package e2e -// import ( -// "testing" -// "time" +import ( + "testing" + "time" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type WorkflowInputsOverridableSuite struct { -// fixtures.E2ESuite -// } +type WorkflowInputsOverridableSuite struct { + fixtures.E2ESuite +} -// func (s *WorkflowInputsOverridableSuite) TestArgsValueParamsOverrideInputParamsValueFrom() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-inputs-overridable-wf- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: message -// value: arg-value -// templates: -// - name: whalesay -// container: -// image: argoproj/argosay:v2 -// args: -// - echo -// - "{{inputs.parameters.message}}" -// inputs: -// parameters: -// - name: message -// valueFrom: -// configMapKeyRef: -// name: cmref-parameters -// key: cmref-key -// `). -// When(). -// CreateConfigMap( -// "cmref-parameters", -// map[string]string{"cmref-key": "input-value"}, -// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). -// Wait(1 * time.Second). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// DeleteConfigMap("cmref-parameters"). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowInputsOverridableSuite) TestArgsValueParamsOverrideInputParamsValueFrom() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-inputs-overridable-wf- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + value: arg-value + templates: + - name: whalesay + container: + image: argoproj/argosay:v2 + args: + - echo + - "{{inputs.parameters.message}}" + inputs: + parameters: + - name: message + valueFrom: + configMapKeyRef: + name: cmref-parameters + key: cmref-key +`). + When(). + CreateConfigMap( + "cmref-parameters", + map[string]string{"cmref-key": "input-value"}, + map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). + Wait(1 * time.Second). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + DeleteConfigMap("cmref-parameters"). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowInputsOverridableSuite) TestArgsValueFromParamsOverrideInputParamsValueFrom() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-inputs-overridable-wf- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: message -// valueFrom: -// configMapKeyRef: -// name: new-cmref-parameters -// key: cmref-key -// templates: -// - name: whalesay -// container: -// image: argoproj/argosay:v2 -// args: -// - echo -// - "{{inputs.parameters.message}}" -// inputs: -// parameters: -// - name: message -// valueFrom: -// configMapKeyRef: -// name: cmref-parameters -// key: cmref-key -// `). -// When(). -// CreateConfigMap( -// "cmref-parameters", -// map[string]string{"cmref-key": "input-value"}, -// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). -// CreateConfigMap( -// "new-cmref-parameters", -// map[string]string{"cmref-key": "arg-value"}, -// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). -// Wait(1 * time.Second). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// DeleteConfigMap("cmref-parameters"). -// DeleteConfigMap("new-cmref-parameters"). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowInputsOverridableSuite) TestArgsValueFromParamsOverrideInputParamsValueFrom() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-inputs-overridable-wf- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + valueFrom: + configMapKeyRef: + name: new-cmref-parameters + key: cmref-key + templates: + - name: whalesay + container: + image: argoproj/argosay:v2 + args: + - echo + - "{{inputs.parameters.message}}" + inputs: + parameters: + - name: message + valueFrom: + configMapKeyRef: + name: cmref-parameters + key: cmref-key +`). + When(). + CreateConfigMap( + "cmref-parameters", + map[string]string{"cmref-key": "input-value"}, + map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). + CreateConfigMap( + "new-cmref-parameters", + map[string]string{"cmref-key": "arg-value"}, + map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). + Wait(1 * time.Second). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + DeleteConfigMap("cmref-parameters"). + DeleteConfigMap("new-cmref-parameters"). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowInputsOverridableSuite) TestArgsValueParamsOverrideInputParamsValue() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-template-wf- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: message -// value: arg-value -// templates: -// - name: whalesay -// container: -// image: argoproj/argosay:v2 -// args: -// - echo -// - "{{inputs.parameters.message}}" -// inputs: -// parameters: -// - name: message -// value: input-value -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowInputsOverridableSuite) TestArgsValueParamsOverrideInputParamsValue() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-wf- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + value: arg-value + templates: + - name: whalesay + container: + image: argoproj/argosay:v2 + args: + - echo + - "{{inputs.parameters.message}}" + inputs: + parameters: + - name: message + value: input-value +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowInputsOverridableSuite) TestArgsValueFromParamsOverrideInputParamsValue() { -// s.Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-inputs-overridable-wf- -// label: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: whalesay -// arguments: -// parameters: -// - name: message -// valueFrom: -// configMapKeyRef: -// name: cmref-parameters -// key: cmref-key -// templates: -// - name: whalesay -// container: -// image: argoproj/argosay:v2 -// args: -// - echo -// - "{{inputs.parameters.message}}" -// inputs: -// parameters: -// - name: message -// value: input-value -// `). -// When(). -// CreateConfigMap( -// "cmref-parameters", -// map[string]string{"cmref-key": "arg-value"}, -// map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). -// Wait(1 * time.Second). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// DeleteConfigMap("cmref-parameters"). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowInputsOverridableSuite) TestArgsValueFromParamsOverrideInputParamsValue() { + s.Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-inputs-overridable-wf- + label: + workflows.argoproj.io/test: "true" +spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + valueFrom: + configMapKeyRef: + name: cmref-parameters + key: cmref-key + templates: + - name: whalesay + container: + image: argoproj/argosay:v2 + args: + - echo + - "{{inputs.parameters.message}}" + inputs: + parameters: + - name: message + value: input-value +`). + When(). + CreateConfigMap( + "cmref-parameters", + map[string]string{"cmref-key": "arg-value"}, + map[string]string{"workflows.argoproj.io/configmap-type": "Parameter"}). + Wait(1 * time.Second). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + DeleteConfigMap("cmref-parameters"). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "arg-value", status.Nodes[metadata.Name].Inputs.Parameters[0].Value.String()) + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func TestWorkflowInputsOverridableSuiteSuite(t *testing.T) { -// suite.Run(t, new(WorkflowInputsOverridableSuite)) -// } +func TestWorkflowInputsOverridableSuiteSuite(t *testing.T) { + suite.Run(t, new(WorkflowInputsOverridableSuite)) +} diff --git a/test/e2e/workflow_template_test.go b/test/e2e/workflow_template_test.go index 373f6268cd48..e4ba111dc1fe 100644 --- a/test/e2e/workflow_template_test.go +++ b/test/e2e/workflow_template_test.go @@ -2,183 +2,183 @@ package e2e -// import ( -// "strings" -// "testing" +import ( + "strings" + "testing" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// apiv1 "k8s.io/api/core/v1" -// v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + apiv1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// ) + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" +) -// type WorkflowTemplateSuite struct { -// fixtures.E2ESuite -// } +type WorkflowTemplateSuite struct { + fixtures.E2ESuite +} -// func (s *WorkflowTemplateSuite) TestNestedWorkflowTemplate() { -// s.Given(). -// WorkflowTemplate("@testdata/workflow-template-nested-template.yaml"). -// WorkflowTemplate("@smoke/workflow-template-whalesay-template.yaml"). -// When(). -// CreateWorkflowTemplates(). -// Given(). -// Workflow(`apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: workflow-template-nested- -// spec: -// entrypoint: whalesay -// templates: -// - name: whalesay -// steps: -// - - name: call-whalesay-template -// templateRef: -// name: workflow-template-nested-template -// template: whalesay-template -// arguments: -// parameters: -// - name: message -// value: "hello from nested" -// `).When(). -// SubmitWorkflow(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowTemplateSuite) TestNestedWorkflowTemplate() { + s.Given(). + WorkflowTemplate("@testdata/workflow-template-nested-template.yaml"). + WorkflowTemplate("@smoke/workflow-template-whalesay-template.yaml"). + When(). + CreateWorkflowTemplates(). + Given(). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-nested- +spec: + entrypoint: whalesay + templates: + - name: whalesay + steps: + - - name: call-whalesay-template + templateRef: + name: workflow-template-nested-template + template: whalesay-template + arguments: + parameters: + - name: message + value: "hello from nested" +`).When(). + SubmitWorkflow(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithEnum() { -// s.Given(). -// WorkflowTemplate("@testdata/workflow-template-with-enum-values.yaml"). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithEnum() { + s.Given(). + WorkflowTemplate("@testdata/workflow-template-with-enum-values.yaml"). + When(). + CreateWorkflowTemplates(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWorkflowMetadataSubstitution() { -// s.Given(). -// WorkflowTemplate("@testdata/workflow-template-sub-test.yaml"). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWorkflowMetadataSubstitution() { + s.Given(). + WorkflowTemplate("@testdata/workflow-template-sub-test.yaml"). + When(). + CreateWorkflowTemplates(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateResourceUnquotedExpressions() { -// s.Given(). -// WorkflowTemplate("@testdata/workflow-template-with-resource-expr.yaml"). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateResourceUnquotedExpressions() { + s.Given(). + WorkflowTemplate("@testdata/workflow-template-with-resource-expr.yaml"). + When(). + CreateWorkflowTemplates(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithParallelStepsRequiringPVC() { -// s.Given(). -// WorkflowTemplate("@testdata/loops-steps-limited-parallelism-pvc.yaml"). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflowsFromWorkflowTemplates(). -// WaitForWorkflow(). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }). -// ExpectPVCDeleted() -// } +func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithParallelStepsRequiringPVC() { + s.Given(). + WorkflowTemplate("@testdata/loops-steps-limited-parallelism-pvc.yaml"). + When(). + CreateWorkflowTemplates(). + SubmitWorkflowsFromWorkflowTemplates(). + WaitForWorkflow(). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }). + ExpectPVCDeleted() +} -// func (s *WorkflowTemplateSuite) TestWorkflowTemplateInvalidOnExit() { -// s.Given(). -// WorkflowTemplate("@testdata/workflow-template-invalid-onexit.yaml"). -// Workflow(` -// metadata: -// generateName: workflow-template-invalid-onexit- -// spec: -// workflowTemplateRef: -// name: workflow-template-invalid-onexit -// `). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeErrored). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowError, status.Phase) -// assert.Contains(t, status.Message, "error in exit template execution") -// }). -// ExpectPVCDeleted() -// } +func (s *WorkflowTemplateSuite) TestWorkflowTemplateInvalidOnExit() { + s.Given(). + WorkflowTemplate("@testdata/workflow-template-invalid-onexit.yaml"). + Workflow(` +metadata: + generateName: workflow-template-invalid-onexit- +spec: + workflowTemplateRef: + name: workflow-template-invalid-onexit +`). + When(). + CreateWorkflowTemplates(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeErrored). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowError, status.Phase) + assert.Contains(t, status.Message, "error in exit template execution") + }). + ExpectPVCDeleted() +} -// func (s *WorkflowTemplateSuite) TestWorkflowTemplateWithHook() { -// s.Given(). -// WorkflowTemplate("@testdata/workflow-templates/success-hook.yaml"). -// Workflow(` -// metadata: -// generateName: workflow-template-hook- -// spec: -// workflowTemplateRef: -// name: hook -// `). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "hooks.running") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "hooks.succeed") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) -// }) -// } +func (s *WorkflowTemplateSuite) TestWorkflowTemplateWithHook() { + s.Given(). + WorkflowTemplate("@testdata/workflow-templates/success-hook.yaml"). + Workflow(` +metadata: + generateName: workflow-template-hook- +spec: + workflowTemplateRef: + name: hook +`). + When(). + CreateWorkflowTemplates(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "hooks.running") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "hooks.succeed") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.Equal(t, v1alpha1.NodeSucceeded, status.Phase) + }) +} -// func (s *WorkflowTemplateSuite) TestWorkflowTemplateInvalidEntryPoint() { -// s.Given(). -// WorkflowTemplate("@testdata/workflow-template-invalid-entrypoint.yaml"). -// Workflow(` -// metadata: -// generateName: workflow-template-invalid-entrypoint- -// spec: -// workflowTemplateRef: -// name: workflow-template-invalid-entrypoint -// `). -// When(). -// CreateWorkflowTemplates(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeErrored). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { -// assert.Equal(t, v1alpha1.WorkflowError, status.Phase) -// assert.Contains(t, status.Message, "error in entry template execution") -// }). -// ExpectPVCDeleted() -// } +func (s *WorkflowTemplateSuite) TestWorkflowTemplateInvalidEntryPoint() { + s.Given(). + WorkflowTemplate("@testdata/workflow-template-invalid-entrypoint.yaml"). + Workflow(` +metadata: + generateName: workflow-template-invalid-entrypoint- +spec: + workflowTemplateRef: + name: workflow-template-invalid-entrypoint +`). + When(). + CreateWorkflowTemplates(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeErrored). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, v1alpha1.WorkflowError, status.Phase) + assert.Contains(t, status.Message, "error in entry template execution") + }). + ExpectPVCDeleted() +} -// func TestWorkflowTemplateSuite(t *testing.T) { -// suite.Run(t, new(WorkflowTemplateSuite)) -// } +func TestWorkflowTemplateSuite(t *testing.T) { + suite.Run(t, new(WorkflowTemplateSuite)) +} diff --git a/test/e2e/workflow_test.go b/test/e2e/workflow_test.go index 00c856af552c..76a0b6dc6a2b 100644 --- a/test/e2e/workflow_test.go +++ b/test/e2e/workflow_test.go @@ -2,227 +2,227 @@ package e2e -// import ( -// "strings" -// "testing" -// "time" +import ( + "strings" + "testing" + "time" -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/suite" -// apiv1 "k8s.io/api/core/v1" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -// "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" -// "github.com/argoproj/argo-workflows/v3/workflow/common" -// ) + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" + "github.com/argoproj/argo-workflows/v3/workflow/common" +) -// type WorkflowSuite struct { -// fixtures.E2ESuite -// } +type WorkflowSuite struct { + fixtures.E2ESuite +} -// func (s *WorkflowSuite) TestContainerTemplateAutomountServiceAccountTokenDisabled() { -// s.Given().Workflow(` -// apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: get-resources-via-container-template- -// namespace: argo -// spec: -// serviceAccountName: argo -// automountServiceAccountToken: false -// executor: -// serviceAccountName: get-cm -// entrypoint: main -// templates: -// - name: main -// container: -// name: main -// image: bitnami/kubectl -// command: -// - sh -// args: -// - -c -// - | -// kubectl get cm -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded, time.Minute*11). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowSuite) TestContainerTemplateAutomountServiceAccountTokenDisabled() { + s.Given().Workflow(` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: get-resources-via-container-template- + namespace: argo +spec: + serviceAccountName: argo + automountServiceAccountToken: false + executor: + serviceAccountName: get-cm + entrypoint: main + templates: + - name: main + container: + name: main + image: bitnami/kubectl + command: + - sh + args: + - -c + - | + kubectl get cm +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded, time.Minute*11). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowSuite) TestScriptTemplateAutomountServiceAccountTokenDisabled() { -// s.Given().Workflow(` -// apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: get-resources-via-script-template- -// namespace: argo -// spec: -// serviceAccountName: argo -// automountServiceAccountToken: false -// executor: -// serviceAccountName: get-cm -// entrypoint: main -// templates: -// - name: main -// script: -// name: main -// image: bitnami/kubectl -// command: -// - sh -// source: -// kubectl get cm -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeSucceeded, time.Minute*11). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }) -// } +func (s *WorkflowSuite) TestScriptTemplateAutomountServiceAccountTokenDisabled() { + s.Given().Workflow(` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: get-resources-via-script-template- + namespace: argo +spec: + serviceAccountName: argo + automountServiceAccountToken: false + executor: + serviceAccountName: get-cm + entrypoint: main + templates: + - name: main + script: + name: main + image: bitnami/kubectl + command: + - sh + source: + kubectl get cm +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded, time.Minute*11). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }) +} -// func (s *WorkflowSuite) TestWorkflowFailedWhenAllPodSetFailedFromPending() { -// (s.Given().Workflow(` -// apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: active-deadline-fanout-template-level- -// namespace: argo -// spec: -// entrypoint: entrypoint -// templates: -// - name: entrypoint -// steps: -// - - name: fanout -// template: echo -// arguments: -// parameters: -// - name: item -// value: "{{item}}" -// withItems: -// - 1 -// - 2 -// - 3 -// - 4 -// - name: echo -// inputs: -// parameters: -// - name: item -// container: -// image: centos:latest -// imagePullPolicy: Always -// command: -// - sh -// - '-c' -// args: -// - echo -// - 'workflow number {{inputs.parameters.item}}' -// - sleep -// - '20' -// activeDeadlineSeconds: 2 # defined on template level, not workflow level ! -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeFailed, time.Minute*11). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowFailed, status.Phase) -// for _, node := range status.Nodes { -// if node.Type == wfv1.NodeTypePod { -// assert.Equal(t, wfv1.NodeFailed, node.Phase) -// assert.Contains(t, node.Message, "Pod was active on the node longer than the specified deadline") -// } -// } -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "fanout(0:1)") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// for _, c := range pod.Status.ContainerStatuses { -// if c.Name == common.WaitContainerName && c.State.Terminated == nil { -// assert.NotNil(t, c.State.Waiting) -// assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") -// assert.Nil(t, c.State.Running) -// } -// } -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "fanout(1:2)") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// for _, c := range pod.Status.ContainerStatuses { -// if c.Name == common.WaitContainerName && c.State.Terminated == nil { -// assert.NotNil(t, c.State.Waiting) -// assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") -// assert.Nil(t, c.State.Running) -// } -// } -// })). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "fanout(2:3)") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// for _, c := range pod.Status.ContainerStatuses { -// if c.Name == common.WaitContainerName && c.State.Terminated == nil { -// assert.NotNil(t, c.State.Waiting) -// assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") -// assert.Nil(t, c.State.Running) -// } -// } -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "fanout(3:4)") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// for _, c := range pod.Status.ContainerStatuses { -// if c.Name == common.WaitContainerName && c.State.Terminated == nil { -// assert.NotNil(t, c.State.Waiting) -// assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") -// assert.Nil(t, c.State.Running) -// } -// } -// }) -// } +func (s *WorkflowSuite) TestWorkflowFailedWhenAllPodSetFailedFromPending() { + (s.Given().Workflow(` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: active-deadline-fanout-template-level- + namespace: argo +spec: + entrypoint: entrypoint + templates: + - name: entrypoint + steps: + - - name: fanout + template: echo + arguments: + parameters: + - name: item + value: "{{item}}" + withItems: + - 1 + - 2 + - 3 + - 4 + - name: echo + inputs: + parameters: + - name: item + container: + image: centos:latest + imagePullPolicy: Always + command: + - sh + - '-c' + args: + - echo + - 'workflow number {{inputs.parameters.item}}' + - sleep + - '20' + activeDeadlineSeconds: 2 # defined on template level, not workflow level ! +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed, time.Minute*11). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowFailed, status.Phase) + for _, node := range status.Nodes { + if node.Type == wfv1.NodeTypePod { + assert.Equal(t, wfv1.NodeFailed, node.Phase) + assert.Contains(t, node.Message, "Pod was active on the node longer than the specified deadline") + } + } + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "fanout(0:1)") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + for _, c := range pod.Status.ContainerStatuses { + if c.Name == common.WaitContainerName && c.State.Terminated == nil { + assert.NotNil(t, c.State.Waiting) + assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") + assert.Nil(t, c.State.Running) + } + } + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "fanout(1:2)") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + for _, c := range pod.Status.ContainerStatuses { + if c.Name == common.WaitContainerName && c.State.Terminated == nil { + assert.NotNil(t, c.State.Waiting) + assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") + assert.Nil(t, c.State.Running) + } + } + })). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "fanout(2:3)") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + for _, c := range pod.Status.ContainerStatuses { + if c.Name == common.WaitContainerName && c.State.Terminated == nil { + assert.NotNil(t, c.State.Waiting) + assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") + assert.Nil(t, c.State.Running) + } + } + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "fanout(3:4)") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + for _, c := range pod.Status.ContainerStatuses { + if c.Name == common.WaitContainerName && c.State.Terminated == nil { + assert.NotNil(t, c.State.Waiting) + assert.Contains(t, c.State.Waiting.Reason, "PodInitializing") + assert.Nil(t, c.State.Running) + } + } + }) +} -// func (s *WorkflowSuite) TestWorkflowInlinePodName() { -// s.Given().Workflow(` -// apiVersion: argoproj.io/v1alpha1 -// kind: Workflow -// metadata: -// generateName: steps-inline- -// labels: -// workflows.argoproj.io/test: "true" -// spec: -// entrypoint: main -// templates: -// - name: main -// steps: -// - - name: a -// inline: -// container: -// image: argoproj/argosay:v2 -// command: -// - cowsay -// args: -// - "foo" -// `). -// When(). -// SubmitWorkflow(). -// WaitForWorkflow(fixtures.ToBeCompleted, time.Minute*1). -// Then(). -// ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { -// assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) -// }). -// ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { -// return strings.Contains(status.Name, "a") -// }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { -// assert.NotContains(t, pod.Name, "--") -// }) -// } +func (s *WorkflowSuite) TestWorkflowInlinePodName() { + s.Given().Workflow(` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: steps-inline- + labels: + workflows.argoproj.io/test: "true" +spec: + entrypoint: main + templates: + - name: main + steps: + - - name: a + inline: + container: + image: argoproj/argosay:v2 + command: + - cowsay + args: + - "foo" +`). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeCompleted, time.Minute*1). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) + }). + ExpectWorkflowNode(func(status v1alpha1.NodeStatus) bool { + return strings.Contains(status.Name, "a") + }, func(t *testing.T, status *v1alpha1.NodeStatus, pod *apiv1.Pod) { + assert.NotContains(t, pod.Name, "--") + }) +} -// func TestWorkflowSuite(t *testing.T) { -// suite.Run(t, new(WorkflowSuite)) -// } +func TestWorkflowSuite(t *testing.T) { + suite.Run(t, new(WorkflowSuite)) +} From fccfcdcea15b995d12fe4bf9071e2ea9f58e57ab Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 16:57:22 +0300 Subject: [PATCH 39/50] fix(tests): removed e2e test for now, because it only works locally Signed-off-by: MenD32 --- test/e2e/agent_test.go | 2 +- test/e2e/fixtures/when.go | 29 ------------------- test/e2e/retry_test.go | 60 --------------------------------------- 3 files changed, 1 insertion(+), 90 deletions(-) diff --git a/test/e2e/agent_test.go b/test/e2e/agent_test.go index 2a071f4b4f4f..4296227e3b61 100644 --- a/test/e2e/agent_test.go +++ b/test/e2e/agent_test.go @@ -64,7 +64,7 @@ spec: ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) // Ensure that the workflow ran for less than 10 seconds - assert.Less(t, status.FinishedAt.Sub(status.StartedAt.Time), time.Duration(10*fixtures.EnvFactor)*time.Second) + // assert.Less(t, status.FinishedAt.Sub(status.StartedAt.Time), time.Duration(10*fixtures.EnvFactor)*time.Second) var finishedTimes []time.Time var startTimes []time.Time diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go index 993e2b55b1ac..e33208b380ff 100644 --- a/test/e2e/fixtures/when.go +++ b/test/e2e/fixtures/when.go @@ -21,7 +21,6 @@ import ( "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/workflow/common" "github.com/argoproj/argo-workflows/v3/workflow/hydrator" - "github.com/argoproj/argo-workflows/v3/workflow/util" ) type When struct { @@ -523,34 +522,6 @@ func (w *When) DeleteConfigMap(name string) *When { return w } -func (w *When) DeletePod(name string) *When { - w.t.Helper() - ctx := context.Background() - fmt.Printf("deleting pod %s\n", name) - _, err := w.kubeClient.CoreV1().Pods(Namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - w.t.Fatalf("pod %s not found", name) - } - err = w.kubeClient.CoreV1().Pods(Namespace).Delete(ctx, name, metav1.DeleteOptions{}) - if err != nil { - w.t.Fatal(err) - } - return w -} - -func (w *When) DeleteNodePod(name string) *When { - w.t.Helper() - node, err := w.wf.GetNodeByName(name) - if err != nil { - w.t.Fatal(err) - } - fmt.Printf("deleting pod %s from node %s\n", "", name) - podName := util.GeneratePodName(w.wf.Name, name, node.GetTemplateName(), node.ID, util.GetWorkflowPodNameVersion(w.wf)) - w.DeletePod(podName) - - return w -} - func (w *When) PodsQuota(podLimit int) *When { w.t.Helper() ctx := context.Background() diff --git a/test/e2e/retry_test.go b/test/e2e/retry_test.go index c834ae208410..f55caf262b89 100644 --- a/test/e2e/retry_test.go +++ b/test/e2e/retry_test.go @@ -232,66 +232,6 @@ spec: }) } -func (s *RetryTestSuite) TestRetryDaemonContainer() { - s.Given(). - Workflow(` -metadata: - name: test-stepsdaemonretry-strategy -spec: - entrypoint: main - templates: - - name: main - steps: - - - name: server - template: server - - - name: client - template: client - arguments: - parameters: - - name: server-ip - value: "{{steps.server.ip}}" - withSequence: - count: "3" - - name: server - retryStrategy: - limit: "10" - daemon: true - container: - image: nginx:1.13 - readinessProbe: - httpGet: - path: / - port: 80 - initialDelaySeconds: 2 - timeoutSeconds: 1 - - name: client - inputs: - parameters: - - name: server-ip - synchronization: - mutex: - name: client-{{workflow.uid}} - container: - image: appropriate/curl:latest - command: ["/bin/sh", "-c"] - args: ["echo curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && curl --silent -G http://{{inputs.parameters.server-ip}}:80/ && sleep 10"] -`). - When(). - SubmitWorkflow(). - WaitForWorkflow((fixtures.Condition)(func(wf *wfv1.Workflow) (bool, string) { - return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool { - return node.GetTemplateName() == "client" && node.Phase == wfv1.NodeSucceeded - }), "waiting for at least one client to succeed" - })).DeleteNodePod("test-stepsdaemonretry-strategy[0].server(0)"). - Wait(10 * time.Second). - WaitForWorkflow(fixtures.ToBeSucceeded). - Then(). - ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - node := status.Nodes.FindByName("test-stepsdaemonretry-strategy[0].server(1)") - assert.NotNil(t, node) - }) -} - func TestRetrySuite(t *testing.T) { suite.Run(t, new(RetryTestSuite)) } From 82928905132e74907a50061613ef75eb14eca3b2 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 16:57:57 +0300 Subject: [PATCH 40/50] fix(tests): removed e2e test for now, because it only works locally Signed-off-by: MenD32 --- test/e2e/agent_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/agent_test.go b/test/e2e/agent_test.go index 4296227e3b61..2a071f4b4f4f 100644 --- a/test/e2e/agent_test.go +++ b/test/e2e/agent_test.go @@ -64,7 +64,7 @@ spec: ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) // Ensure that the workflow ran for less than 10 seconds - // assert.Less(t, status.FinishedAt.Sub(status.StartedAt.Time), time.Duration(10*fixtures.EnvFactor)*time.Second) + assert.Less(t, status.FinishedAt.Sub(status.StartedAt.Time), time.Duration(10*fixtures.EnvFactor)*time.Second) var finishedTimes []time.Time var startTimes []time.Time From 837c461f0274028c255795c2d7e8cc054828342f Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 18:25:15 +0300 Subject: [PATCH 41/50] fix(tests): e2e test for daemon retry Signed-off-by: MenD32 --- test/e2e/daemon_pod_test.go | 43 +++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index c92bcd29bd51..7d9e688b0d14 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -8,9 +8,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" ) @@ -177,6 +179,47 @@ func (s *DaemonPodSuite) TestMarkDaemonedPodSucceeded() { }) } +func (s *DaemonPodSuite) TestDaemonPodRetry() { + s.Given(). + Workflow(` +metadata: + name: daemon-retry +spec: + entrypoint: main + templates: + - name: main + dag: + tasks: + - name: daemoned + template: daemoned + - name: whale + dependencies: [daemoned] + template: whale-tmpl + - name: daemoned + retryStrategy: + limit: 2 + daemon: true + container: + image: argoproj/argosay:v2 + command: ["bash"] + args: ["-c", "sleep 5 && exit 1"] + - name: whale-tmpl + container: + image: argoproj/argosay:v2 + command: ["bash"] + args: ["-c", "echo hi & sleep 15 && echo bye"] +`). + When(). + SubmitWorkflow(). + Then(). + WaitForWorkflow(fixtures.Succeeded). + ExpectWorkflow(func(t *testing.T, md *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + node := status.Nodes.FindByDisplayName("daemoned(1)") + require.NotNil(t, node) + assert.Equal(t, wfv1.NodeSucceeded, node.Phase) + }) +} + func TestDaemonPodSuite(t *testing.T) { suite.Run(t, new(DaemonPodSuite)) } From 551562f8841a69bafb8aa9337f05dd415dd95ac2 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 18:26:36 +0300 Subject: [PATCH 42/50] fix(tests): e2e test for daemon retry Signed-off-by: MenD32 --- test/e2e/daemon_pod_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index 7d9e688b0d14..30c421ea0c1e 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -212,7 +212,7 @@ spec: When(). SubmitWorkflow(). Then(). - WaitForWorkflow(fixtures.Succeeded). + WaitForWorkflow(fixtures.ToBeSucceeded). ExpectWorkflow(func(t *testing.T, md *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { node := status.Nodes.FindByDisplayName("daemoned(1)") require.NotNil(t, node) From 2bb5c64ebc08a3a469d2138452fe2a1d29fddacc Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 18:28:13 +0300 Subject: [PATCH 43/50] fix(tests): e2e test for daemon retry Signed-off-by: MenD32 --- Makefile | 8 ++++---- test/e2e/daemon_pod_test.go | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index f3acdfd3c550..14be3b0849c7 100644 --- a/Makefile +++ b/Makefile @@ -558,10 +558,10 @@ endif ifeq ($(AUTH_MODE),sso) grep '127.0.0.1.*dex' /etc/hosts endif - grep '127.0.0.1.*azurite' /etc/hosts - grep '127.0.0.1.*minio' /etc/hosts - grep '127.0.0.1.*postgres' /etc/hosts - grep '127.0.0.1.*mysql' /etc/hosts + # grep '127.0.0.1.*azurite' /etc/hosts + # grep '127.0.0.1.*minio' /etc/hosts + # grep '127.0.0.1.*postgres' /etc/hosts + # grep '127.0.0.1.*mysql' /etc/hosts ifeq ($(RUN_MODE),local) env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) endif diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index 30c421ea0c1e..bd97dbb973a1 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -210,9 +210,8 @@ spec: args: ["-c", "echo hi & sleep 15 && echo bye"] `). When(). - SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeCompleted). Then(). - WaitForWorkflow(fixtures.ToBeSucceeded). ExpectWorkflow(func(t *testing.T, md *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { node := status.Nodes.FindByDisplayName("daemoned(1)") require.NotNil(t, node) From a76980f68d132cbf7b1ee697ac435a41131ae256 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 18:46:55 +0300 Subject: [PATCH 44/50] fix(tests): e2e test for daemon retry Signed-off-by: MenD32 --- test/e2e/daemon_pod_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index bd97dbb973a1..ebc2ba04abb7 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -210,7 +210,8 @@ spec: args: ["-c", "echo hi & sleep 15 && echo bye"] `). When(). - WaitForWorkflow(fixtures.ToBeCompleted). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeSucceeded). Then(). ExpectWorkflow(func(t *testing.T, md *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { node := status.Nodes.FindByDisplayName("daemoned(1)") From 8278b72008b21fd04b867aa63d1509e6bb58b149 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 18:50:51 +0300 Subject: [PATCH 45/50] fix(tests): e2e test for daemon retry Signed-off-by: MenD32 --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 14be3b0849c7..f3acdfd3c550 100644 --- a/Makefile +++ b/Makefile @@ -558,10 +558,10 @@ endif ifeq ($(AUTH_MODE),sso) grep '127.0.0.1.*dex' /etc/hosts endif - # grep '127.0.0.1.*azurite' /etc/hosts - # grep '127.0.0.1.*minio' /etc/hosts - # grep '127.0.0.1.*postgres' /etc/hosts - # grep '127.0.0.1.*mysql' /etc/hosts + grep '127.0.0.1.*azurite' /etc/hosts + grep '127.0.0.1.*minio' /etc/hosts + grep '127.0.0.1.*postgres' /etc/hosts + grep '127.0.0.1.*mysql' /etc/hosts ifeq ($(RUN_MODE),local) env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) PROFILE=$(PROFILE) kit $(TASKS) endif From a5eba4dd870f91f93dc7b0883d49609258765340 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 19:06:50 +0300 Subject: [PATCH 46/50] fix(tests): e2e test for daemon retry Signed-off-by: MenD32 --- test/e2e/daemon_pod_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index ebc2ba04abb7..1eee8bc2e894 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -216,7 +216,8 @@ spec: ExpectWorkflow(func(t *testing.T, md *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { node := status.Nodes.FindByDisplayName("daemoned(1)") require.NotNil(t, node) - assert.Equal(t, wfv1.NodeSucceeded, node.Phase) + assert.Equal(t, wfv1.NodeFailed, node.Phase) + assert.Equal(t, status.Phase, wfv1.WorkflowSucceeded) }) } From 02171da8291a9c9868f78ed3b6fd63e4d0a0f27d Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 19:07:38 +0300 Subject: [PATCH 47/50] fix(tests): e2e test for daemon retry Signed-off-by: MenD32 --- test/e2e/daemon_pod_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index 1eee8bc2e894..c12ea092ff0f 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -216,7 +216,7 @@ spec: ExpectWorkflow(func(t *testing.T, md *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { node := status.Nodes.FindByDisplayName("daemoned(1)") require.NotNil(t, node) - assert.Equal(t, wfv1.NodeFailed, node.Phase) + assert.Equal(t, wfv1.NodeSucceeded, node.Phase) assert.Equal(t, status.Phase, wfv1.WorkflowSucceeded) }) } From 8e30a716d2c6c31374b623600d97d84be8a14dcc Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 19:10:40 +0300 Subject: [PATCH 48/50] fix(tests): e2e test for daemon retry Signed-off-by: MenD32 --- test/e2e/daemon_pod_test.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index c12ea092ff0f..02107576d9bd 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -202,7 +202,7 @@ spec: container: image: argoproj/argosay:v2 command: ["bash"] - args: ["-c", "sleep 5 && exit 1"] + args: ["-c", "sleep 10 && exit 1"] - name: whale-tmpl container: image: argoproj/argosay:v2 @@ -214,9 +214,12 @@ spec: WaitForWorkflow(fixtures.ToBeSucceeded). Then(). ExpectWorkflow(func(t *testing.T, md *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - node := status.Nodes.FindByDisplayName("daemoned(1)") - require.NotNil(t, node) - assert.Equal(t, wfv1.NodeSucceeded, node.Phase) + failedNode := status.Nodes.FindByDisplayName("daemoned(0)") + succeededNode := status.Nodes.FindByDisplayName("daemoned(1)") + require.NotNil(t, failedNode) + require.NotNil(t, succeededNode) + assert.Equal(t, wfv1.NodeFailed, failedNode.Phase) + assert.Equal(t, wfv1.NodeSucceeded, succeededNode.Phase) assert.Equal(t, status.Phase, wfv1.WorkflowSucceeded) }) } From 80ffbb50c5a2b8301b12ca5077e1c44550566c42 Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sat, 19 Oct 2024 19:25:25 +0300 Subject: [PATCH 49/50] fix(tests): lint does not allow yoda syntax Signed-off-by: MenD32 --- test/e2e/daemon_pod_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/daemon_pod_test.go b/test/e2e/daemon_pod_test.go index 02107576d9bd..cc8e62423baf 100644 --- a/test/e2e/daemon_pod_test.go +++ b/test/e2e/daemon_pod_test.go @@ -220,7 +220,7 @@ spec: require.NotNil(t, succeededNode) assert.Equal(t, wfv1.NodeFailed, failedNode.Phase) assert.Equal(t, wfv1.NodeSucceeded, succeededNode.Phase) - assert.Equal(t, status.Phase, wfv1.WorkflowSucceeded) + assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) }) } From ec06bdb9614828d48d5766c578b55efecdf9f34b Mon Sep 17 00:00:00 2001 From: MenD32 Date: Sun, 8 Dec 2024 15:40:37 +0200 Subject: [PATCH 50/50] fix: fields.md Signed-off-by: MenD32 --- docs/fields.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/fields.md b/docs/fields.md index 2c07f10ebd7b..4f3b0cd2b458 100644 --- a/docs/fields.md +++ b/docs/fields.md @@ -6463,6 +6463,8 @@ ImageVolumeSource represents a image volume resource. - [`dag-custom-metrics.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-custom-metrics.yaml) +- [`dag-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-retry-strategy.yaml) + - [`dag-daemon-task.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-daemon-task.yaml) - [`dag-diamond-steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/dag-diamond-steps.yaml) @@ -6671,6 +6673,8 @@ ImageVolumeSource represents a image volume resource. - [`step-level-timeout.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/step-level-timeout.yaml) +- [`steps-daemon-retry-strategy.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-daemon-retry-strategy.yaml) + - [`steps-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps-inline-workflow.yaml) - [`steps.yaml`](https://github.com/argoproj/argo-workflows/blob/main/examples/steps.yaml)