Interface Definitions
Overview
Data Structures
Workflow
https://github.com/argoproj/argo-workflows/blob/main/pkg/apis/workflow/v1alpha1/workflow_types.go
// Workflow is the definition of a workflow resource
type Workflow struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
Status WorkflowStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
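A minimal sketch of constructing a Workflow object in Go and rendering it as a manifest. The module path github.com/argoproj/argo-workflows/v3 and the use of sigs.k8s.io/yaml are assumptions; the image and names are illustrative:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func main() {
	// A Workflow with a single container template used as the entrypoint.
	wf := wfv1.Workflow{
		TypeMeta:   metav1.TypeMeta{APIVersion: "argoproj.io/v1alpha1", Kind: "Workflow"},
		ObjectMeta: metav1.ObjectMeta{GenerateName: "hello-"},
		Spec: wfv1.WorkflowSpec{
			Entrypoint: "main",
			Templates: []wfv1.Template{{
				Name:      "main",
				Container: &apiv1.Container{Image: "alpine:3.19", Command: []string{"echo", "hello"}},
			}},
		},
	}
	out, err := yaml.Marshal(wf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // prints the YAML manifest that kubectl/argo would accept
}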
WorkflowTemplate
type WorkflowTemplate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}
ClusterWorkflowTemplate
type ClusterWorkflowTemplate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}
CronWorkflow
// CronWorkflow is the definition of a scheduled workflow resource
type CronWorkflow struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
Spec CronWorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
Status CronWorkflowStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// CronWorkflowSpec is the specification of a CronWorkflow
type CronWorkflowSpec struct {
// WorkflowSpec is the spec of the workflow to be run
WorkflowSpec WorkflowSpec `json:"workflowSpec" protobuf:"bytes,1,opt,name=workflowSpec,casttype=WorkflowSpec"`
// Schedule is a schedule to run the Workflow in Cron format
// Required; uses standard Linux cron syntax: https://en.wikipedia.org/wiki/Cron
Schedule string `json:"schedule" protobuf:"bytes,2,opt,name=schedule"`
// ConcurrencyPolicy is the K8s-style concurrency policy that will be used
// Policy applied when multiple scheduled runs would exist at the same time. One of: Allow, Replace, Forbid.
// Allow: new runs may execute concurrently with existing ones
// Replace: remove all existing runs before starting the new one
// Forbid: do not create a new run while a previous run is still incomplete
ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"`
// Suspend is a flag that will stop new CronWorkflows from running if set to true
// Set to 'true' to stop scheduling new runs of this CronWorkflow
Suspend bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"`
// StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its
// original scheduled time if it is missed.
// Maximum number of seconds after the original scheduled time within which a missed run will still be started, e.g. when the run was missed because the workflow-controller was down
StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=startingDeadlineSeconds"`
// SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time
// Number of successful workflow runs to keep in history
SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"`
// FailedJobsHistoryLimit is the number of failed jobs to be kept at a time
// Number of failed workflow runs to keep in history
FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"`
// Timezone is the timezone against which the cron schedule will be calculated, e.g. "Asia/Tokyo". Default is machine's local time.
// Timezone used when evaluating the cron schedule
Timezone string `json:"timezone,omitempty" protobuf:"bytes,8,opt,name=timezone"`
// WorkflowMetadata contains some metadata of the workflow to be run
WorkflowMetadata *metav1.ObjectMeta `json:"workflowMetadata,omitempty" protobuf:"bytes,9,opt,name=workflowMeta"`
}
type ConcurrencyPolicy string
const (
AllowConcurrent ConcurrencyPolicy = "Allow"
ForbidConcurrent ConcurrencyPolicy = "Forbid"
ReplaceConcurrent ConcurrencyPolicy = "Replace"
)
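A sketch of a CronWorkflowSpec using the fields above (module path assumed as in the earlier example; the schedule, timezone, and limits are illustrative):

package example

import (
	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

// cronSpec runs the given WorkflowSpec every 5 minutes, skips a new run while
// the previous one is still active, and keeps a short history of runs.
func cronSpec(workflowSpec wfv1.WorkflowSpec) wfv1.CronWorkflowSpec {
	startingDeadline := int64(60) // still start a run missed by up to 60s (e.g. controller downtime)
	successHistory := int32(3)
	failedHistory := int32(1)
	return wfv1.CronWorkflowSpec{
		WorkflowSpec:               workflowSpec,
		Schedule:                   "*/5 * * * *",
		Timezone:                   "Asia/Shanghai",
		ConcurrencyPolicy:          wfv1.ForbidConcurrent,
		StartingDeadlineSeconds:    &startingDeadline,
		SuccessfulJobsHistoryLimit: &successHistory,
		FailedJobsHistoryLimit:     &failedHistory,
	}
}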
WorkflowSpec
// WorkflowSpec is the specification of a Workflow.
type WorkflowSpec struct {
// Templates is a list of workflow templates used in a workflow
// +patchStrategy=merge
// +patchMergeKey=name
// The templates (sub-tasks) that make up this workflow
Templates []Template `json:"templates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,opt,name=templates"`
// Entrypoint is a template reference to the starting point of the workflow.
// Entry point of the workflow, similar to a program's "main" function
Entrypoint string `json:"entrypoint,omitempty" protobuf:"bytes,2,opt,name=entrypoint"`
// Arguments contain the parameters and artifacts sent to the workflow entrypoint
// Parameters are referencable globally using the 'workflow' variable prefix.
// e.g. {{workflow.parameters.myparam}}
// Parameters and artifacts passed to the entrypoint; downstream templates can reference them via "{{workflow.parameters.myparam}}"
Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"`
// ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.
// ServiceAccount used to run the workflow's pods
ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,4,opt,name=serviceAccountName"`
// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.
// ServiceAccountName of ExecutorConfig must be specified if this value is false.
// Whether to automatically mount the ServiceAccount token into the pods
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,28,opt,name=automountServiceAccountToken"`
// Executor holds configurations of executor containers of the workflow.
// TODO;
Executor *ExecutorConfig `json:"executor,omitempty" protobuf:"bytes,29,opt,name=executor"`
// Volumes is a list of volumes that can be mounted by containers in a workflow.
// +patchStrategy=merge
// +patchMergeKey=name
// Volumes that can be mounted by pods in the workflow; see [Pod Volume](/docs/concepts/workloads/pods-volume/)
Volumes []apiv1.Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,5,opt,name=volumes"`
// VolumeClaimTemplates is a list of claims that containers are allowed to reference.
// The Workflow controller will create the claims at the beginning of the workflow
// and delete the claims upon completion of the workflow
VolumeClaimTemplates []apiv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,6,opt,name=volumeClaimTemplates"`
// Parallelism limits the max total parallel pods that can execute at the same time in a workflow
// Maximum number of pods that may run in parallel at the same time
Parallelism *int64 `json:"parallelism,omitempty" protobuf:"bytes,7,opt,name=parallelism"`
// ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.
ArtifactRepositoryRef *ArtifactRepositoryRef `json:"artifactRepositoryRef,omitempty" protobuf:"bytes,8,opt,name=artifactRepositoryRef"`
// Suspend will suspend the workflow and prevent execution of any future steps in the workflow
// Suspend the workflow
Suspend *bool `json:"suspend,omitempty" protobuf:"bytes,9,opt,name=suspend"`
// NodeSelector is a selector which will result in all pods of the workflow
// to be scheduled on the selected node(s). This is able to be overridden by
// a nodeSelector specified in the template.
// Schedule workflow pods onto nodes matching this selector
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,10,opt,name=nodeSelector"`
// Affinity sets the scheduling constraints for all pods in the workflow.
// Can be overridden by an affinity specified in the template
// Affinity settings; see [Pod Affinity](/docs/concepts/workloads/pods-affinity/)
Affinity *apiv1.Affinity `json:"affinity,omitempty" protobuf:"bytes,11,opt,name=affinity"`
// Tolerations to apply to workflow pods.
// +patchStrategy=merge
// +patchMergeKey=key
// Tolerations for node taints; see [Toleration](/docs/concepts/workloads/pods/#toleration)
Tolerations []apiv1.Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,12,opt,name=tolerations"`
// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
// +patchStrategy=merge
// +patchMergeKey=name
// Secrets used for pulling images
ImagePullSecrets []apiv1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,13,opt,name=imagePullSecrets"`
// Host networking requested for this workflow pod. Default to false.
// Use the host network for workflow pods
HostNetwork *bool `json:"hostNetwork,omitempty" protobuf:"bytes,14,opt,name=hostNetwork"`
// Set DNS policy for workflow pods.
// Defaults to "ClusterFirst".
// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
// To have DNS options set along with hostNetwork, you have to specify DNS policy
// explicitly to 'ClusterFirstWithHostNet'.
// DNS policy for workflow pods; see [PodDNSConfig](/docs/concepts/workloads/pods/#poddnsconfig)
DNSPolicy *apiv1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,15,opt,name=dnsPolicy"`
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy.
// Additional DNS parameters for workflow pods; see [PodDNSConfig](/docs/concepts/workloads/pods/#poddnsconfig)
DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,16,opt,name=dnsConfig"`
// OnExit is a template reference which is invoked at the end of the
// workflow, irrespective of the success, failure, or error of the
// primary workflow.
// Invoked when the workflow finishes, regardless of success, failure, or error; commonly used for notifications
OnExit string `json:"onExit,omitempty" protobuf:"bytes,17,opt,name=onExit"`
// TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it
// Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be
// deleted after the time to live expires. If this field is unset,
// the controller config map will hold the default values.
// Retention (time to live) of the workflow after it finishes
TTLStrategy *TTLStrategy `json:"ttlStrategy,omitempty" protobuf:"bytes,30,opt,name=ttlStrategy"`
// Optional duration in seconds relative to the workflow start time which the workflow is
// allowed to run before the controller terminates the workflow. A value of zero is used to
// terminate a Running workflow
// Maximum duration the workflow is allowed to run; no limit by default
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"bytes,19,opt,name=activeDeadlineSeconds"`
// Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.
// When the controller processes a limited number of workflows in parallel, higher-priority workflows are processed first
Priority *int32 `json:"priority,omitempty" protobuf:"bytes,20,opt,name=priority"`
// Set scheduler name for all pods.
// Will be overridden if container/script template's scheduler name is set.
// Default scheduler will be used if neither specified.
// +optional
// Name of the Kubernetes scheduler to use for all workflow pods
SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,21,opt,name=schedulerName"`
// PodGC describes the strategy to use when deleting completed pods
// Strategy for deleting pods after they complete
PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,22,opt,name=podGC"`
// PriorityClassName to apply to workflow pods.
// TODO;
PodPriorityClassName string `json:"podPriorityClassName,omitempty" protobuf:"bytes,23,opt,name=podPriorityClassName"`
// Priority to apply to workflow pods.
// DEPRECATED: Use PodPriorityClassName instead.
PodPriority *int32 `json:"podPriority,omitempty" protobuf:"bytes,24,opt,name=podPriority"`
// +patchStrategy=merge
// +patchMergeKey=ip
// See [PodSpec](/docs/concepts/workloads/pods/#podspec)
HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,25,opt,name=hostAliases"`
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
// Pod security context; see [Pod Security](/docs/concepts/workloads/pods-security/)
SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,26,opt,name=securityContext"`
// PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of
// container fields which are not strings (e.g. resource limits).
PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,27,opt,name=podSpecPatch"`
// PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods.
// Controller will automatically add the selector with workflow name, if selector is empty.
// Optional: Defaults to empty.
// +optional
PodDisruptionBudget *policyv1.PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty" protobuf:"bytes,31,opt,name=podDisruptionBudget"`
// Metrics are a list of metrics emitted from this Workflow
// Metrics emitted from this workflow
Metrics *Metrics `json:"metrics,omitempty" protobuf:"bytes,32,opt,name=metrics"`
// Shutdown will shutdown the workflow according to its ShutdownStrategy
// TODO: strategy used when shutting down the workflow
Shutdown ShutdownStrategy `json:"shutdown,omitempty" protobuf:"bytes,33,opt,name=shutdown,casttype=ShutdownStrategy"`
// WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution
// TODO;
WorkflowTemplateRef *WorkflowTemplateRef `json:"workflowTemplateRef,omitempty" protobuf:"bytes,34,opt,name=workflowTemplateRef"`
// Synchronization holds synchronization lock configuration for this Workflow
// Synchronization lock configuration for this workflow
Synchronization *Synchronization `json:"synchronization,omitempty" protobuf:"bytes,35,opt,name=synchronization,casttype=Synchronization"`
// VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows
// Strategy for deleting volumes created by the workflow after it completes
VolumeClaimGC *VolumeClaimGC `json:"volumeClaimGC,omitempty" protobuf:"bytes,36,opt,name=volumeClaimGC,casttype=VolumeClaimGC"`
// RetryStrategy for all templates in the workflow.
// Retry strategy
RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,37,opt,name=retryStrategy"`
// PodMetadata defines additional metadata that should be applied to workflow pods
// Additional labels and annotations injected into the workflow's pods
PodMetadata *Metadata `json:"podMetadata,omitempty" protobuf:"bytes,38,opt,name=podMetadata"`
// TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level
// Default values applied to every template (sub-task) in the workflow
TemplateDefaults *Template `json:"templateDefaults,omitempty" protobuf:"bytes,39,opt,name=templateDefaults"`
// ArchiveLogs indicates if the container logs should be archived
// Whether container logs are archived
ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,40,opt,name=archiveLogs"`
// Hooks holds the lifecycle hook which is invoked at lifecycle of
// step, irrespective of the success, failure, or error status of the primary step
// TODO;
Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,41,opt,name=hooks"`
// WorkflowMetadata contains some metadata of the workflow to refer to
// Metadata to apply to the workflow
WorkflowMetadata *WorkflowMetadata `json:"workflowMetadata,omitempty" protobuf:"bytes,42,opt,name=workflowMetadata"`
// ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts
// unless Artifact.ArtifactGC is specified, which overrides this)
// Strategy for garbage-collecting artifacts after the workflow completes or is deleted
ArtifactGC *ArtifactGC `json:"artifactGC,omitempty" protobuf:"bytes,43,opt,name=artifactGC"`
}
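A sketch showing how the lifecycle-related WorkflowSpec fields fit together (OnExit, ActiveDeadlineSeconds, TTLStrategy, and PodGC); the "notify" template name and the durations are illustrative:

package example

import (
	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

// withLifecycle decorates a WorkflowSpec so that it times out after one hour,
// always runs a "notify" exit handler, garbage-collects successful pods, and
// deletes the finished Workflow object itself after one day.
func withLifecycle(spec wfv1.WorkflowSpec) wfv1.WorkflowSpec {
	deadline := int64(3600)
	ttl := int32(24 * 3600)
	spec.OnExit = "notify" // must match the name of a Template in spec.Templates
	spec.ActiveDeadlineSeconds = &deadline
	spec.PodGC = &wfv1.PodGC{Strategy: wfv1.PodGCOnPodSuccess}
	spec.TTLStrategy = &wfv1.TTLStrategy{SecondsAfterCompletion: &ttl}
	return spec
}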
Template
// Template is a reusable and composable unit of execution in a workflow
type Template struct {
// Name is the name of the template
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// Inputs describe what inputs parameters and artifacts are supplied to this template
Inputs Inputs `json:"inputs,omitempty" protobuf:"bytes,5,opt,name=inputs"`
// Outputs describe the parameters and artifacts that this template produces
Outputs Outputs `json:"outputs,omitempty" protobuf:"bytes,6,opt,name=outputs"`
// NodeSelector is a selector to schedule this step of the workflow to be
// run on the selected node(s). Overrides the selector set at the workflow level.
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,opt,name=nodeSelector"`
// Affinity sets the pod's scheduling constraints
// Overrides the affinity set at the workflow level (if any)
Affinity *apiv1.Affinity `json:"affinity,omitempty" protobuf:"bytes,8,opt,name=affinity"`
// Metadata sets the pod's metadata, i.e. annotations and labels
Metadata Metadata `json:"metadata,omitempty" protobuf:"bytes,9,opt,name=metadata"`
// Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness
Daemon *bool `json:"daemon,omitempty" protobuf:"bytes,10,opt,name=daemon"`
// Steps define a series of sequential/parallel workflow steps
Steps []ParallelSteps `json:"steps,omitempty" protobuf:"bytes,11,opt,name=steps"`
// Container is the main container image to run in the pod
Container *apiv1.Container `json:"container,omitempty" protobuf:"bytes,12,opt,name=container"`
// ContainerSet groups multiple containers within a single pod.
ContainerSet *ContainerSetTemplate `json:"containerSet,omitempty" protobuf:"bytes,40,opt,name=containerSet"`
// Script runs a portion of code against an interpreter
Script *ScriptTemplate `json:"script,omitempty" protobuf:"bytes,13,opt,name=script"`
// Resource template subtype which can run k8s resources
Resource *ResourceTemplate `json:"resource,omitempty" protobuf:"bytes,14,opt,name=resource"`
// DAG template subtype which runs a DAG
DAG *DAGTemplate `json:"dag,omitempty" protobuf:"bytes,15,opt,name=dag"`
// Suspend template subtype which can suspend a workflow when reaching the step
Suspend *SuspendTemplate `json:"suspend,omitempty" protobuf:"bytes,16,opt,name=suspend"`
// Data is a data template
Data *Data `json:"data,omitempty" protobuf:"bytes,39,opt,name=data"`
// HTTP makes a HTTP request
HTTP *HTTP `json:"http,omitempty" protobuf:"bytes,42,opt,name=http"`
// Plugin is a plugin template
Plugin *Plugin `json:"plugin,omitempty" protobuf:"bytes,43,opt,name=plugin"`
// Volumes is a list of volumes that can be mounted by containers in a template.
// +patchStrategy=merge
// +patchMergeKey=name
Volumes []apiv1.Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,17,opt,name=volumes"`
// InitContainers is a list of containers which run before the main container.
// +patchStrategy=merge
// +patchMergeKey=name
InitContainers []UserContainer `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,18,opt,name=initContainers"`
// Sidecars is a list of containers which run alongside the main container
// Sidecars are automatically killed when the main container completes
// +patchStrategy=merge
// +patchMergeKey=name
Sidecars []UserContainer `json:"sidecars,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,19,opt,name=sidecars"`
// Location in which all files related to the step will be stored (logs, artifacts, etc...).
// Can be overridden by individual items in Outputs. If omitted, will use the default
// artifact repository location configured in the controller, appended with the
// <workflowname>/<nodename> in the key.
ArchiveLocation *ArtifactLocation `json:"archiveLocation,omitempty" protobuf:"bytes,20,opt,name=archiveLocation"`
// Optional duration in seconds relative to the StartTime that the pod may be active on a node
// before the system actively tries to terminate the pod; value must be positive integer
// This field is only applicable to container and script templates.
ActiveDeadlineSeconds *intstr.IntOrString `json:"activeDeadlineSeconds,omitempty" protobuf:"bytes,21,opt,name=activeDeadlineSeconds"`
// RetryStrategy describes how to retry a template when it fails
RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,22,opt,name=retryStrategy"`
// Parallelism limits the max total parallel pods that can execute at the same time within the
// boundaries of this template invocation. If additional steps/dag templates are invoked, the
// pods created by those templates will not be counted towards this total.
Parallelism *int64 `json:"parallelism,omitempty" protobuf:"bytes,23,opt,name=parallelism"`
// FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this
// template is expanded with `withItems`, etc.
FailFast *bool `json:"failFast,omitempty" protobuf:"varint,41,opt,name=failFast"`
// Tolerations to apply to workflow pods.
// +patchStrategy=merge
// +patchMergeKey=key
Tolerations []apiv1.Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,24,opt,name=tolerations"`
// If specified, the pod will be dispatched by specified scheduler.
// Or it will be dispatched by workflow scope scheduler if specified.
// If neither specified, the pod will be dispatched by default scheduler.
// +optional
SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,25,opt,name=schedulerName"`
// PriorityClassName to apply to workflow pods.
PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,26,opt,name=priorityClassName"`
// Priority to apply to workflow pods.
Priority *int32 `json:"priority,omitempty" protobuf:"bytes,27,opt,name=priority"`
// ServiceAccountName to apply to workflow pods
ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,28,opt,name=serviceAccountName"`
// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.
// ServiceAccountName of ExecutorConfig must be specified if this value is false.
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,32,opt,name=automountServiceAccountToken"`
// Executor holds configurations of the executor container.
Executor *ExecutorConfig `json:"executor,omitempty" protobuf:"bytes,33,opt,name=executor"`
// HostAliases is an optional list of hosts and IPs that will be injected into the pod spec
// +patchStrategy=merge
// +patchMergeKey=ip
HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,29,opt,name=hostAliases"`
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,30,opt,name=securityContext"`
// PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of
// container fields which are not strings (e.g. resource limits).
PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,31,opt,name=podSpecPatch"`
// Metrics are a list of metrics emitted from this template
Metrics *Metrics `json:"metrics,omitempty" protobuf:"bytes,35,opt,name=metrics"`
// Synchronization holds synchronization lock configuration for this template
Synchronization *Synchronization `json:"synchronization,omitempty" protobuf:"bytes,36,opt,name=synchronization,casttype=Synchronization"`
// Memoize allows templates to use outputs generated from already executed templates
Memoize *Memoize `json:"memoize,omitempty" protobuf:"bytes,37,opt,name=memoize"`
// Timeout allows to set the total node execution timeout duration counting from the node's start time.
// This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates.
Timeout string `json:"timeout,omitempty" protobuf:"bytes,38,opt,name=timeout"`
}
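A sketch of two Templates wired together: a steps template that calls a script template and passes it an input parameter. The names, image, and message value are illustrative, and AnyString is assumed to be a string-backed type as in the upstream package:

package example

import (
	apiv1 "k8s.io/api/core/v1"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func stepsAndScriptTemplates() []wfv1.Template {
	msg := wfv1.AnyString("hello from the steps template")
	return []wfv1.Template{
		{
			// Entry template: one (parallel) step group containing a single step.
			Name: "main",
			Steps: []wfv1.ParallelSteps{{
				Steps: []wfv1.WorkflowStep{{
					Name:     "print",
					Template: "print-msg",
					Arguments: wfv1.Arguments{
						Parameters: []wfv1.Parameter{{Name: "msg", Value: &msg}},
					},
				}},
			}},
		},
		{
			// Script template: declares the input parameter and runs inline Python.
			Name:   "print-msg",
			Inputs: wfv1.Inputs{Parameters: []wfv1.Parameter{{Name: "msg"}}},
			Script: &wfv1.ScriptTemplate{
				Container: apiv1.Container{Image: "python:3.12", Command: []string{"python"}},
				Source:    `print("{{inputs.parameters.msg}}")`,
			},
		},
	}
}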
Inputs
// Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another
type Inputs struct {
// Parameters are a list of parameters passed as inputs
// +patchStrategy=merge
// +patchMergeKey=name
Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,opt,name=parameters"`
// Artifact are a list of artifacts passed as inputs
// +patchStrategy=merge
// +patchMergeKey=name
Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,opt,name=artifacts"`
}
Outputs
// Outputs hold parameters, artifacts, and results from a step
type Outputs struct {
// Parameters holds the list of output parameters produced by a step
// +patchStrategy=merge
// +patchMergeKey=name
Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=parameters"`
// Artifacts holds the list of output artifacts produced by a step
// +patchStrategy=merge
// +patchMergeKey=name
Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=artifacts"`
// Result holds the result (stdout) of a script template
Result *string `json:"result,omitempty" protobuf:"bytes,3,opt,name=result"`
// ExitCode holds the exit code of a script template
ExitCode *string `json:"exitCode,omitempty" protobuf:"bytes,4,opt,name=exitCode"`
}
Metadata
// Pod metadata
type Metadata struct {
Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,opt,name=annotations"`
Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,2,opt,name=labels"`
}
ParallelSteps
// +kubebuilder:validation:Type=array
type ParallelSteps struct {
Steps []WorkflowStep `json:"-" protobuf:"bytes,1,rep,name=steps"`
}
apiv1.Container
ContainerSetTemplate
type ContainerSetTemplate struct {
Containers []ContainerNode `json:"containers" protobuf:"bytes,4,rep,name=containers"`
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,3,rep,name=volumeMounts"`
// RetryStrategy describes how to retry container nodes in the container set if they fail.
// The number of retries (default 0) and the sleep duration between retries (default 0s, i.e. retry immediately) can be set.
RetryStrategy *ContainerSetRetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,5,opt,name=retryStrategy"`
}
ScriptTemplate
// ScriptTemplate is a template subtype to enable scripting through code steps
type ScriptTemplate struct {
apiv1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"`
// Source contains the source code of the script to execute
Source string `json:"source" protobuf:"bytes,2,opt,name=source"`
}
ResourceTemplate
// ResourceTemplate is a template subtype to manipulate kubernetes resources
type ResourceTemplate struct {
// Action is the action to perform to the resource.
// Must be one of: get, create, apply, delete, replace, patch
Action string `json:"action" protobuf:"bytes,1,opt,name=action"`
// MergeStrategy is the strategy used to merge a patch. It defaults to "strategic"
// Must be one of: strategic, merge, json
MergeStrategy string `json:"mergeStrategy,omitempty" protobuf:"bytes,2,opt,name=mergeStrategy"`
// Manifest contains the kubernetes manifest
Manifest string `json:"manifest,omitempty" protobuf:"bytes,3,opt,name=manifest"`
// ManifestFrom is the source for a single kubernetes manifest
ManifestFrom *ManifestFrom `json:"manifestFrom,omitempty" protobuf:"bytes,8,opt,name=manifestFrom"`
// SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.
SetOwnerReference bool `json:"setOwnerReference,omitempty" protobuf:"varint,4,opt,name=setOwnerReference"`
// SuccessCondition is a label selector expression which describes the conditions
// of the k8s resource in which it is acceptable to proceed to the following step
SuccessCondition string `json:"successCondition,omitempty" protobuf:"bytes,5,opt,name=successCondition"`
// FailureCondition is a label selector expression which describes the conditions
// of the k8s resource in which the step was considered failed
FailureCondition string `json:"failureCondition,omitempty" protobuf:"bytes,6,opt,name=failureCondition"`
// Flags is a set of additional options passed to kubectl before submitting a resource
// I.e. to disable resource validation:
// flags: [
// "--validate=false" # disable resource validation
// ]
Flags []string `json:"flags,omitempty" protobuf:"varint,7,opt,name=flags"`
}
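A sketch of a resource template that creates a Kubernetes Job and waits on its status via SuccessCondition/FailureCondition; the manifest and conditions are illustrative:

package example

import (
	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func createJobTemplate() wfv1.Template {
	return wfv1.Template{
		Name: "create-job",
		Resource: &wfv1.ResourceTemplate{
			Action:            "create",
			SetOwnerReference: true, // the Job is deleted together with the Workflow
			SuccessCondition:  "status.succeeded > 0",
			FailureCondition:  "status.failed > 3",
			Manifest: `apiVersion: batch/v1
kind: Job
metadata:
  generateName: pi-job-
spec:
  template:
    spec:
      containers:
      - name: pi
        image: perl:5.34
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
`,
		},
	}
}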
DAGTemplate
// DAGTemplate is a template subtype for directed acyclic graph templates
type DAGTemplate struct {
// Target are one or more names of targets to execute in a DAG
Target string `json:"target,omitempty" protobuf:"bytes,1,opt,name=target"`
// Tasks are a list of DAG tasks
// +patchStrategy=merge
// +patchMergeKey=name
Tasks []DAGTask `json:"tasks" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tasks"`
// This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps,
// as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed
// before failing the DAG itself.
// The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to
// completion (either success or failure), regardless of the failed outcomes of branches in the DAG.
// More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442
FailFast *bool `json:"failFast,omitempty" protobuf:"varint,3,opt,name=failFast"`
}
// DAGTask represents a node in the graph during DAG execution
type DAGTask struct {
// Name is the name of the target
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Name of template to execute
Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
// Inline is the template. Template must be empty if this is declared (and vice-versa).
Inline *Template `json:"inline,omitempty" protobuf:"bytes,14,opt,name=inline"`
// Arguments are the parameter and artifact arguments to the template
Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"`
// TemplateRef is the reference to the template resource to execute.
TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"`
// Dependencies are name of other targets which this depends on
Dependencies []string `json:"dependencies,omitempty" protobuf:"bytes,5,rep,name=dependencies"`
// WithItems expands a task into multiple parallel tasks from the items in the list
WithItems []Item `json:"withItems,omitempty" protobuf:"bytes,6,rep,name=withItems"`
// WithParam expands a task into multiple parallel tasks from the value in the parameter,
// which is expected to be a JSON list.
WithParam string `json:"withParam,omitempty" protobuf:"bytes,7,opt,name=withParam"`
// WithSequence expands a task into a numeric sequence
WithSequence *Sequence `json:"withSequence,omitempty" protobuf:"bytes,8,opt,name=withSequence"`
// When is an expression in which the task should conditionally execute
When string `json:"when,omitempty" protobuf:"bytes,9,opt,name=when"`
// ContinueOn makes argo to proceed with the following step even if this step fails.
// Errors and Failed states can be specified
ContinueOn *ContinueOn `json:"continueOn,omitempty" protobuf:"bytes,10,opt,name=continueOn"`
// OnExit is a template reference which is invoked at the end of the
// template, irrespective of the success, failure, or error of the
// primary template.
// DEPRECATED: Use Hooks[exit].Template instead.
OnExit string `json:"onExit,omitempty" protobuf:"bytes,11,opt,name=onExit"`
// Depends are name of other targets which this depends on
Depends string `json:"depends,omitempty" protobuf:"bytes,12,opt,name=depends"`
// Hooks hold the lifecycle hook which is invoked at lifecycle of
// task, irrespective of the success, failure, or error status of the primary task
Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,13,opt,name=hooks"`
}
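A sketch of a diamond-shaped DAG template built from the DAGTask fields above, using Depends for the dependency expressions; the task and template names are illustrative:

package example

import (
	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

// diamondDAG runs A first, then B and C in parallel, then D once both finish.
func diamondDAG() wfv1.Template {
	return wfv1.Template{
		Name: "diamond",
		DAG: &wfv1.DAGTemplate{
			Tasks: []wfv1.DAGTask{
				{Name: "A", Template: "echo"},
				{Name: "B", Template: "echo", Depends: "A"},
				{Name: "C", Template: "echo", Depends: "A"},
				{Name: "D", Template: "echo", Depends: "B && C"},
			},
		},
	}
}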
SuspendTemplate
// SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time
type SuspendTemplate struct {
// Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds.
// Could also be a Duration, e.g.: "2m", "6h", "1d"
Duration string `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"`
}
Data
// Data is a data template
type Data struct {
// Source sources external data into a data template
Source DataSource `json:"source" protobuf:"bytes,1,opt,name=source"`
// Transformation applies a set of transformations
Transformation Transformation `json:"transformation" protobuf:"bytes,2,rep,name=transformation"`
}
type Transformation []TransformationStep
type TransformationStep struct {
// Expression defines an expr expression to apply
Expression string `json:"expression" protobuf:"bytes,1,opt,name=expression"`
}
// DataSource sources external data into a data template
type DataSource struct {
// ArtifactPaths is a data transformation that collects a list of artifact paths
ArtifactPaths *ArtifactPaths `json:"artifactPaths,omitempty" protobuf:"bytes,1,opt,name=artifactPaths"`
}
// ArtifactPaths expands a step from a collection of artifacts
type ArtifactPaths struct {
// Artifact is the artifact location from which to source the artifacts, it can be a directory
Artifact `json:",inline" protobuf:"bytes,1,opt,name=artifact"`
}
HTTP
type HTTP struct {
// Method is HTTP methods for HTTP Request
Method string `json:"method,omitempty" protobuf:"bytes,1,opt,name=method"`
// URL of the HTTP Request
URL string `json:"url" protobuf:"bytes,2,opt,name=url"`
// Headers are an optional list of headers to send with HTTP requests
Headers HTTPHeaders `json:"headers,omitempty" protobuf:"bytes,3,rep,name=headers"`
// TimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"bytes,4,opt,name=timeoutSeconds"`
// SuccessCondition is an expression if evaluated to true is considered successful
SuccessCondition string `json:"successCondition,omitempty" protobuf:"bytes,6,opt,name=successCondition"`
// Body is content of the HTTP Request
Body string `json:"body,omitempty" protobuf:"bytes,5,opt,name=body"`
// BodyFrom is content of the HTTP Request as Bytes
BodyFrom *HTTPBodySource `json:"bodyFrom,omitempty" protobuf:"bytes,8,opt,name=bodyFrom"`
// InsecureSkipVerify, if set to true, will skip TLS verification for the HTTP client
InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty" protobuf:"bytes,7,opt,name=insecureSkipVerify"`
}
type HTTPHeader struct {
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
ValueFrom *HTTPHeaderSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}
type HTTPHeaderSource struct {
SecretKeyRef *v1.SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,1,opt,name=secretKeyRef"`
}
type HTTPHeaders []HTTPHeader
// HTTPBodySource contains the source of the HTTP body.
type HTTPBodySource struct {
Bytes []byte `json:"bytes,omitempty" protobuf:"bytes,1,opt,name=bytes"`
}
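A sketch of an HTTP template that probes an endpoint and treats a 200 response as success; the URL, header, and the successCondition expression are illustrative:

package example

import (
	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func httpCheckTemplate() wfv1.Template {
	timeout := int64(10)
	return wfv1.Template{
		Name: "http-check",
		HTTP: &wfv1.HTTP{
			Method:           "GET",
			URL:              "https://example.com/healthz",
			Headers:          wfv1.HTTPHeaders{{Name: "Accept", Value: "application/json"}},
			TimeoutSeconds:   &timeout,
			SuccessCondition: "response.statusCode == 200",
		},
	}
}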
Plugin
// Plugin is an Object with exactly one key
type Plugin struct {
Object `json:",inline" protobuf:"bytes,1,opt,name=object"`
}
apiv1.Volume
See Pod
UserContainer
// UserContainer is a container specified by a user.
type UserContainer struct {
apiv1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"`
// MirrorVolumeMounts will mount the same volumes specified in the main container
// to the container (including artifacts), at the same mountPaths. This enables
// dind daemon to partially see the same filesystem as the main container in
// order to use features such as docker volume binding
MirrorVolumeMounts *bool `json:"mirrorVolumeMounts,omitempty" protobuf:"varint,2,opt,name=mirrorVolumeMounts"`
}
ArtifactLocation
// ArtifactLocation describes a location for a single or multiple artifacts.
// It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname).
// It is also used to describe the location of multiple artifacts such as the archive location
// of a single workflow step, which the executor will use as a default location to store its files.
type ArtifactLocation struct {
// ArchiveLogs indicates if the container logs should be archived
ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,1,opt,name=archiveLogs"`
// S3 contains S3 artifact location details
S3 *S3Artifact `json:"s3,omitempty" protobuf:"bytes,2,opt,name=s3"`
// Git contains git artifact location details
Git *GitArtifact `json:"git,omitempty" protobuf:"bytes,3,opt,name=git"`
// HTTP contains HTTP artifact location details
HTTP *HTTPArtifact `json:"http,omitempty" protobuf:"bytes,4,opt,name=http"`
// Artifactory contains artifactory artifact location details
Artifactory *ArtifactoryArtifact `json:"artifactory,omitempty" protobuf:"bytes,5,opt,name=artifactory"`
// HDFS contains HDFS artifact location details
HDFS *HDFSArtifact `json:"hdfs,omitempty" protobuf:"bytes,6,opt,name=hdfs"`
// Raw contains raw artifact location details
Raw *RawArtifact `json:"raw,omitempty" protobuf:"bytes,7,opt,name=raw"`
// OSS contains OSS artifact location details
OSS *OSSArtifact `json:"oss,omitempty" protobuf:"bytes,8,opt,name=oss"`
// GCS contains GCS artifact location details
GCS *GCSArtifact `json:"gcs,omitempty" protobuf:"bytes,9,opt,name=gcs"`
// Azure contains Azure Storage artifact location details
Azure *AzureArtifact `json:"azure,omitempty" protobuf:"bytes,10,opt,name=azure"`
}
RetryStrategy
// RetryStrategy provides controls on how to retry a workflow step
type RetryStrategy struct {
// Limit is the maximum number of retry attempts when retrying a container. It does not include the original
// container; the maximum number of total attempts will be `limit + 1`.
Limit *intstr.IntOrString `json:"limit,omitempty" protobuf:"varint,1,opt,name=limit"`
// RetryPolicy is a policy of NodePhase statuses that will be retried
RetryPolicy RetryPolicy `json:"retryPolicy,omitempty" protobuf:"bytes,2,opt,name=retryPolicy,casttype=RetryPolicy"`
// Backoff is a backoff strategy
Backoff *Backoff `json:"backoff,omitempty" protobuf:"bytes,3,opt,name=backoff,casttype=Backoff"`
// Affinity prevents running workflow's step on the same host
Affinity *RetryAffinity `json:"affinity,omitempty" protobuf:"bytes,4,opt,name=affinity"`
// Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not
// be retried and the retry strategy will be ignored
Expression string `json:"expression,omitempty" protobuf:"bytes,5,opt,name=expression"`
}
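A sketch of a RetryStrategy with a bounded number of attempts. The "OnFailure" policy value is an assumption based on the upstream documentation (RetryPolicy is treated here as a string-backed type):

package example

import (
	"k8s.io/apimachinery/pkg/util/intstr"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func boundedRetry() *wfv1.RetryStrategy {
	limit := intstr.FromInt(3) // up to 3 retries, i.e. at most 4 total attempts
	return &wfv1.RetryStrategy{
		Limit:       &limit,
		RetryPolicy: wfv1.RetryPolicy("OnFailure"), // only retry Failed nodes, not Errored ones
	}
}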
apiv1.Toleration
See: TODO
ExecutorConfig
// ExecutorConfig holds configurations of an executor container.
type ExecutorConfig struct {
// ServiceAccountName specifies the service account name of the executor container.
ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,1,opt,name=serviceAccountName"`
}
apiv1.HostAlias
See: TODO
apiv1.PodSecurityContext
Metrics
const (
MetricTypeGauge MetricType = "Gauge"
MetricTypeHistogram MetricType = "Histogram"
MetricTypeCounter MetricType = "Counter"
MetricTypeUnknown MetricType = "Unknown"
)
// Metrics are a list of metrics emitted from a Workflow/Template
type Metrics struct {
// Prometheus is a list of prometheus metrics to be emitted
Prometheus []*Prometheus `json:"prometheus" protobuf:"bytes,1,rep,name=prometheus"`
}
// Prometheus is a prometheus metric to be emitted
type Prometheus struct {
// Name is the name of the metric
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Labels is a list of metric labels
Labels []*MetricLabel `json:"labels,omitempty" protobuf:"bytes,2,rep,name=labels"`
// Help is a string that describes the metric
Help string `json:"help" protobuf:"bytes,3,opt,name=help"`
// When is a conditional statement that decides when to emit the metric
When string `json:"when,omitempty" protobuf:"bytes,4,opt,name=when"`
// Gauge is a gauge metric
Gauge *Gauge `json:"gauge,omitempty" protobuf:"bytes,5,opt,name=gauge"`
// Histogram is a histogram metric
Histogram *Histogram `json:"histogram,omitempty" protobuf:"bytes,6,opt,name=histogram"`
// Counter is a counter metric
Counter *Counter `json:"counter,omitempty" protobuf:"bytes,7,opt,name=counter"`
}
Synchronization
// SynchronizationStatus stores the status of semaphore and mutex.
type SynchronizationStatus struct {
// Semaphore stores this workflow's Semaphore holder details
Semaphore *SemaphoreStatus `json:"semaphore,omitempty" protobuf:"bytes,1,opt,name=semaphore"`
// Mutex stores this workflow's mutex holder details
Mutex *MutexStatus `json:"mutex,omitempty" protobuf:"bytes,2,opt,name=mutex"`
}
Memoize
// Memoization enables caching for the Outputs of the template
type Memoize struct {
// Key is the key to use as the caching key
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// Cache sets and configures the kind of cache
Cache *Cache `json:"cache" protobuf:"bytes,2,opt,name=cache"`
// MaxAge is the maximum age (e.g. "180s", "24h") of an entry that is still considered valid. If an entry is older
// than the MaxAge, it will be ignored.
MaxAge string `json:"maxAge" protobuf:"bytes,3,opt,name=maxAge"`
}
// Cache is the configuration for the type of cache to be used
type Cache struct {
// ConfigMap sets a ConfigMap-based cache
ConfigMap *apiv1.ConfigMapKeySelector `json:"configMap" protobuf:"bytes,1,opt,name=configMap"`
}
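A sketch of memoization on a template, keyed by an input parameter and backed by a ConfigMap cache; the ConfigMap name is hypothetical:

package example

import (
	apiv1 "k8s.io/api/core/v1"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func memoizedOutputs() *wfv1.Memoize {
	return &wfv1.Memoize{
		Key:    "{{inputs.parameters.msg}}", // runs with the same parameter value reuse cached outputs
		MaxAge: "24h",
		Cache: &wfv1.Cache{
			ConfigMap: &apiv1.ConfigMapKeySelector{
				LocalObjectReference: apiv1.LocalObjectReference{Name: "my-memoize-cache"},
			},
		},
	}
}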
Parameter
// Parameter indicates a passed string parameter to a service template with an optional default value
type Parameter struct {
// Name is the parameter name
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Default is the default value to use for an input parameter if a value was not supplied
Default *AnyString `json:"default,omitempty" protobuf:"bytes,2,opt,name=default"`
// Value is the literal value to use for the parameter.
// If specified in the context of an input parameter, the value takes precedence over any passed values
Value *AnyString `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
// ValueFrom is the source for the output parameter's value
ValueFrom *ValueFrom `json:"valueFrom,omitempty" protobuf:"bytes,4,opt,name=valueFrom"`
// GlobalName exports an output parameter to the global scope, making it available as
// '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters
GlobalName string `json:"globalName,omitempty" protobuf:"bytes,5,opt,name=globalName"`
// Enum holds a list of string values to choose from, for the actual value of the parameter
Enum []AnyString `json:"enum,omitempty" protobuf:"bytes,6,rep,name=enum"`
// Description is the parameter description
Description *AnyString `json:"description,omitempty" protobuf:"bytes,7,opt,name=description"`
}
Artifacts
type Artifacts []Artifact
// Artifact indicates an artifact to place at a specified path
type Artifact struct {
// name of the artifact. must be unique within a template's inputs/outputs.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Path is the container path to the artifact
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// mode bits to use on this file, must be a value between 0 and 0777
// set when loading input artifacts.
Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
// From allows an artifact to reference an artifact from a previous step
From string `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
// ArtifactLocation contains the location of the artifact
ArtifactLocation `json:",inline" protobuf:"bytes,5,opt,name=artifactLocation"`
// GlobalName exports an output artifact to the global scope, making it available as
// '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts
GlobalName string `json:"globalName,omitempty" protobuf:"bytes,6,opt,name=globalName"`
// Archive controls how the artifact will be saved to the artifact repository.
Archive *ArchiveStrategy `json:"archive,omitempty" protobuf:"bytes,7,opt,name=archive"`
// Make Artifacts optional, if Artifacts doesn't generate or exist
Optional bool `json:"optional,omitempty" protobuf:"varint,8,opt,name=optional"`
// SubPath allows an artifact to be sourced from a subpath within the specified source
SubPath string `json:"subPath,omitempty" protobuf:"bytes,9,opt,name=subPath"`
// If mode is set, apply the permission recursively into the artifact if it is a folder
RecurseMode bool `json:"recurseMode,omitempty" protobuf:"varint,10,opt,name=recurseMode"`
// FromExpression, if defined, is evaluated to specify the value for the artifact
FromExpression string `json:"fromExpression,omitempty" protobuf:"bytes,11,opt,name=fromExpression"`
// ArtifactGC describes the strategy to use when deleting an artifact from completed or deleted workflows
ArtifactGC *ArtifactGC `json:"artifactGC,omitempty" protobuf:"bytes,12,opt,name=artifactGC"`
// Has this been deleted?
Deleted bool `json:"deleted,omitempty" protobuf:"varint,13,opt,name=deleted"`
}
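A sketch of an output Artifact stored in S3 without tarring, with credentials taken from a Secret; the bucket, endpoint, secret name, and keys are hypothetical:

package example

import (
	apiv1 "k8s.io/api/core/v1"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func reportArtifact() wfv1.Artifact {
	return wfv1.Artifact{
		Name:    "report",
		Path:    "/tmp/report.txt",                                          // collected from the container after the step finishes
		Archive: &wfv1.ArchiveStrategy{None: &wfv1.NoneStrategy{}},          // upload the file as-is instead of a .tgz
		ArtifactLocation: wfv1.ArtifactLocation{
			S3: &wfv1.S3Artifact{
				S3Bucket: wfv1.S3Bucket{
					Endpoint:        "s3.amazonaws.com",
					Bucket:          "my-artifacts",
					AccessKeySecret: &apiv1.SecretKeySelector{LocalObjectReference: apiv1.LocalObjectReference{Name: "s3-creds"}, Key: "accessKey"},
					SecretKeySecret: &apiv1.SecretKeySelector{LocalObjectReference: apiv1.LocalObjectReference{Name: "s3-creds"}, Key: "secretKey"},
				},
				Key: "reports/{{workflow.name}}/report.txt",
			},
		},
	}
}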
WorkflowStep
// WorkflowStep is a reference to a template to execute in a series of steps
type WorkflowStep struct {
// Name of the step
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// Template is the name of the template to execute as the step
Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
// Inline is the template. Template must be empty if this is declared (and vice-versa).
Inline *Template `json:"inline,omitempty" protobuf:"bytes,13,opt,name=inline"`
// Arguments hold arguments to the template
Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"`
// TemplateRef is the reference to the template resource to execute as the step.
TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"`
// WithItems expands a step into multiple parallel steps from the items in the list
WithItems []Item `json:"withItems,omitempty" protobuf:"bytes,5,rep,name=withItems"`
// WithParam expands a step into multiple parallel steps from the value in the parameter,
// which is expected to be a JSON list.
WithParam string `json:"withParam,omitempty" protobuf:"bytes,6,opt,name=withParam"`
// WithSequence expands a step into a numeric sequence
WithSequence *Sequence `json:"withSequence,omitempty" protobuf:"bytes,7,opt,name=withSequence"`
// When is an expression in which the step should conditionally execute
When string `json:"when,omitempty" protobuf:"bytes,8,opt,name=when"`
// ContinueOn makes argo to proceed with the following step even if this step fails.
// Errors and Failed states can be specified
ContinueOn *ContinueOn `json:"continueOn,omitempty" protobuf:"bytes,9,opt,name=continueOn"`
// OnExit is a template reference which is invoked at the end of the
// template, irrespective of the success, failure, or error of the
// primary template.
// DEPRECATED: Use Hooks[exit].Template instead.
OnExit string `json:"onExit,omitempty" protobuf:"bytes,11,opt,name=onExit"`
// Hooks holds the lifecycle hook which is invoked at lifecycle of
// step, irrespective of the success, failure, or error status of the primary step
Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,12,opt,name=hooks"`
}
S3Artifact
// S3Artifact is the location of an S3 artifact
type S3Artifact struct {
S3Bucket `json:",inline" protobuf:"bytes,1,opt,name=s3Bucket"`
// Key is the key in the bucket where the artifact resides
Key string `json:"key,omitempty" protobuf:"bytes,2,opt,name=key"`
}
// S3Bucket contains the access information required for interfacing with an S3 bucket
type S3Bucket struct {
// Endpoint is the hostname of the bucket endpoint
Endpoint string `json:"endpoint,omitempty" protobuf:"bytes,1,opt,name=endpoint"`
// Bucket is the name of the bucket
Bucket string `json:"bucket,omitempty" protobuf:"bytes,2,opt,name=bucket"`
// Region contains the optional bucket region
Region string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"`
// Insecure, if set to true, will connect to the service without TLS
Insecure *bool `json:"insecure,omitempty" protobuf:"varint,4,opt,name=insecure"`
// AccessKeySecret is the secret selector to the bucket's access key
AccessKeySecret *apiv1.SecretKeySelector `json:"accessKeySecret,omitempty" protobuf:"bytes,5,opt,name=accessKeySecret"`
// SecretKeySecret is the secret selector to the bucket's secret key
SecretKeySecret *apiv1.SecretKeySelector `json:"secretKeySecret,omitempty" protobuf:"bytes,6,opt,name=secretKeySecret"`
// RoleARN is the Amazon Resource Name (ARN) of the role to assume.
RoleARN string `json:"roleARN,omitempty" protobuf:"bytes,7,opt,name=roleARN"`
// UseSDKCreds tells the driver to figure out credentials based on sdk defaults.
UseSDKCreds bool `json:"useSDKCreds,omitempty" protobuf:"varint,8,opt,name=useSDKCreds"`
// CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.
CreateBucketIfNotPresent *CreateS3BucketOptions `json:"createBucketIfNotPresent,omitempty" protobuf:"bytes,9,opt,name=createBucketIfNotPresent"`
EncryptionOptions *S3EncryptionOptions `json:"encryptionOptions,omitempty" protobuf:"bytes,10,opt,name=encryptionOptions"`
}
GitArtifact
// GitArtifact is the location of a git artifact
type GitArtifact struct {
// Repo is the git repository
Repo string `json:"repo" protobuf:"bytes,1,opt,name=repo"`
// Revision is the git commit, tag, branch to checkout
Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
// Depth specifies clones/fetches should be shallow and include the given
// number of commits from the branch tip
Depth *uint64 `json:"depth,omitempty" protobuf:"bytes,3,opt,name=depth"`
// Fetch specifies a number of refs that should be fetched before checkout
Fetch []string `json:"fetch,omitempty" protobuf:"bytes,4,rep,name=fetch"`
// UsernameSecret is the secret selector to the repository username
UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,5,opt,name=usernameSecret"`
// PasswordSecret is the secret selector to the repository password
PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,6,opt,name=passwordSecret"`
// SSHPrivateKeySecret is the secret selector to the repository ssh private key
SSHPrivateKeySecret *apiv1.SecretKeySelector `json:"sshPrivateKeySecret,omitempty" protobuf:"bytes,7,opt,name=sshPrivateKeySecret"`
// InsecureIgnoreHostKey disables SSH strict host key checking during git clone
InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty" protobuf:"varint,8,opt,name=insecureIgnoreHostKey"`
// DisableSubmodules disables submodules during git clone
DisableSubmodules bool `json:"disableSubmodules,omitempty" protobuf:"varint,9,opt,name=disableSubmodules"`
// SingleBranch enables single branch clone, using the `branch` parameter
SingleBranch bool `json:"singleBranch,omitempty" protobuf:"varint,10,opt,name=singleBranch"`
// Branch is the branch to fetch when `SingleBranch` is enabled
Branch string `json:"branch,omitempty" protobuf:"bytes,11,opt,name=branch"`
}
HTTPArtifact
// HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container
type HTTPArtifact struct {
// URL of the artifact
URL string `json:"url" protobuf:"bytes,1,opt,name=url"`
// Headers are an optional list of headers to send with HTTP requests for artifacts
Headers []Header `json:"headers,omitempty" protobuf:"bytes,2,rep,name=headers"`
// Auth contains information for client authentication
Auth *HTTPAuth `json:"auth,omitempty" protobuf:"bytes,3,opt,name=auth"`
}
Arguments
// Arguments to a template
type Arguments struct {
// Parameters is the list of parameters to pass to the template or workflow
// +patchStrategy=merge
// +patchMergeKey=name
Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=parameters"`
// Artifacts is the list of artifacts to pass to the template or workflow
// +patchStrategy=merge
// +patchMergeKey=name
Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=artifacts"`
}
DAGTask
See the DAGTask definition under DAGTemplate above.
ContainerNode
type ContainerNode struct {
corev1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"`
Dependencies []string `json:"dependencies,omitempty" protobuf:"bytes,2,rep,name=dependencies"`
}
ArchiveStrategy
// ArchiveStrategy describes how to archive files/directory when saving artifacts
type ArchiveStrategy struct {
Tar *TarStrategy `json:"tar,omitempty" protobuf:"bytes,1,opt,name=tar"`
None *NoneStrategy `json:"none,omitempty" protobuf:"bytes,2,opt,name=none"`
Zip *ZipStrategy `json:"zip,omitempty" protobuf:"bytes,3,opt,name=zip"`
}
// TarStrategy will tar and gzip the file or directory when saving
type TarStrategy struct {
// CompressionLevel specifies the gzip compression level to use for the artifact.
// Defaults to gzip.DefaultCompression.
CompressionLevel *int32 `json:"compressionLevel,omitempty" protobuf:"varint,1,opt,name=compressionLevel"`
}
// ZipStrategy will unzip zipped input artifacts
type ZipStrategy struct{}
// NoneStrategy indicates to skip tar process and upload the files or directory tree as independent
// files. Note that if the artifact is a directory, the artifact driver must support the ability to
// save/load the directory appropriately.
type NoneStrategy struct{}
ValueFrom
// ValueFrom describes a location in which to obtain the value to a parameter
type ValueFrom struct {
// Path in the container to retrieve an output parameter value from in container templates
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
// JSONPath of a resource to retrieve an output parameter value from in resource templates
JSONPath string `json:"jsonPath,omitempty" protobuf:"bytes,2,opt,name=jsonPath"`
// JQFilter expression against the resource object in resource templates
JQFilter string `json:"jqFilter,omitempty" protobuf:"bytes,3,opt,name=jqFilter"`
// Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message`
Event string `json:"event,omitempty" protobuf:"bytes,7,opt,name=event"`
// Parameter reference to a step or dag task in which to retrieve an output parameter value from
// (e.g. '{{steps.mystep.outputs.myparam}}')
Parameter string `json:"parameter,omitempty" protobuf:"bytes,4,opt,name=parameter"`
// Supplied value to be filled in directly, either through the CLI, API, etc.
Supplied *SuppliedValueFrom `json:"supplied,omitempty" protobuf:"bytes,6,opt,name=supplied"`
// ConfigMapKeyRef is configmap selector for input parameter configuration
ConfigMapKeyRef *apiv1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,9,opt,name=configMapKeyRef"`
// Default specifies a value to be used if retrieving the value from the specified source fails
Default *AnyString `json:"default,omitempty" protobuf:"bytes,5,opt,name=default"`
// Expression, if defined, is evaluated to specify the value for the parameter
Expression string `json:"expression,omitempty" protobuf:"bytes,8,rep,name=expression"`
}
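A sketch of two output Parameters whose values come from ValueFrom: one read from a file written by a container template, one extracted from a resource template via JSONPath; the paths are illustrative and AnyString is assumed to be a string-backed type:

package example

import (
	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func outputParameters() wfv1.Outputs {
	fallback := wfv1.AnyString("unknown")
	return wfv1.Outputs{
		Parameters: []wfv1.Parameter{
			{
				Name:      "result",
				ValueFrom: &wfv1.ValueFrom{Path: "/tmp/result.txt", Default: &fallback},
			},
			{
				Name:      "job-phase",
				ValueFrom: &wfv1.ValueFrom{JSONPath: "{.status.phase}"}, // only meaningful in resource templates
			},
		},
	}
}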
WorkflowTemplateRef
// WorkflowTemplateRef is a reference to a WorkflowTemplate resource.
type WorkflowTemplateRef struct {
// Name is the resource name of the workflow template.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).
ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,2,opt,name=clusterScope"`
}
PodGC
// PodGC describes how to delete completed pods as they complete
type PodGC struct {
// Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods
Strategy PodGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=PodGCStrategy"`
// LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,2,opt,name=labelSelector"`
// DeleteDelayDuration specifies the duration before pods in the GC queue get deleted.
DeleteDelayDuration *metav1.Duration `json:"deleteDelayDuration,omitempty" protobuf:"bytes,3,opt,name=deleteDelayDuration"`
}
PodGCStrategy
// PodGCStrategy is the strategy when to delete completed pods for GC.
type PodGCStrategy string
// PodGCStrategy
const (
PodGCOnPodNone PodGCStrategy = ""
PodGCOnPodCompletion PodGCStrategy = "OnPodCompletion"
PodGCOnPodSuccess PodGCStrategy = "OnPodSuccess"
PodGCOnWorkflowCompletion PodGCStrategy = "OnWorkflowCompletion"
PodGCOnWorkflowSuccess PodGCStrategy = "OnWorkflowSuccess"
)
TTLStrategy
// TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed
type TTLStrategy struct {
// SecondsAfterCompletion is the number of seconds to live after completion
SecondsAfterCompletion *int32 `json:"secondsAfterCompletion,omitempty" protobuf:"bytes,1,opt,name=secondsAfterCompletion"`
// SecondsAfterSuccess is the number of seconds to live after success
SecondsAfterSuccess *int32 `json:"secondsAfterSuccess,omitempty" protobuf:"bytes,2,opt,name=secondsAfterSuccess"`
// SecondsAfterFailure is the number of seconds to live after failure
SecondsAfterFailure *int32 `json:"secondsAfterFailure,omitempty" protobuf:"bytes,3,opt,name=secondsAfterFailure"`
}
ArtifactGC
// ArtifactGCStrategy is the strategy when to delete artifacts for GC.
type ArtifactGCStrategy string
// ArtifactGCStrategy
const (
ArtifactGCOnWorkflowCompletion ArtifactGCStrategy = "OnWorkflowCompletion"
ArtifactGCOnWorkflowDeletion ArtifactGCStrategy = "OnWorkflowDeletion"
ArtifactGCNever ArtifactGCStrategy = "Never"
ArtifactGCStrategyUndefined ArtifactGCStrategy = ""
)
// ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed
type ArtifactGC struct {
// Strategy is the strategy to use.
// +kubebuilder:validation:Enum="";OnWorkflowCompletion;OnWorkflowDeletion;Never
Strategy ArtifactGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=ArtifactGCStategy"`
// PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion
PodMetadata *Metadata `json:"podMetadata,omitempty" protobuf:"bytes,2,opt,name=podMetadata"`
// ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion
ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,3,opt,name=serviceAccountName"`
}