From e23763d386a5720118ca5d3fe1f64be0b45e3b66 Mon Sep 17 00:00:00 2001 From: Zettat123 Date: Wed, 6 May 2026 23:25:36 -0600 Subject: [PATCH] reusable workflow --- models/actions/run.go | 91 ---- models/actions/run_job.go | 352 +++++++++++++- models/actions/run_test.go | 101 ++++ models/actions/task.go | 4 +- models/migrations/migrations.go | 1 + models/migrations/v1_27/v332.go | 31 ++ models/perm/access/repo_permission.go | 17 + models/secret/main_test.go | 14 + models/secret/secret.go | 64 ++- models/secret/secret_test.go | 181 ++++++++ modules/actions/jobparser/model.go | 19 + modules/actions/jobparser/uses.go | 75 +++ modules/actions/jobparser/uses_test.go | 167 +++++++ modules/actions/jobparser/workflow_call.go | 405 ++++++++++++++++ .../actions/jobparser/workflow_call_test.go | 435 ++++++++++++++++++ modules/structs/hook.go | 14 + routers/web/repo/actions/view.go | 9 + services/actions/concurrency.go | 36 +- services/actions/context.go | 134 +++++- services/actions/context_test.go | 188 ++++++++ services/actions/helper.go | 92 ++++ services/actions/job_emitter.go | 84 +++- services/actions/job_emitter_test.go | 46 +- services/actions/rerun.go | 210 +++++++-- services/actions/rerun_test.go | 242 ++++++++++ services/actions/reusable_workflow.go | 314 +++++++++++++ services/actions/run.go | 32 +- web_src/js/components/ActionRunJobView.vue | 79 +++- .../js/components/ActionRunSummaryView.vue | 9 +- web_src/js/components/ActionRunView.ts | 22 + web_src/js/components/RepoActionView.vue | 107 ++++- web_src/js/components/WorkflowGraph.vue | 122 ++--- web_src/js/modules/gitea-actions.ts | 5 + 33 files changed, 3416 insertions(+), 286 deletions(-) create mode 100644 models/migrations/v1_27/v332.go create mode 100644 models/secret/main_test.go create mode 100644 models/secret/secret_test.go create mode 100644 modules/actions/jobparser/uses.go create mode 100644 modules/actions/jobparser/uses_test.go create mode 100644 modules/actions/jobparser/workflow_call.go create 
mode 100644 modules/actions/jobparser/workflow_call_test.go create mode 100644 services/actions/helper.go create mode 100644 services/actions/reusable_workflow.go diff --git a/models/actions/run.go b/models/actions/run.go index a44b0ff343e89..f02d55b1518f0 100644 --- a/models/actions/run.go +++ b/models/actions/run.go @@ -16,7 +16,6 @@ import ( user_model "code.gitea.io/gitea/models/user" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/json" - "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/timeutil" @@ -250,96 +249,6 @@ func UpdateRepoRunsNumbers(ctx context.Context, repo *repo_model.Repository) err return err } -// CancelPreviousJobs cancels all previous jobs of the same repository, reference, workflow, and event. -// It's useful when a new run is triggered, and all previous runs needn't be continued anymore. -func CancelPreviousJobs(ctx context.Context, repoID int64, ref, workflowID string, event webhook_module.HookEventType) ([]*ActionRunJob, error) { - // Find all runs in the specified repository, reference, and workflow with non-final status - runs, total, err := db.FindAndCount[ActionRun](ctx, FindRunOptions{ - RepoID: repoID, - Ref: ref, - WorkflowID: workflowID, - TriggerEvent: event, - Status: []Status{StatusRunning, StatusWaiting, StatusBlocked}, - }) - if err != nil { - return nil, err - } - - // If there are no runs found, there's no need to proceed with cancellation, so return nil. - if total == 0 { - return nil, nil - } - - cancelledJobs := make([]*ActionRunJob, 0, total) - - // Iterate over each found run and cancel its associated jobs. - for _, run := range runs { - // Find all jobs associated with the current run. 
- jobs, err := db.Find[ActionRunJob](ctx, FindRunJobOptions{ - RunID: run.ID, - }) - if err != nil { - return cancelledJobs, err - } - - cjs, err := CancelJobs(ctx, jobs) - if err != nil { - return cancelledJobs, err - } - cancelledJobs = append(cancelledJobs, cjs...) - } - - // Return nil to indicate successful cancellation of all running and waiting jobs. - return cancelledJobs, nil -} - -func CancelJobs(ctx context.Context, jobs []*ActionRunJob) ([]*ActionRunJob, error) { - cancelledJobs := make([]*ActionRunJob, 0, len(jobs)) - // Iterate over each job and attempt to cancel it. - for _, job := range jobs { - // Skip jobs that are already in a terminal state (completed, cancelled, etc.). - status := job.Status - if status.IsDone() { - continue - } - - // If the job has no associated task (probably an error), set its status to 'Cancelled' and stop it. - if job.TaskID == 0 { - job.Status = StatusCancelled - job.Stopped = timeutil.TimeStampNow() - - // Update the job's status and stopped time in the database. - n, err := UpdateRunJob(ctx, job, builder.Eq{"task_id": 0}, "status", "stopped") - if err != nil { - return cancelledJobs, err - } - - // If the update affected 0 rows, it means the job has changed in the meantime - if n == 0 { - log.Error("Failed to cancel job %d because it has changed", job.ID) - continue - } - - cancelledJobs = append(cancelledJobs, job) - // Continue with the next job. - continue - } - - // If the job has an associated task, try to stop the task, effectively cancelling the job. - if err := StopTask(ctx, job.TaskID, StatusCancelled); err != nil { - return cancelledJobs, err - } - updatedJob, err := GetRunJobByRunAndID(ctx, job.RunID, job.ID) - if err != nil { - return cancelledJobs, fmt.Errorf("get job: %w", err) - } - cancelledJobs = append(cancelledJobs, updatedJob) - } - - // Return nil to indicate successful cancellation of all running and waiting jobs. 
- return cancelledJobs, nil -} - func GetRunByRepoAndID(ctx context.Context, repoID, runID int64) (*ActionRun, error) { var run ActionRun has, err := db.GetEngine(ctx).Where("id=? AND repo_id=?", runID, repoID).Get(&run) diff --git a/models/actions/run_job.go b/models/actions/run_job.go index f0d41ef4b4703..73c5368d60d41 100644 --- a/models/actions/run_job.go +++ b/models/actions/run_job.go @@ -12,8 +12,10 @@ import ( "code.gitea.io/gitea/models/db" repo_model "code.gitea.io/gitea/models/repo" "code.gitea.io/gitea/modules/actions/jobparser" + "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/timeutil" "code.gitea.io/gitea/modules/util" + webhook_module "code.gitea.io/gitea/modules/webhook" "xorm.io/builder" ) @@ -75,14 +77,52 @@ type ActionRunJob struct { // A value of 0 indicates a legacy job created before ActionRunAttempt existed. AttemptJobID int64 `xorm:"index NOT NULL DEFAULT 0"` + // IsReusableCaller marks this job as a reusable workflow caller. + // Caller jobs do not run on a runner; their status is derived from their child jobs. + IsReusableCaller bool `xorm:"index NOT NULL DEFAULT FALSE"` + // IsCallerExpanded reports whether expandReusableWorkflowCaller has finished expanding this caller. + // When true, the caller's children rows exist and CallPayload holds the resolved WorkflowCallPayload. + // Only meaningful when IsReusableCaller is true. + IsCallerExpanded bool `xorm:"NOT NULL DEFAULT FALSE"` + // CallUses stores the raw "uses:" string of a reusable workflow caller job. + // Only set when IsReusableCaller is true. + CallUses string `xorm:"VARCHAR(512) NOT NULL DEFAULT ''"` + // ReusableWorkflowContent is the content of the reusable workflow specified by "uses:". + // Only set when IsReusableCaller is true. + ReusableWorkflowContent []byte `xorm:"LONGBLOB"` + // CallSecrets encodes the reusable workflow caller's "secrets:" section: + // - "" : no "secrets:" section (children only see auto-generated tokens). 
+ // - "inherit" : the caller wrote "secrets: inherit". + // - JSON object : explicit mapping {alias: source_name}; names only, no values. + // Only set when IsReusableCaller is true. + CallSecrets string `xorm:"LONGTEXT"` + // CallPayload is the JSON-encoded WorkflowCallPayload exposed to children as gitea.event. + // Populated atomically with IsCallerExpanded at the end of expandReusableWorkflowCaller. + // Only set when IsReusableCaller is true. + CallPayload string `xorm:"LONGTEXT 'call_payload'"` + + // ParentCallJobID is the ID of the direct reusable workflow caller job, or 0 for top-level jobs. + ParentCallJobID int64 `xorm:"index NOT NULL DEFAULT 0"` + Started timeutil.TimeStamp Stopped timeutil.TimeStamp Created timeutil.TimeStamp `xorm:"created"` Updated timeutil.TimeStamp `xorm:"updated index"` } +// ActionRunAttemptJobIDIndex backs the run-wide AttemptJobID counter, keyed by ActionRun.ID. +// Use GetNextAttemptJobID to allocate the next ID for a run. +type ActionRunAttemptJobIDIndex db.ResourceIndex + +// GetNextAttemptJobID atomically allocates the next AttemptJobID fo a job in the given run. +// AttemptJobIDs are unique within a single attempt and stable across attempts for the same logical job +func GetNextAttemptJobID(ctx context.Context, runID int64) (int64, error) { + return db.GetNextResourceIndex(ctx, "action_run_attempt_job_id_index", runID) +} + func init() { db.RegisterModel(new(ActionRunJob)) + db.RegisterModel(new(ActionRunAttemptJobIDIndex)) } func (job *ActionRunJob) Duration() time.Duration { @@ -218,6 +258,105 @@ func GetRunJobsByRunAndAttemptID(ctx context.Context, runID, runAttemptID int64) return jobs, nil } +// GetReusableCallerPriorAttemptChildren returns the direct children of the caller identified by callerAttemptJobID +// in the attempt immediately preceding currentAttempt, indexed first by child JobID and then by child Name. 
+// Used by the reusable workflow caller expansion to preserve AttemptJobID across attempts: +// when expansion produces a child whose (JobID, Name) matches a template child's, the new row reuses the template's AttemptJobID. +// +// The two-level structure is required because matrix expansion produces multiple ActionRunJob rows that share +// the same JobID (e.g., 3 rows all with JobID="work") but distinct Names ("work (alpha)", "work (beta)", ...); +// a flat map keyed by JobID would collapse those rows onto a single entry and yield duplicate AttemptJobIDs +// when the new attempt re-expands the same matrix. +// +// Returns (nil, nil) when: +// - currentAttempt <= 1 (this is the first attempt; no prior to match against) +// - the prior caller row doesn't exist (e.g., this caller is brand-new in the current attempt) +func GetReusableCallerPriorAttemptChildren(ctx context.Context, runID, currentAttempt, callerAttemptJobID int64) (map[string]map[string]*ActionRunJob, error) { + if currentAttempt <= 1 { + return nil, nil //nolint:nilnil // signals "no prior attempt to match against" + } + + var priorAttempt ActionRunAttempt + has, err := db.GetEngine(ctx). + Where("run_id = ? AND attempt = ?", runID, currentAttempt-1). + Get(&priorAttempt) + if err != nil { + return nil, fmt.Errorf("find prior attempt: %w", err) + } + if !has { + // Should not happen. Reaching here means the DB is in an inconsistent state. + // Attempts are generated sequentially so currentAttempt-1 must exist when currentAttempt > 1. + return nil, fmt.Errorf("prior attempt %d for run %d not found", currentAttempt-1, runID) + } + + var priorCaller ActionRunJob + has, err = db.GetEngine(ctx). + Where("run_id = ? AND run_attempt_id = ? AND attempt_job_id = ?", runID, priorAttempt.ID, callerAttemptJobID). 
+ Get(&priorCaller) + if err != nil { + return nil, fmt.Errorf("find prior caller: %w", err) + } + if !has { + return nil, nil //nolint:nilnil // caller is brand new in this attempt; no template to match + } + + var children []*ActionRunJob + if err := db.GetEngine(ctx). + Where("run_id = ? AND parent_call_job_id = ?", runID, priorCaller.ID). + Find(&children); err != nil { + return nil, fmt.Errorf("find prior children: %w", err) + } + + out := make(map[string]map[string]*ActionRunJob) + for _, c := range children { + if out[c.JobID] == nil { + out[c.JobID] = make(map[string]*ActionRunJob) + } + out[c.JobID][c.Name] = c + } + return out, nil +} + +// GetReusableCallerDirectChildJobs returns the direct child jobs of a reusable workflow caller job. +func GetReusableCallerDirectChildJobs(ctx context.Context, callerJob *ActionRunJob) (ActionJobList, error) { + var jobs []*ActionRunJob + if err := db.GetEngine(ctx). + Where("run_id=? AND parent_call_job_id=?", callerJob.RunID, callerJob.ID). + OrderBy("id"). 
+ Find(&jobs); err != nil { + return nil, err + } + return jobs, nil +} + +// CollectReusableCallerAllChildJobs returns every job in `allJobs` that lives under caller's subtree (recursively), excluding `caller` itself +func CollectReusableCallerAllChildJobs(caller *ActionRunJob, allJobs []*ActionRunJob) []*ActionRunJob { + parents := map[int64]bool{caller.ID: true} + for { + grew := false + for _, j := range allJobs { + if j.ParentCallJobID == 0 { + continue + } + if parents[j.ParentCallJobID] && !parents[j.ID] { + parents[j.ID] = true + grew = true + } + } + if !grew { + break + } + } + out := make([]*ActionRunJob, 0) + for _, j := range allJobs { + if j.ID == caller.ID || !parents[j.ID] { + continue + } + out = append(out, j) + } + return out +} + func UpdateRunJob(ctx context.Context, job *ActionRunJob, cond builder.Cond, cols ...string) (int64, error) { e := db.GetEngine(ctx) @@ -235,11 +374,13 @@ func UpdateRunJob(ctx context.Context, job *ActionRunJob, cond builder.Cond, col return 0, err } - if affected == 0 || (!slices.Contains(cols, "status") && job.Status == 0) { + statusChanged := slices.Contains(cols, "status") || (len(cols) == 0 && job.Status != 0) + + if affected == 0 || (!statusChanged && job.Status == 0) { return affected, nil } - if slices.Contains(cols, "status") && job.Status.IsWaiting() { + if statusChanged && job.Status.IsWaiting() { // if the status of job changes to waiting again, increase tasks version. if err := IncreaseTaskVersion(ctx, job.OwnerID, job.RepoID); err != nil { return 0, err @@ -253,6 +394,11 @@ func UpdateRunJob(ctx context.Context, job *ActionRunJob, cond builder.Cond, col } } + // Reusable workflow caller's children cascade their status changes upward to the parent caller + if statusChanged && job.ParentCallJobID > 0 { + return affected, cascadeCallerStatus(ctx, job) + } + { // Other goroutines may aggregate the status of the attempt/run and update it too. 
// So we need to load the current jobs before updating the aggregate state. @@ -305,6 +451,83 @@ func UpdateRunJob(ctx context.Context, job *ActionRunJob, cond builder.Cond, col return affected, nil } +// cascadeCallerStatus re-derives the parent reusable workflow caller's Status from its children +func cascadeCallerStatus(ctx context.Context, child *ActionRunJob) error { + parent, err := GetRunJobByRunAndID(ctx, child.RunID, child.ParentCallJobID) + if err != nil { + return fmt.Errorf("load parent caller %d: %w", child.ParentCallJobID, err) + } + if !parent.IsReusableCaller { + return nil + } + children, err := GetReusableCallerDirectChildJobs(ctx, parent) + if err != nil { + return err + } + newStatus := aggregateReusableCallerStatus(children) + cols := make([]string, 0, 3) + if parent.Status != newStatus { + parent.Status = newStatus + cols = append(cols, "status") + } + // Skipped subtrees never executed - leave Started/Stopped untouched + if newStatus != StatusSkipped { + now := timeutil.TimeStampNow() + if parent.Started.IsZero() && newStatus != StatusBlocked { + parent.Started = now + cols = append(cols, "started") + } + if parent.Stopped.IsZero() && newStatus.IsDone() { + parent.Stopped = now + cols = append(cols, "stopped") + } + } + if len(cols) == 0 { + return nil + } + _, err = UpdateRunJob(ctx, parent, nil, cols...) + return err +} + +// aggregateReusableCallerStatus derives a reusable workflow caller's status from its direct children. +// +// Unlike AggregateJobStatus, a reusable workflow caller can only be Done when all its children are Done. +// +// Two-stage rule: +// 1. If any child is not Done, return Running > Waiting > Blocked. The caller is still in progress. +// 2. Once every child is Done, defer to AggregateJobStatus for the terminal status. 
+func aggregateReusableCallerStatus(jobs []*ActionRunJob) Status { + var hasRunning, hasWaiting, hasBlocked, allDone bool + allDone = len(jobs) != 0 + for _, j := range jobs { + if j.Status.IsDone() { + continue + } + allDone = false + switch j.Status { + case StatusRunning: + hasRunning = true + case StatusWaiting: + hasWaiting = true + case StatusBlocked: + hasBlocked = true + } + } + if !allDone { + switch { + case hasRunning: + return StatusRunning + case hasWaiting: + return StatusWaiting + case hasBlocked: + return StatusBlocked + default: + return StatusUnknown // it shouldn't happen + } + } + return AggregateJobStatus(jobs) +} + func AggregateJobStatus(jobs []*ActionRunJob) Status { allSuccessOrSkipped := len(jobs) != 0 allSkipped := len(jobs) != 0 @@ -338,6 +561,49 @@ func AggregateJobStatus(jobs []*ActionRunJob) Status { } } +// CancelPreviousJobs cancels all previous jobs of the same repository, reference, workflow, and event. +// It's useful when a new run is triggered, and all previous runs needn't be continued anymore. +func CancelPreviousJobs(ctx context.Context, repoID int64, ref, workflowID string, event webhook_module.HookEventType) ([]*ActionRunJob, error) { + // Find all runs in the specified repository, reference, and workflow with non-final status + runs, total, err := db.FindAndCount[ActionRun](ctx, FindRunOptions{ + RepoID: repoID, + Ref: ref, + WorkflowID: workflowID, + TriggerEvent: event, + Status: []Status{StatusRunning, StatusWaiting, StatusBlocked}, + }) + if err != nil { + return nil, err + } + + // If there are no runs found, there's no need to proceed with cancellation, so return nil. + if total == 0 { + return nil, nil + } + + cancelledJobs := make([]*ActionRunJob, 0, total) + + // Iterate over each found run and cancel its associated jobs. + for _, run := range runs { + // Find all jobs associated with the current run. 
+ jobs, err := db.Find[ActionRunJob](ctx, FindRunJobOptions{ + RunID: run.ID, + }) + if err != nil { + return cancelledJobs, err + } + + cjs, err := CancelJobs(ctx, jobs) + if err != nil { + return cancelledJobs, err + } + cancelledJobs = append(cancelledJobs, cjs...) + } + + // Return nil to indicate successful cancellation of all running and waiting jobs. + return cancelledJobs, nil +} + func CancelPreviousJobsByJobConcurrency(ctx context.Context, job *ActionRunJob) (jobsToCancel []*ActionRunJob, _ error) { if job.RawConcurrency == "" { return nil, nil @@ -374,3 +640,85 @@ func CancelPreviousJobsByJobConcurrency(ctx context.Context, job *ActionRunJob) return CancelJobs(ctx, jobsToCancel) } + +func CancelJobs(ctx context.Context, jobs []*ActionRunJob) ([]*ActionRunJob, error) { + cancelledJobs := make([]*ActionRunJob, 0, len(jobs)) + + for _, job := range jobs { + if job.IsReusableCaller { + sub, err := cancelReusableCaller(ctx, job) + if err != nil { + return cancelledJobs, err + } + cancelledJobs = append(cancelledJobs, sub...) + continue + } + + c, err := cancelOneJob(ctx, job) + if err != nil { + return cancelledJobs, err + } + if c != nil { + cancelledJobs = append(cancelledJobs, c) + } + } + return cancelledJobs, nil +} + +// cancelOneJob cancels a single job and returns the post-cancel row +func cancelOneJob(ctx context.Context, job *ActionRunJob) (*ActionRunJob, error) { + if job.Status.IsDone() { + return nil, nil //nolint:nilnil // signal "nothing to cancel; not an error" + } + // No associated task — mark Cancelled directly. This includes reusable + // caller rows (no runner task) and jobs that never reached PickTask. 
+ if job.TaskID == 0 { + job.Status = StatusCancelled + job.Stopped = timeutil.TimeStampNow() + n, err := UpdateRunJob(ctx, job, builder.Eq{"task_id": 0}, "status", "stopped") + if err != nil { + return nil, err + } + if n == 0 { + log.Error("Failed to cancel job %d because it has changed", job.ID) + return nil, nil //nolint:nilnil // signal "nothing to cancel; not an error" + } + return job, nil + } + // Has a task: stop the task and re-read the row. + if err := StopTask(ctx, job.TaskID, StatusCancelled); err != nil { + return nil, err + } + updated, err := GetRunJobByRunAndID(ctx, job.RunID, job.ID) + if err != nil { + return nil, fmt.Errorf("get job: %w", err) + } + return updated, nil +} + +// cancelReusableCaller cancels `caller` and all its child jobs +func cancelReusableCaller(ctx context.Context, caller *ActionRunJob) ([]*ActionRunJob, error) { + cancelledJobs := make([]*ActionRunJob, 0) + + if c, err := cancelOneJob(ctx, caller); err != nil { + return cancelledJobs, err + } else if c != nil { + cancelledJobs = append(cancelledJobs, c) + } + + attemptJobs, err := GetRunJobsByRunAndAttemptID(ctx, caller.RunID, caller.RunAttemptID) + if err != nil { + return cancelledJobs, err + } + + for _, c := range CollectReusableCallerAllChildJobs(caller, attemptJobs) { + cancelled, err := cancelOneJob(ctx, c) + if err != nil { + return cancelledJobs, err + } + if cancelled != nil { + cancelledJobs = append(cancelledJobs, cancelled) + } + } + return cancelledJobs, nil +} diff --git a/models/actions/run_test.go b/models/actions/run_test.go index e82cbe84b515c..54c7b9bc79e8a 100644 --- a/models/actions/run_test.go +++ b/models/actions/run_test.go @@ -13,6 +13,7 @@ import ( "code.gitea.io/gitea/modules/timeutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestUpdateRepoRunsNumbers(t *testing.T) { @@ -45,3 +46,103 @@ func TestActionRun_Duration_NonNegative(t *testing.T) { } assert.Equal(t, time.Duration(0), run.Duration()) } + +// 
TestGetReusableCallerPriorAttemptChildren_MatrixJobs tests jobs in one matrix share JobID but have distinct Names +func TestGetReusableCallerPriorAttemptChildren_MatrixJobs(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + ctx := t.Context() + + // Prior attempt: caller + 3 matrix instances of "work" + a non-matrix sibling "summary". + // Same-JobID-different-Name shape mirrors what jobparser emits when expanding a matrix axis. + run := &ActionRun{ + Title: "matrix-prior-test", + RepoID: 4, + Index: 9501, + OwnerID: 1, + WorkflowID: "matrix.yaml", + TriggerUserID: 1, + Ref: "refs/heads/master", + CommitSHA: "c2d72f548424103f01ee1dc02889c1e2bff816b0", + Event: "push", + TriggerEvent: "push", + EventPayload: "{}", + Status: StatusSuccess, + } + require.NoError(t, db.Insert(ctx, run)) + + priorAttempt := &ActionRunAttempt{ + RepoID: run.RepoID, + RunID: run.ID, + Attempt: 1, + TriggerUserID: 1, + Status: StatusSuccess, + } + require.NoError(t, db.Insert(ctx, priorAttempt)) + + const callerAttemptJobID int64 = 9001 + priorCaller := &ActionRunJob{ + RunID: run.ID, + RunAttemptID: priorAttempt.ID, + RepoID: run.RepoID, + OwnerID: run.OwnerID, + CommitSHA: run.CommitSHA, + Name: "caller", + JobID: "caller", + Attempt: 1, + Status: StatusSuccess, + AttemptJobID: callerAttemptJobID, + IsReusableCaller: true, + IsCallerExpanded: true, + } + require.NoError(t, db.Insert(ctx, priorCaller)) + + insertChild := func(t *testing.T, name, jobID string, attemptJobID int64) { + t.Helper() + c := &ActionRunJob{ + RunID: run.ID, + RunAttemptID: priorAttempt.ID, + RepoID: run.RepoID, + OwnerID: run.OwnerID, + CommitSHA: run.CommitSHA, + Name: name, + JobID: jobID, + Attempt: 1, + Status: StatusSuccess, + AttemptJobID: attemptJobID, + ParentCallJobID: priorCaller.ID, + } + require.NoError(t, db.Insert(ctx, c)) + } + insertChild(t, "work (alpha)", "work", 101) + insertChild(t, "work (beta)", "work", 102) + insertChild(t, "work (gamma)", "work", 103) + insertChild(t, 
"summary", "summary", 104) + + currentAttempt := &ActionRunAttempt{ + RepoID: run.RepoID, + RunID: run.ID, + Attempt: 2, + TriggerUserID: 1, + Status: StatusRunning, + } + require.NoError(t, db.Insert(ctx, currentAttempt)) + + out, err := GetReusableCallerPriorAttemptChildren(ctx, run.ID, currentAttempt.Attempt, callerAttemptJobID) + require.NoError(t, err) + + // Outer map has one entry per JobID; "work" carries 3 matrix instances, "summary" carries 1. + assert.Len(t, out, 2, "outer map keyed by JobID") + assert.Len(t, out["work"], 3, "matrix instances must each get their own inner-map entry") + assert.Len(t, out["summary"], 1) + + require.NotNil(t, out["work"]["work (alpha)"]) + require.NotNil(t, out["work"]["work (beta)"]) + require.NotNil(t, out["work"]["work (gamma)"]) + require.NotNil(t, out["summary"]["summary"]) + + // AttemptJobID does not change in different attempts + assert.Equal(t, int64(101), out["work"]["work (alpha)"].AttemptJobID) + assert.Equal(t, int64(102), out["work"]["work (beta)"].AttemptJobID) + assert.Equal(t, int64(103), out["work"]["work (gamma)"].AttemptJobID) + assert.Equal(t, int64(104), out["summary"]["summary"].AttemptJobID) +} diff --git a/models/actions/task.go b/models/actions/task.go index 7a97eadc798ad..eede06f68c4bd 100644 --- a/models/actions/task.go +++ b/models/actions/task.go @@ -248,7 +248,7 @@ func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask } var jobs []*ActionRunJob - if err := e.Where("task_id=? AND status=?", 0, StatusWaiting).And(jobCond).Asc("updated", "id").Find(&jobs); err != nil { + if err := e.Where("task_id=? AND status=? 
AND is_reusable_caller=?", 0, StatusWaiting, false).And(jobCond).Asc("updated", "id").Find(&jobs); err != nil { return nil, false, err } @@ -384,7 +384,7 @@ func UpdateTaskByState(ctx context.Context, runnerID int64, state *runnerv1.Task RepoID: task.RepoID, Status: task.Status, Stopped: task.Stopped, - }, nil); err != nil { + }, nil, "status", "stopped"); err != nil { return nil, err } } else { diff --git a/models/migrations/migrations.go b/models/migrations/migrations.go index c3a8f08b5d763..5efa081f8c01c 100644 --- a/models/migrations/migrations.go +++ b/models/migrations/migrations.go @@ -409,6 +409,7 @@ func prepareMigrationTasks() []*migration { // Gitea 1.26.0 ends at migration ID number 330 (database version 331) newMigration(331, "Add ActionRunAttempt model and related action fields", v1_27.AddActionRunAttemptModel), + newMigration(332, "Add reusable workflow fields and action_run_attempt_job_id_index table for ActionRunJob", v1_27.AddReusableWorkflowFieldsToActionRunJob), } return preparedMigrations } diff --git a/models/migrations/v1_27/v332.go b/models/migrations/v1_27/v332.go new file mode 100644 index 0000000000000..11cd8c29442ad --- /dev/null +++ b/models/migrations/v1_27/v332.go @@ -0,0 +1,31 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_27 + +import ( + "code.gitea.io/gitea/models/db" + + "xorm.io/xorm" +) + +// AddReusableWorkflowFieldsToActionRunJob adds the ActionRunJob columns that describe the reusable workflow caller hierarchy, +// and the ActionRunAttemptJobIDIndex table backing run-wide AttemptJobID allocation. 
+func AddReusableWorkflowFieldsToActionRunJob(x *xorm.Engine) error { + type ActionRunJob struct { + IsReusableCaller bool `xorm:"index NOT NULL DEFAULT FALSE"` + ParentCallJobID int64 `xorm:"index NOT NULL DEFAULT 0"` + CallUses string `xorm:"VARCHAR(512) NOT NULL DEFAULT ''"` + CallSecrets string `xorm:"LONGTEXT"` + CallPayload string `xorm:"LONGTEXT 'call_payload'"` + IsCallerExpanded bool `xorm:"NOT NULL DEFAULT FALSE"` + ReusableWorkflowContent []byte `xorm:"LONGBLOB"` + } + + type ActionRunAttemptJobIDIndex db.ResourceIndex + + if _, err := x.SyncWithOptions(xorm.SyncOptions{IgnoreDropIndices: true}, new(ActionRunJob)); err != nil { + return err + } + return x.Sync(new(ActionRunAttemptJobIDIndex)) +} diff --git a/models/perm/access/repo_permission.go b/models/perm/access/repo_permission.go index 21821f17466e2..4e06e056be9a7 100644 --- a/models/perm/access/repo_permission.go +++ b/models/perm/access/repo_permission.go @@ -655,3 +655,20 @@ func CheckRepoUnitUser(ctx context.Context, repo *repo_model.Repository, user *u func PermissionNoAccess() Permission { return Permission{AccessMode: perm_model.AccessModeNone} } + +// CanReadWorkflowCrossRepo checks whether the run can read workflow files from targetRepo +func CanReadWorkflowCrossRepo(ctx context.Context, targetRepo *repo_model.Repository, run *actions_model.ActionRun) (bool, error) { + if err := run.LoadRepo(ctx); err != nil { + return false, err + } + + if checkSameOwnerCrossRepoAccess(ctx, run.Repo, targetRepo, run.IsForkPullRequest) { + return true, nil + } + + botPerm, err := GetIndividualUserRepoPermission(ctx, targetRepo, user_model.NewActionsUser()) + if err != nil { + return false, err + } + return botPerm.AccessMode >= perm_model.AccessModeRead, nil +} diff --git a/models/secret/main_test.go b/models/secret/main_test.go new file mode 100644 index 0000000000000..045468134d826 --- /dev/null +++ b/models/secret/main_test.go @@ -0,0 +1,14 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package secret + +import ( + "testing" + + "code.gitea.io/gitea/models/unittest" +) + +func TestMain(m *testing.M) { + unittest.MainTest(m) +} diff --git a/models/secret/secret.go b/models/secret/secret.go index a82a924c39303..dcc7c86346309 100644 --- a/models/secret/secret.go +++ b/models/secret/secret.go @@ -11,6 +11,8 @@ import ( actions_model "code.gitea.io/gitea/models/actions" "code.gitea.io/gitea/models/db" actions_module "code.gitea.io/gitea/modules/actions" + "code.gitea.io/gitea/modules/actions/jobparser" + "code.gitea.io/gitea/modules/json" "code.gitea.io/gitea/modules/log" secret_module "code.gitea.io/gitea/modules/secret" "code.gitea.io/gitea/modules/setting" @@ -152,16 +154,16 @@ func UpdateSecret(ctx context.Context, secretID int64, data, description string) } func GetSecretsOfTask(ctx context.Context, task *actions_model.ActionTask) (map[string]string, error) { - secrets := map[string]string{} + baseSecrets := map[string]string{} - secrets["GITHUB_TOKEN"] = task.Token - secrets["GITEA_TOKEN"] = task.Token + baseSecrets["GITHUB_TOKEN"] = task.Token + baseSecrets["GITEA_TOKEN"] = task.Token if task.Job.Run.IsForkPullRequest && task.Job.Run.TriggerEvent != actions_module.GithubEventPullRequestTarget { // ignore secrets for fork pull request, except GITHUB_TOKEN and GITEA_TOKEN which are automatically generated. 
// for the tasks triggered by pull_request_target event, they could access the secrets because they will run in the context of the base branch // see the documentation: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target - return secrets, nil + return baseSecrets, nil } ownerSecrets, err := db.Find[Secret](ctx, FindSecretsOptions{OwnerID: task.Job.Run.Repo.OwnerID}) @@ -181,10 +183,60 @@ func GetSecretsOfTask(ctx context.Context, task *actions_model.ActionTask) (map[ log.Error("Unable to decrypt Actions secret %v %q, maybe SECRET_KEY is wrong: %v", secret.ID, secret.Name, err) continue } - secrets[secret.Name] = v + baseSecrets[secret.Name] = v } - return secrets, nil + return getScopedSecretsForJob(ctx, task.Job, baseSecrets) +} + +// getScopedSecretsForJob walks up the caller chain (ParentCallJobID) and applies +// each caller's secrets policy: +// - "secrets: inherit" passes the parent scope's secrets through unchanged. +// - explicit mapping {alias: SOURCE} only forwards the named secrets, plus the auto-generated tokens. +// +// For top-level jobs (ParentCallJobID == 0) the base secrets are returned as-is. +func getScopedSecretsForJob(ctx context.Context, job *actions_model.ActionRunJob, baseSecrets map[string]string) (map[string]string, error) { + if job.ParentCallJobID == 0 { + return baseSecrets, nil + } + + caller, err := actions_model.GetRunJobByRunAndID(ctx, job.RunID, job.ParentCallJobID) + if err != nil { + return nil, fmt.Errorf("load caller job %d: %w", job.ParentCallJobID, err) + } + + parentScope, err := getScopedSecretsForJob(ctx, caller, baseSecrets) + if err != nil { + return nil, err + } + + if caller.CallSecrets == jobparser.SecretsInherit { + return parentScope, nil + } + + // Empty or explicit-mapping path: only auto-tokens + (any) mapped aliases are exposed. 
+ scoped := map[string]string{ + "GITHUB_TOKEN": baseSecrets["GITHUB_TOKEN"], + "GITEA_TOKEN": baseSecrets["GITEA_TOKEN"], + } + if caller.CallSecrets == "" { + return scoped, nil + } + var mapping map[string]string + if err := json.Unmarshal([]byte(caller.CallSecrets), &mapping); err != nil { + return nil, fmt.Errorf("decode caller %d secret map: %w", caller.ID, err) + } + for alias, source := range mapping { + if v, ok := parentScope[source]; ok { + scoped[alias] = v + continue + } + // Secret names are case-insensitive in storage (uppercased). + if v, ok := parentScope[strings.ToUpper(source)]; ok { + scoped[alias] = v + } + } + return scoped, nil } func CountWrongRepoLevelSecrets(ctx context.Context) (int64, error) { diff --git a/models/secret/secret_test.go b/models/secret/secret_test.go new file mode 100644 index 0000000000000..c820b09fab811 --- /dev/null +++ b/models/secret/secret_test.go @@ -0,0 +1,181 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package secret + +import ( + "testing" + + actions_model "code.gitea.io/gitea/models/actions" + "code.gitea.io/gitea/models/db" + "code.gitea.io/gitea/models/unittest" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetScopedSecretsForJob(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + ctx := t.Context() + + base := map[string]string{ + "GITHUB_TOKEN": "tok", + "GITEA_TOKEN": "tok", + "PROD_API_KEY": "prod-secret", + "DEV_API_KEY": "dev-secret", + } + + // insertCaller create an ActionRunJob caller row with the given CallSecrets policy + insertCaller := func(t *testing.T, runID, parentCallJobID int64, callSecrets string) *actions_model.ActionRunJob { + t.Helper() + job := &actions_model.ActionRunJob{ + RunID: runID, + RepoID: 1, + IsReusableCaller: true, + ParentCallJobID: parentCallJobID, + CallSecrets: callSecrets, + Status: actions_model.StatusBlocked, + } + require.NoError(t, 
db.Insert(t.Context(), job)) + return job + } + + t.Run("TopLevelJob_ReturnsBaseUnchanged", func(t *testing.T) { + const runID = 9001 + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: 0} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, base, got, "top-level jobs should see the full base scope") + }) + + t.Run("CallerInherit_PassesParentScopeThrough", func(t *testing.T) { + const runID = 9002 + caller := insertCaller(t, runID, 0, "inherit") + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: caller.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, base, got, "secrets: inherit forwards everything from parent scope") + }) + + t.Run("CallerEmptySecrets_ExposesOnlyAutoTokens", func(t *testing.T) { + const runID = 9003 + caller := insertCaller(t, runID, 0, "") + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: caller.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "GITHUB_TOKEN": "tok", + "GITEA_TOKEN": "tok", + }, got) + }) + + t.Run("CallerMapping_OnlyMappedAliasesPlusTokens", func(t *testing.T) { + const runID = 9004 + // {alias: source} — the called workflow sees `secrets.MY_KEY` resolved to PROD_API_KEY's value. 
+ caller := insertCaller(t, runID, 0, `{"MY_KEY":"PROD_API_KEY"}`) + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: caller.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "GITHUB_TOKEN": "tok", + "GITEA_TOKEN": "tok", + "MY_KEY": "prod-secret", + // no "dev-secret" + }, got) + }) + + t.Run("CallerMapping_CaseInsensitiveSource", func(t *testing.T) { + const runID = 9005 + caller := insertCaller(t, runID, 0, `{"alias":"prod_api_key"}`) + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: caller.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, "prod-secret", got["alias"]) + }) + + t.Run("CallerMapping_UnknownSourceDropsAlias", func(t *testing.T) { + const runID = 9006 + // MAPPED_ALIAS points at a secret name that doesn't exist in baseSecrets. + // The alias should NOT appear in the result. + caller := insertCaller(t, runID, 0, `{"MAPPED_ALIAS":"DOES_NOT_EXIST"}`) + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: caller.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + _, present := got["MAPPED_ALIAS"] + assert.False(t, present) + }) + + t.Run("Nested_InheritThenInherit_FullScope", func(t *testing.T) { + const runID = 9007 + outer := insertCaller(t, runID, 0, "inherit") + inner := insertCaller(t, runID, outer.ID, "inherit") + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: inner.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, base, got, "inherit-then-inherit should pass the full base scope through") + }) + + t.Run("Nested_InheritThenMapping_InnerNarrows", func(t *testing.T) { + const runID = 9008 + // outer: inherit (sees full base) + // inner: mapping {ALIAS_OUT: PROD_API_KEY} + // leaf scope = inner mapping result. 
+ outer := insertCaller(t, runID, 0, "inherit") + inner := insertCaller(t, runID, outer.ID, `{"ALIAS_OUT":"PROD_API_KEY"}`) + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: inner.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "GITHUB_TOKEN": "tok", + "GITEA_TOKEN": "tok", + "ALIAS_OUT": "prod-secret", + // no "dev-secret" + }, got) + }) + + t.Run("Nested_MappingThenInherit_OuterNarrows", func(t *testing.T) { + const runID = 9009 + // outer: mapping {OUTER_ALIAS: PROD_API_KEY} + // inner: inherit + // leaf can therefore only see auto-tokens + OUTER_ALIAS. + outer := insertCaller(t, runID, 0, `{"OUTER_ALIAS":"PROD_API_KEY"}`) + inner := insertCaller(t, runID, outer.ID, "inherit") + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: inner.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "GITHUB_TOKEN": "tok", + "GITEA_TOKEN": "tok", + "OUTER_ALIAS": "prod-secret", + // no "dev-secret" + }, got) + }) + + t.Run("Nested_MappingThenMapping_InnerSourceMustExistInOuterScope", func(t *testing.T) { + const runID = 9010 + // outer mapping forwards only PROD_API_KEY as ALIAS_A. + // inner mapping tries to forward DEV_API_KEY as ALIAS_B - but DEV_API_KEY is not in outer's narrowed scope, so it must be dropped. + // inner can still forward ALIAS_A as ALIAS_C (renaming). 
+ outer := insertCaller(t, runID, 0, `{"ALIAS_A":"PROD_API_KEY"}`) + inner := insertCaller(t, runID, outer.ID, `{"ALIAS_B":"DEV_API_KEY","ALIAS_C":"ALIAS_A"}`) + leaf := &actions_model.ActionRunJob{RunID: runID, ParentCallJobID: inner.ID} + + got, err := getScopedSecretsForJob(ctx, leaf, base) + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "GITHUB_TOKEN": "tok", + "GITEA_TOKEN": "tok", + "ALIAS_C": "prod-secret", + // no "dev-secret" + }, got) + }) +} diff --git a/modules/actions/jobparser/model.go b/modules/actions/jobparser/model.go index 2c4bd1f93a819..0af72ad0bf12e 100644 --- a/modules/actions/jobparser/model.go +++ b/modules/actions/jobparser/model.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" + "gitea.com/gitea/runner/act/exprparser" "gitea.com/gitea/runner/act/model" "go.yaml.in/yaml/v4" ) @@ -454,6 +455,24 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) { } } +func EvaluateJobIfExpression(jobID string, job *Job, gitCtx map[string]any, results map[string]*JobResult, vars map[string]string, inputs map[string]any) (bool, error) { + actJob := &model.Job{} + if job != nil { + actJob.Strategy = &model.Strategy{ + FailFastString: job.Strategy.FailFastString, + MaxParallelString: job.Strategy.MaxParallelString, + RawMatrix: job.Strategy.RawMatrix, + } + } + evaluator := NewExpressionEvaluator(NewInterpeter(jobID, actJob, nil, toGitContext(gitCtx), results, vars, inputs)) + expr, _ := rewriteSubExpression(job.If.Value, false) + result, err := evaluator.evaluate(expr, exprparser.DefaultStatusCheckSuccess) + if err != nil { + return false, err + } + return exprparser.IsTruthy(result), nil +} + // parseMappingNode parse a mapping node and preserve order. 
func parseMappingNode[T any](node *yaml.Node) ([]string, []T, error) { if node.Kind != yaml.MappingNode { diff --git a/modules/actions/jobparser/uses.go b/modules/actions/jobparser/uses.go new file mode 100644 index 0000000000000..a9550faebcfaa --- /dev/null +++ b/modules/actions/jobparser/uses.go @@ -0,0 +1,75 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package jobparser + +import ( + "errors" + "fmt" + "path" + "regexp" + "strings" +) + +// UsesKind enumerates the supported forms of a reusable workflow "uses:" value. +type UsesKind int + +const ( + // UsesKindLocalSameRepo is "./.gitea/workflows/foo.yml" - a path inside the calling repository. + UsesKindLocalSameRepo UsesKind = iota + 1 + // UsesKindLocalCrossRepo is "owner/repo/.gitea/workflows/foo.yml@ref" - a workflow in another repo on the same instance. + UsesKindLocalCrossRepo +) + +// UsesRef is the parsed form of a reusable workflow "uses:" value. +type UsesRef struct { + Kind UsesKind + Owner string // empty for UsesKindLocalSameRepo + Repo string // empty for UsesKindLocalSameRepo + Path string // workflow file path inside the source repo + Ref string // git ref; empty for UsesKindLocalSameRepo +} + +var ( + reLocalSameRepo = regexp.MustCompile(`^\./\.(gitea|github)/workflows/([^@]+\.ya?ml)$`) + reLocalCrossRepo = regexp.MustCompile(`^([^/]+)/([^/]+)/\.(gitea|github)/workflows/([^@]+\.ya?ml)@(.+)$`) +) + +// ParseUses parses a reusable workflow "uses:" value. 
+// Only two forms are supported: +// - "./.gitea/workflows/foo.yml" (UsesKindLocalSameRepo, no @ref) +// - "OWNER/REPO/.gitea/workflows/foo.yml@REF" (UsesKindLocalCrossRepo) +func ParseUses(s string) (*UsesRef, error) { + s = strings.TrimSpace(s) + if s == "" { + return nil, errors.New("empty uses value") + } + + if strings.HasPrefix(s, "./") { + m := reLocalSameRepo.FindStringSubmatch(s) + if m == nil { + return nil, fmt.Errorf(`invalid local "uses:" %q (expect ./.gitea/workflows/.yml)`, s) + } + p := fmt.Sprintf(".%s/workflows/%s", m[1], m[2]) + if path.Clean(p) != p { + return nil, fmt.Errorf("invalid workflow path %q", s) + } + return &UsesRef{Kind: UsesKindLocalSameRepo, Path: p}, nil + } + + m := reLocalCrossRepo.FindStringSubmatch(s) + if m == nil { + return nil, fmt.Errorf(`invalid cross-repo "uses:" %q (expect owner/repo/.gitea/workflows/.yml@ref)`, s) + } + p := fmt.Sprintf(".%s/workflows/%s", m[3], m[4]) + if path.Clean(p) != p { + return nil, fmt.Errorf("invalid workflow path %q", s) + } + return &UsesRef{ + Kind: UsesKindLocalCrossRepo, + Owner: m[1], + Repo: m[2], + Path: p, + Ref: m[5], + }, nil +} diff --git a/modules/actions/jobparser/uses_test.go b/modules/actions/jobparser/uses_test.go new file mode 100644 index 0000000000000..281f910d0b682 --- /dev/null +++ b/modules/actions/jobparser/uses_test.go @@ -0,0 +1,167 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package jobparser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseUses(t *testing.T) { + t.Run("LocalSameRepo", func(t *testing.T) { + cases := []struct { + name string + in string + want UsesRef + }{ + { + name: "gitea dir, .yml", + in: "./.gitea/workflows/build.yml", + want: UsesRef{Kind: UsesKindLocalSameRepo, Path: ".gitea/workflows/build.yml"}, + }, + { + name: "github dir, .yml", + in: "./.github/workflows/build.yml", + want: UsesRef{Kind: UsesKindLocalSameRepo, Path: ".github/workflows/build.yml"}, + }, + { + name: "gitea dir, .yaml", + in: "./.gitea/workflows/build.yaml", + want: UsesRef{Kind: UsesKindLocalSameRepo, Path: ".gitea/workflows/build.yaml"}, + }, + { + name: "filename containing dots is allowed", + in: "./.gitea/workflows/foo..bar.yml", + want: UsesRef{Kind: UsesKindLocalSameRepo, Path: ".gitea/workflows/foo..bar.yml"}, + }, + { + name: "nested subdirectory", + in: "./.gitea/workflows/sub/build.yml", + want: UsesRef{Kind: UsesKindLocalSameRepo, Path: ".gitea/workflows/sub/build.yml"}, + }, + { + name: "leading/trailing whitespace is trimmed", + in: " ./.gitea/workflows/build.yml ", + want: UsesRef{Kind: UsesKindLocalSameRepo, Path: ".gitea/workflows/build.yml"}, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got, err := ParseUses(c.in) + require.NoError(t, err) + assert.Equal(t, c.want, *got) + }) + } + }) + + t.Run("LocalCrossRepo", func(t *testing.T) { + cases := []struct { + name string + in string + want UsesRef + }{ + { + name: "gitea dir, simple ref", + in: "owner/repo/.gitea/workflows/build.yml@v1", + want: UsesRef{ + Kind: UsesKindLocalCrossRepo, + Owner: "owner", + Repo: "repo", + Path: ".gitea/workflows/build.yml", + Ref: "v1", + }, + }, + { + name: "github dir, branch ref", + in: "owner/repo/.github/workflows/build.yml@main", + want: UsesRef{ + Kind: UsesKindLocalCrossRepo, + Owner: "owner", + 
Repo: "repo", + Path: ".github/workflows/build.yml", + Ref: "main", + }, + }, + { + name: ".yaml extension", + in: "owner/repo/.gitea/workflows/build.yaml@abc123", + want: UsesRef{ + Kind: UsesKindLocalCrossRepo, + Owner: "owner", + Repo: "repo", + Path: ".gitea/workflows/build.yaml", + Ref: "abc123", + }, + }, + { + name: "ref with slashes (refs/heads/feature)", + in: "owner/repo/.gitea/workflows/build.yml@refs/heads/feature", + want: UsesRef{ + Kind: UsesKindLocalCrossRepo, + Owner: "owner", + Repo: "repo", + Path: ".gitea/workflows/build.yml", + Ref: "refs/heads/feature", + }, + }, + { + name: "nested subdirectory under workflows", + in: "owner/repo/.gitea/workflows/sub/build.yml@v1", + want: UsesRef{ + Kind: UsesKindLocalCrossRepo, + Owner: "owner", + Repo: "repo", + Path: ".gitea/workflows/sub/build.yml", + Ref: "v1", + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got, err := ParseUses(c.in) + require.NoError(t, err) + assert.Equal(t, c.want, *got) + }) + } + }) + + t.Run("Errors", func(t *testing.T) { + cases := []struct { + name string + in string + }{ + {name: "empty string", in: ""}, + {name: "whitespace only", in: " "}, + + // Same-repo malformed + {name: "same-repo with @ref", in: "./.gitea/workflows/build.yml@v1"}, + {name: "same-repo wrong directory", in: "./not-workflows/build.yml"}, + {name: "same-repo wrong extension", in: "./.gitea/workflows/build.txt"}, + {name: "same-repo missing extension", in: "./.gitea/workflows/build"}, + {name: "same-repo absolute path", in: "/.gitea/workflows/build.yml"}, + {name: "same-repo path traversal", in: "./.gitea/workflows/../escape.yml"}, + {name: "same-repo double slash", in: "./.gitea/workflows//build.yml"}, + {name: "same-repo redundant ./", in: "./.gitea/workflows/./build.yml"}, + {name: "same-repo no filename", in: "./.gitea/workflows/.yml"}, + + // Cross-repo malformed + {name: "cross-repo missing @ref", in: "owner/repo/.gitea/workflows/build.yml"}, + {name: "cross-repo 
empty ref", in: "owner/repo/.gitea/workflows/build.yml@"}, + {name: "cross-repo missing owner", in: "/repo/.gitea/workflows/build.yml@v1"}, + {name: "cross-repo missing repo", in: "owner//.gitea/workflows/build.yml@v1"}, + {name: "cross-repo wrong workflows dir", in: "owner/repo/workflows/build.yml@v1"}, + {name: "cross-repo wrong extension", in: "owner/repo/.gitea/workflows/build.txt@v1"}, + {name: "cross-repo path traversal", in: "owner/repo/.gitea/workflows/../escape.yml@v1"}, + {name: "cross-repo double slash in path", in: "owner/repo/.gitea/workflows//build.yml@v1"}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + _, err := ParseUses(c.in) + assert.Error(t, err) + }) + } + }) +} diff --git a/modules/actions/jobparser/workflow_call.go b/modules/actions/jobparser/workflow_call.go new file mode 100644 index 0000000000000..24793e5d09134 --- /dev/null +++ b/modules/actions/jobparser/workflow_call.go @@ -0,0 +1,405 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package jobparser + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "code.gitea.io/gitea/modules/util" + + "gitea.com/gitea/runner/act/exprparser" + "gitea.com/gitea/runner/act/model" + "go.yaml.in/yaml/v4" +) + +// InputType enumerates the allowed types for a workflow_call input. +type InputType string + +const ( + InputTypeString InputType = "string" + InputTypeBoolean InputType = "boolean" + InputTypeNumber InputType = "number" +) + +// InputSpec describes a single workflow_call input declaration. +type InputSpec struct { + Description string `yaml:"description"` + Required bool `yaml:"required"` + Default yaml.Node `yaml:"default"` + Type InputType `yaml:"type"` +} + +// SecretSpec describes a single workflow_call secret declaration. +type SecretSpec struct { + Description string `yaml:"description"` + Required bool `yaml:"required"` +} + +// OutputSpec describes a single workflow_call output declaration. 
+type OutputSpec struct { + Description string `yaml:"description"` + Value string `yaml:"value"` +} + +// WorkflowCallSpec is the parsed "on.workflow_call" schema of a called workflow. +type WorkflowCallSpec struct { + Inputs map[string]InputSpec + Secrets map[string]SecretSpec + Outputs map[string]OutputSpec +} + +// JobOutputs is the per-job-id outputs map used for evaluating workflow_call outputs. +type JobOutputs map[string]map[string]string + +// ParseWorkflowCallSpec extracts on.workflow_call.{inputs,secrets,outputs} from a workflow YAML. +// Returns an error if the workflow does not declare on.workflow_call at all. +func ParseWorkflowCallSpec(content []byte) (*WorkflowCallSpec, error) { + var doc struct { + On yaml.Node `yaml:"on"` + } + if err := yaml.Unmarshal(content, &doc); err != nil { + return nil, fmt.Errorf("parse workflow yaml: %w", err) + } + + wcNode, ok := findWorkflowCallNode(&doc.On) + if !ok { + return nil, errors.New("workflow does not declare on.workflow_call") + } + + spec := &WorkflowCallSpec{ + Inputs: map[string]InputSpec{}, + Secrets: map[string]SecretSpec{}, + Outputs: map[string]OutputSpec{}, + } + + if wcNode == nil || wcNode.Kind != yaml.MappingNode { + return spec, nil + } + + for i := 0; i+1 < len(wcNode.Content); i += 2 { + key := wcNode.Content[i] + val := wcNode.Content[i+1] + switch key.Value { + case "inputs": + if err := decodeWorkflowCallMapping(val, spec.Inputs); err != nil { + return nil, fmt.Errorf("parse workflow_call.inputs: %w", err) + } + case "secrets": + if err := decodeWorkflowCallMapping(val, spec.Secrets); err != nil { + return nil, fmt.Errorf("parse workflow_call.secrets: %w", err) + } + case "outputs": + if err := decodeWorkflowCallMapping(val, spec.Outputs); err != nil { + return nil, fmt.Errorf("parse workflow_call.outputs: %w", err) + } + } + } + + for name, in := range spec.Inputs { + if in.Type == "" { + return nil, fmt.Errorf("workflow_call input %q is missing required field \"type\"", name) + } + 
switch in.Type { + case InputTypeString, InputTypeBoolean, InputTypeNumber: + default: + return nil, fmt.Errorf("workflow_call input %q has unsupported type %q", name, in.Type) + } + } + + return spec, nil +} + +// findWorkflowCallNode walks the "on:" node and returns the value mapping (or nil) for "workflow_call". +// "ok" is true when the workflow declares workflow_call (even with an empty body). +func findWorkflowCallNode(on *yaml.Node) (val *yaml.Node, ok bool) { + if on == nil || on.Kind == 0 { + return nil, false + } + switch on.Kind { + case yaml.ScalarNode: + return nil, on.Value == "workflow_call" + case yaml.SequenceNode: + for _, item := range on.Content { + if item.Kind == yaml.ScalarNode && item.Value == "workflow_call" { + return nil, true + } + } + return nil, false + case yaml.MappingNode: + for i := 0; i+1 < len(on.Content); i += 2 { + k := on.Content[i] + v := on.Content[i+1] + if k.Value != "workflow_call" { + continue + } + if v.Kind == yaml.MappingNode { + return v, true + } + return nil, true + } + } + return nil, false +} + +func decodeWorkflowCallMapping[T any](node *yaml.Node, dst map[string]T) error { + if node == nil || node.Kind != yaml.MappingNode { + return nil + } + for i := 0; i+1 < len(node.Content); i += 2 { + name := node.Content[i].Value + var v T + if err := node.Content[i+1].Decode(&v); err != nil { + return fmt.Errorf("%q: %w", name, err) + } + dst[name] = v + } + return nil +} + +// EvaluateCallerWith evaluates the caller-side expressions in `job.With` against the provided contexts +func EvaluateCallerWith( + jobID string, + job *Job, + gitCtx map[string]any, + results map[string]*JobResult, + vars map[string]string, + inputs map[string]any, +) (map[string]any, error) { + actJob := &model.Job{Strategy: &model.Strategy{ + FailFastString: job.Strategy.FailFastString, + MaxParallelString: job.Strategy.MaxParallelString, + RawMatrix: job.Strategy.RawMatrix, + }} + + var matrix map[string]any + matrixes, err := 
actJob.GetMatrixes() + if err != nil { + return nil, fmt.Errorf("get caller %q matrix: %w", jobID, err) + } + if len(matrixes) > 0 { + matrix = matrixes[0] + } + + evaluator := NewExpressionEvaluator(NewInterpeter(jobID, actJob, matrix, toGitContext(gitCtx), results, vars, inputs)) + + out := make(map[string]any, len(job.With)) + for k, raw := range job.With { + var evaluated any + switch v := raw.(type) { + case string: + node := yaml.Node{} + if err := node.Encode(v); err != nil { + return nil, fmt.Errorf("encode caller %q with[%q]: %w", jobID, k, err) + } + if err := evaluator.EvaluateYamlNode(&node); err != nil { + return nil, fmt.Errorf("evaluate caller %q with[%q]: %w", jobID, k, err) + } + if err := node.Decode(&evaluated); err != nil { + return nil, fmt.Errorf("decode caller %q with[%q]: %w", jobID, k, err) + } + default: + evaluated = v + } + out[k] = evaluated + } + return out, nil +} + +// MatchCallerInputsAgainstSpec checks the caller's already-evaluated `with:` values against the callee's declared `on.workflow_call.inputs` schema +func MatchCallerInputsAgainstSpec(spec *WorkflowCallSpec, evaluated map[string]any) (map[string]any, error) { + resolved := make(map[string]any, len(spec.Inputs)) + + // fill defaults first + for name, in := range spec.Inputs { + if in.Default.IsZero() { + continue + } + v, err := decodeWorkflowCallInputDefault(name, in) + if err != nil { + return nil, err + } + resolved[name] = v + } + + for k, raw := range evaluated { + inputSpec, ok := spec.Inputs[k] + if !ok { + // ignore unknown "with:" keys + continue + } + converted, err := coerceWorkflowCallInput(k, inputSpec.Type, raw) + if err != nil { + return nil, err + } + resolved[k] = converted + } + + for name, in := range spec.Inputs { + if !in.Required { + continue + } + // resolved[name] is set when caller provided it OR when spec has a non-zero default - both satisfy "required". 
+ if _, ok := resolved[name]; ok { + continue + } + return nil, fmt.Errorf("workflow_call input %q is required", name) + } + + return resolved, nil +} + +func decodeWorkflowCallInputDefault(name string, in InputSpec) (any, error) { + var raw string + if err := in.Default.Decode(&raw); err != nil { + // non-scalar default - decode into "any" + var anyVal any + if err := in.Default.Decode(&anyVal); err != nil { + return nil, fmt.Errorf("decode workflow_call input %q default: %w", name, err) + } + return coerceWorkflowCallInput(name, in.Type, anyVal) + } + return coerceWorkflowCallInput(name, in.Type, raw) +} + +func coerceWorkflowCallInput(name string, typ InputType, v any) (any, error) { + switch typ { + case InputTypeString: + return toString(v), nil + case InputTypeBoolean: + switch b := v.(type) { + case bool: + return b, nil + case string: + parsed, err := strconv.ParseBool(b) + if err != nil { + return false, fmt.Errorf("workflow_call input %q expects boolean, got %q", name, b) + } + return parsed, nil + default: + return false, fmt.Errorf("workflow_call input %q expects boolean", name) + } + case InputTypeNumber: + return util.ToFloat64(v) + default: + return nil, fmt.Errorf("workflow_call input %q has unsupported type %q", name, typ) + } +} + +// SecretsInherit is the literal keyword used in a caller's `secrets: inherit` directive +const SecretsInherit = "inherit" + +// callerSecretValueRegexp matches the `${{ secrets.NAME }}` form expected for each value in a caller's `secrets:` mapping. +var callerSecretValueRegexp = regexp.MustCompile(`^\s*\$\{\{\s*secrets\.([A-Za-z_][A-Za-z0-9_]*)\s*\}\}\s*$`) + +// ParseCallerSecrets decodes a caller's "secrets:" YAML node into one of two forms: +// - inherit == true: the caller wrote `secrets: inherit`; mapping is nil +// - inherit == false, mapping == {alias: source_name}: explicit mapping. Each value must be of the form `${{ secrets.NAME }}`. 
+func ParseCallerSecrets(node yaml.Node) (inherit bool, mapping map[string]string, err error) { + if node.IsZero() { + return false, nil, nil + } + if node.Kind == yaml.ScalarNode && strings.TrimSpace(node.Value) == SecretsInherit { + return true, nil, nil + } + if node.Kind != yaml.MappingNode { + return false, nil, errors.New("invalid secrets: section, expected mapping or 'inherit'") + } + out := make(map[string]string, len(node.Content)/2) + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + v := node.Content[i+1] + var sv string + if err := v.Decode(&sv); err != nil { + return false, nil, fmt.Errorf("decode secret %q: %w", k.Value, err) + } + matches := callerSecretValueRegexp.FindStringSubmatch(sv) + if len(matches) != 2 { + return false, nil, fmt.Errorf("caller secret %q value must be of the form ${{ secrets.NAME }}", k.Value) + } + out[k.Value] = matches[1] + } + return false, out, nil +} + +// ValidateCallerSecrets checks a caller's parsed explicit-mapping `secrets:` against the called workflow's declared `on.workflow_call.secrets` schema. +func ValidateCallerSecrets(spec *WorkflowCallSpec, mapping map[string]string) error { + if spec == nil { + return errors.New("ValidateCallerSecrets: nil workflow_call spec") + } + for alias := range mapping { + if _, ok := spec.Secrets[alias]; !ok { + return fmt.Errorf("caller secret %q is not declared in the called workflow's on.workflow_call.secrets", alias) + } + } + for name, sec := range spec.Secrets { + if !sec.Required { + continue + } + if _, ok := mapping[name]; !ok { + return fmt.Errorf("required secret %q is not provided by the caller", name) + } + } + return nil +} + +// EvaluateWorkflowCallOutputs evaluates a called workflow's "on.workflow_call.outputs..value" expressions against the provided contexts. 
+func EvaluateWorkflowCallOutputs(spec *WorkflowCallSpec, gitCtx *model.GithubContext, vars map[string]string, inputs map[string]any, jobOutputs JobOutputs) (map[string]string, error) { + if spec == nil || len(spec.Outputs) == 0 { + return map[string]string{}, nil + } + + jobsCtx := make(map[string]*model.WorkflowCallResult, len(jobOutputs)) + for jobID, outputs := range jobOutputs { + jobsCtx[jobID] = &model.WorkflowCallResult{Outputs: outputs} + } + + // See `on.workflow_call.outputs..value` in https://docs.github.com/en/actions/reference/workflows-and-actions/contexts#context-availability + env := &exprparser.EvaluationEnvironment{ + Github: gitCtx, + Jobs: &jobsCtx, + Vars: vars, + Inputs: inputs, + } + interpreter := exprparser.NewInterpeter(env, exprparser.Config{}) + + out := make(map[string]string, len(spec.Outputs)) + for name, o := range spec.Outputs { + v, err := evaluateWorkflowCallOutputValue(interpreter, o.Value) + if err != nil { + return nil, fmt.Errorf("workflow_call output %q: %w", name, err) + } + out[name] = v + } + return out, nil +} + +func evaluateWorkflowCallOutputValue(interpreter exprparser.Interpreter, value string) (string, error) { + if !strings.Contains(value, "${{") || !strings.Contains(value, "}}") { + return value, nil + } + expr, err := rewriteSubExpression(value, true) + if err != nil { + return "", err + } + evaluated, err := interpreter.Evaluate(expr, exprparser.DefaultStatusCheckNone) + if err != nil { + return "", err + } + return toString(evaluated), nil +} + +func toString(v any) string { + switch s := v.(type) { + case string: + return s + case nil: + return "" + default: + return fmt.Sprintf("%v", s) + } +} diff --git a/modules/actions/jobparser/workflow_call_test.go b/modules/actions/jobparser/workflow_call_test.go new file mode 100644 index 0000000000000..8472eb27f03bc --- /dev/null +++ b/modules/actions/jobparser/workflow_call_test.go @@ -0,0 +1,435 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package jobparser + +import ( + "maps" + "testing" + + "gitea.com/gitea/runner/act/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.yaml.in/yaml/v4" +) + +func TestParseWorkflowCallSpec(t *testing.T) { + t.Run("workflow without on.workflow_call is rejected", func(t *testing.T) { + notCallable := []byte(`name: ordinary +on: push +jobs: + noop: + runs-on: ubuntu-latest + steps: + - run: echo +`) + _, err := ParseWorkflowCallSpec(notCallable) + require.Error(t, err) + assert.Contains(t, err.Error(), "does not declare on.workflow_call") + }) + + t.Run("input missing the required type field is rejected", func(t *testing.T) { + content := callableWorkflow(t, `inputs: + x: + description: missing type +`) + _, err := ParseWorkflowCallSpec(content) + require.Error(t, err) + assert.Contains(t, err.Error(), `missing required field "type"`) + }) + + t.Run("inputs/secrets/outputs are decoded", func(t *testing.T) { + content := callableWorkflow(t, `inputs: + env: + type: string + required: true + secrets: + DEPLOY_KEY: + required: true + outputs: + sha: + value: ${{ jobs.build.outputs.commit }} +`) + spec, err := ParseWorkflowCallSpec(content) + require.NoError(t, err) + assert.Equal(t, InputTypeString, spec.Inputs["env"].Type) + assert.True(t, spec.Inputs["env"].Required) + assert.True(t, spec.Secrets["DEPLOY_KEY"].Required) + assert.Equal(t, "${{ jobs.build.outputs.commit }}", spec.Outputs["sha"].Value) + }) +} + +func TestEvaluateCallerWith(t *testing.T) { + t.Run("empty with: returns empty map", func(t *testing.T) { + out, err := EvaluateCallerWith("caller", &Job{}, nil, callerResults("caller", nil, nil), nil, nil) + require.NoError(t, err) + assert.Empty(t, out) + }) + + t.Run("non-string raw values pass through unchanged", func(t *testing.T) { + job := &Job{With: map[string]any{ + "already_bool": true, + "already_int": 42, + "already_slice": []any{"a", "b"}, + }} + out, err := 
EvaluateCallerWith("caller", job, nil, callerResults("caller", nil, nil), nil, nil) + require.NoError(t, err) + assert.Equal(t, true, out["already_bool"]) + assert.Equal(t, 42, out["already_int"]) + assert.Equal(t, []any{"a", "b"}, out["already_slice"]) + }) + + t.Run("expressions resolve against vars/inputs/results", func(t *testing.T) { + job := &Job{With: map[string]any{ + "env_name": "${{ vars.ENV }}", + "from_inputs": "${{ inputs.PARENT_VAR }}", + "from_needs": "${{ needs.upstream.outputs.commit }}", + }} + gitCtx := map[string]any{"event": map[string]any{}} + results := callerResults("caller", []string{"upstream"}, map[string]*JobResult{ + "upstream": {Result: "success", Outputs: map[string]string{"commit": "abc123"}}, + }) + vars := map[string]string{"ENV": "staging"} + inputs := map[string]any{"PARENT_VAR": "from-parent"} + out, err := EvaluateCallerWith("caller", job, gitCtx, results, vars, inputs) + require.NoError(t, err) + assert.Equal(t, "staging", out["env_name"]) + assert.Equal(t, "from-parent", out["from_inputs"]) + assert.Equal(t, "abc123", out["from_needs"]) + }) + + t.Run("matrix.X resolves to this caller row's matrix instance", func(t *testing.T) { + var rawMatrix yaml.Node + require.NoError(t, rawMatrix.Encode(map[string][]any{"target": {"staging"}})) + job := &Job{ + With: map[string]any{"env": "${{ matrix.target }}"}, + Strategy: Strategy{RawMatrix: rawMatrix}, + } + out, err := EvaluateCallerWith("caller", job, nil, callerResults("caller", nil, nil), nil, nil) + require.NoError(t, err) + assert.Equal(t, "staging", out["env"]) + }) +} + +func TestMatchCallerInputsAgainstSpec(t *testing.T) { + // mustParseSpec wraps ParseWorkflowCallSpec for test brevity. 
+ mustParseSpec := func(t *testing.T, content []byte) *WorkflowCallSpec { + t.Helper() + spec, err := ParseWorkflowCallSpec(content) + require.NoError(t, err) + return spec + } + + t.Run("default is filled when caller does not provide the input", func(t *testing.T) { + spec := mustParseSpec(t, callableWorkflow(t, `inputs: + greeting: + type: string + default: hi +`)) + out, err := MatchCallerInputsAgainstSpec(spec, nil) + require.NoError(t, err) + assert.Equal(t, map[string]any{"greeting": "hi"}, out) + }) + + t.Run("caller-provided value wins over default", func(t *testing.T) { + spec := mustParseSpec(t, callableWorkflow(t, `inputs: + greeting: + type: string + default: hi +`)) + out, err := MatchCallerInputsAgainstSpec(spec, map[string]any{"greeting": "hello"}) + require.NoError(t, err) + assert.Equal(t, map[string]any{"greeting": "hello"}, out) + }) + + t.Run("required input must be provided", func(t *testing.T) { + spec := mustParseSpec(t, callableWorkflow(t, `inputs: + target: + type: string + required: true +`)) + _, err := MatchCallerInputsAgainstSpec(spec, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), `"target" is required`) + }) + + t.Run("required input is satisfied by a default value", func(t *testing.T) { + spec := mustParseSpec(t, callableWorkflow(t, `inputs: + target: + type: string + required: true + default: prod +`)) + out, err := MatchCallerInputsAgainstSpec(spec, nil) + require.NoError(t, err) + assert.Equal(t, map[string]any{"target": "prod"}, out) + }) + + t.Run("boolean inputs accept bool and bool-ish strings", func(t *testing.T) { + spec := mustParseSpec(t, callableWorkflow(t, `inputs: + flag1: + type: boolean + flag2: + type: boolean + default: "true" + flag3: + type: boolean +`)) + out, err := MatchCallerInputsAgainstSpec(spec, map[string]any{ + "flag1": true, // already a bool + "flag3": "false", // bool-ish string + }) + require.NoError(t, err) + assert.Equal(t, true, out["flag1"]) + assert.Equal(t, true, out["flag2"]) // 
from default + assert.Equal(t, false, out["flag3"]) + }) + + t.Run("boolean input rejects non-bool-ish strings", func(t *testing.T) { + spec := mustParseSpec(t, callableWorkflow(t, `inputs: + flag: + type: boolean +`)) + _, err := MatchCallerInputsAgainstSpec(spec, map[string]any{"flag": "maybe"}) + require.Error(t, err) + assert.Contains(t, err.Error(), "expects boolean") + }) + + t.Run("number inputs accept numeric strings and ints", func(t *testing.T) { + spec := mustParseSpec(t, callableWorkflow(t, `inputs: + count: + type: number + ratio: + type: number + default: "0.5" +`)) + out, err := MatchCallerInputsAgainstSpec(spec, map[string]any{"count": "42"}) + require.NoError(t, err) + assert.InDelta(t, 42.0, out["count"], 0) + assert.InDelta(t, 0.5, out["ratio"], 0) + }) + + t.Run("unknown caller-with key is silently dropped", func(t *testing.T) { + spec := mustParseSpec(t, callableWorkflow(t, `inputs: + known: + type: string + default: ok +`)) + out, err := MatchCallerInputsAgainstSpec(spec, map[string]any{ + "known": "yes", + "unknown": "ignored", + }) + require.NoError(t, err) + assert.Equal(t, map[string]any{"known": "yes"}, out) + }) +} + +func TestParseCallerSecrets(t *testing.T) { + // secretYAMLNode unmarshals raw YAML text into a yaml.Node so tests can hand it to ParseCallerSecrets. + secretYAMLNode := func(t *testing.T, s string) yaml.Node { + t.Helper() + var node yaml.Node + require.NoError(t, yaml.Unmarshal([]byte(s), &node)) + // yaml.Unmarshal wraps content in a DocumentNode; the meaningful node is the first child. 
+ if node.Kind == yaml.DocumentNode && len(node.Content) > 0 { + return *node.Content[0] + } + return node + } + + t.Run("zero node returns no inherit, no mapping", func(t *testing.T) { + inherit, mapping, err := ParseCallerSecrets(yaml.Node{}) + require.NoError(t, err) + assert.False(t, inherit) + assert.Nil(t, mapping) + }) + + t.Run("\"inherit\" scalar sets inherit=true", func(t *testing.T) { + inherit, mapping, err := ParseCallerSecrets(secretYAMLNode(t, `inherit`)) + require.NoError(t, err) + assert.True(t, inherit) + assert.Nil(t, mapping) + }) + + t.Run("non-inherit scalar is rejected", func(t *testing.T) { + _, _, err := ParseCallerSecrets(secretYAMLNode(t, `something-else`)) + require.Error(t, err) + assert.Contains(t, err.Error(), "expected mapping or 'inherit'") + }) + + t.Run("mapping of secrets-style references is parsed", func(t *testing.T) { + inherit, mapping, err := ParseCallerSecrets(secretYAMLNode(t, ` +DEPLOY_KEY: ${{ secrets.GITEA_DEPLOY_KEY }} +DB_PASS: ${{ secrets.PROD_DB_PASS }} +`)) + require.NoError(t, err) + assert.False(t, inherit) + assert.Equal(t, map[string]string{ + "DEPLOY_KEY": "GITEA_DEPLOY_KEY", + "DB_PASS": "PROD_DB_PASS", + }, mapping) + }) + + t.Run("mapping value not in ${{ secrets.NAME }} form is rejected", func(t *testing.T) { + // plain string + _, _, err := ParseCallerSecrets(secretYAMLNode(t, `KEY: not-an-expression`)) + require.Error(t, err) + assert.Contains(t, err.Error(), `must be of the form ${{ secrets.NAME }}`) + + // expression but referencing the wrong context (vars instead of secrets) + _, _, err = ParseCallerSecrets(secretYAMLNode(t, `KEY: ${{ vars.NAME }}`)) + require.Error(t, err) + assert.Contains(t, err.Error(), `must be of the form ${{ secrets.NAME }}`) + }) +} + +func TestValidateCallerSecrets(t *testing.T) { + specWith := func(secrets map[string]SecretSpec) *WorkflowCallSpec { + return &WorkflowCallSpec{Secrets: secrets} + } + + t.Run("explicit mapping with all required + only declared aliases is 
accepted", func(t *testing.T) { + spec := specWith(map[string]SecretSpec{ + "DEPLOY_KEY": {Required: true}, + "OPTIONAL": {}, + }) + mapping := map[string]string{ + "DEPLOY_KEY": "PROD_DEPLOY_KEY", + "OPTIONAL": "SOMETHING_ELSE", + } + require.NoError(t, ValidateCallerSecrets(spec, mapping)) + }) + + t.Run("alias not in callee schema is rejected", func(t *testing.T) { + spec := specWith(map[string]SecretSpec{"DEPLOY_KEY": {}}) + mapping := map[string]string{ + "DEPLOY_KEY": "PROD_DEPLOY_KEY", + "EXTRA": "SOMETHING_NOT_DECLARED", + } + err := ValidateCallerSecrets(spec, mapping) + require.Error(t, err) + assert.Contains(t, err.Error(), `caller secret "EXTRA"`) + assert.Contains(t, err.Error(), `not declared`) + }) + + t.Run("missing required secret is rejected", func(t *testing.T) { + spec := specWith(map[string]SecretSpec{ + "MUST_HAVE": {Required: true}, + "OPTIONAL": {}, + }) + mapping := map[string]string{"OPTIONAL": "X"} + err := ValidateCallerSecrets(spec, mapping) + require.Error(t, err) + assert.Contains(t, err.Error(), `required secret "MUST_HAVE"`) + assert.Contains(t, err.Error(), `not provided`) + }) + + t.Run("callee with no secrets schema accepts an empty mapping", func(t *testing.T) { + spec := specWith(map[string]SecretSpec{}) + require.NoError(t, ValidateCallerSecrets(spec, nil)) + require.NoError(t, ValidateCallerSecrets(spec, map[string]string{})) + }) + + t.Run("callee with no secrets schema rejects a non-empty mapping", func(t *testing.T) { + spec := specWith(map[string]SecretSpec{}) + err := ValidateCallerSecrets(spec, map[string]string{"X": "Y"}) + require.Error(t, err) + assert.Contains(t, err.Error(), `caller secret "X"`) + }) + + t.Run("nil spec is rejected", func(t *testing.T) { + err := ValidateCallerSecrets(nil, map[string]string{"X": "Y"}) + require.Error(t, err) + assert.Contains(t, err.Error(), "nil workflow_call spec") + }) +} + +func TestEvaluateWorkflowCallOutputs(t *testing.T) { + t.Run("nil spec returns empty map", func(t 
*testing.T) { + out, err := EvaluateWorkflowCallOutputs(nil, &model.GithubContext{}, nil, nil, nil) + require.NoError(t, err) + assert.Empty(t, out) + }) + + t.Run("spec with no outputs returns empty map", func(t *testing.T) { + spec := &WorkflowCallSpec{Outputs: map[string]OutputSpec{}} + out, err := EvaluateWorkflowCallOutputs(spec, &model.GithubContext{}, nil, nil, nil) + require.NoError(t, err) + assert.Empty(t, out) + }) + + t.Run("plain string value passes through unchanged", func(t *testing.T) { + spec := &WorkflowCallSpec{Outputs: map[string]OutputSpec{ + "name": {Value: "static-value"}, + }} + out, err := EvaluateWorkflowCallOutputs(spec, &model.GithubContext{}, nil, nil, nil) + require.NoError(t, err) + assert.Equal(t, map[string]string{"name": "static-value"}, out) + }) + + t.Run("output references jobs..outputs.", func(t *testing.T) { + spec := &WorkflowCallSpec{Outputs: map[string]OutputSpec{ + "sha": {Value: "${{ jobs.build.outputs.commit }}"}, + }} + jobOutputs := JobOutputs{ + "build": {"commit": "deadbeef"}, + } + out, err := EvaluateWorkflowCallOutputs(spec, &model.GithubContext{}, nil, nil, jobOutputs) + require.NoError(t, err) + assert.Equal(t, "deadbeef", out["sha"]) + }) + + t.Run("output references inputs.", func(t *testing.T) { + spec := &WorkflowCallSpec{Outputs: map[string]OutputSpec{ + "target": {Value: "${{ inputs.env_name }}"}, + }} + inputs := map[string]any{"env_name": "staging"} + out, err := EvaluateWorkflowCallOutputs(spec, &model.GithubContext{}, nil, inputs, nil) + require.NoError(t, err) + assert.Equal(t, "staging", out["target"]) + }) + + t.Run("multiple outputs are all evaluated", func(t *testing.T) { + spec := &WorkflowCallSpec{Outputs: map[string]OutputSpec{ + "static": {Value: "static-value"}, + "dynamic": {Value: "${{ vars.SUFFIX }}"}, + }} + vars := map[string]string{"SUFFIX": "abc"} + out, err := EvaluateWorkflowCallOutputs(spec, &model.GithubContext{}, vars, nil, nil) + require.NoError(t, err) + assert.Equal(t, 
"static-value", out["static"]) + assert.Equal(t, "abc", out["dynamic"]) + }) + + t.Run("expression referencing an undefined symbol surfaces an error", func(t *testing.T) { + spec := &WorkflowCallSpec{Outputs: map[string]OutputSpec{ + "bad": {Value: "${{ this.is.not.valid() }}"}, + }} + _, err := EvaluateWorkflowCallOutputs(spec, &model.GithubContext{}, nil, nil, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), `output "bad"`) + }) +} + +// callableWorkflow returns a minimal valid called-workflow YAML with on.workflow_call. +func callableWorkflow(t *testing.T, body string) []byte { + t.Helper() + return []byte(`name: callable +on: + workflow_call: + ` + body + ` +jobs: + noop: + runs-on: ubuntu-latest + steps: + - run: "echo" +`) +} + +// callerResults returns the minimum results map shape that NewInterpeter expects +func callerResults(callerJobID string, callerNeeds []string, deps map[string]*JobResult) map[string]*JobResult { + out := make(map[string]*JobResult, len(deps)+1) + maps.Copy(out, deps) + out[callerJobID] = &JobResult{Needs: callerNeeds} + return out +} diff --git a/modules/structs/hook.go b/modules/structs/hook.go index 99c1535155ef3..ac918c7f36941 100644 --- a/modules/structs/hook.go +++ b/modules/structs/hook.go @@ -571,6 +571,20 @@ func (p *WorkflowDispatchPayload) JSONPayload() ([]byte, error) { return json.MarshalIndent(p, "", " ") } +// WorkflowCallPayload is persisted on a reusable workflow caller job's CallPayload field. +type WorkflowCallPayload struct { + Workflow string `json:"workflow"` + Ref string `json:"ref"` + Inputs map[string]any `json:"inputs"` + Repository *Repository `json:"repository"` + Sender *User `json:"sender"` +} + +// JSONPayload implements Payload +func (p *WorkflowCallPayload) JSONPayload() ([]byte, error) { + return json.MarshalIndent(p, "", " ") +} + // CommitStatusPayload represents a payload information of commit status event. 
type CommitStatusPayload struct { // TODO: add Branches per https://docs.github.com/en/webhooks/webhook-events-and-payloads#status diff --git a/routers/web/repo/actions/view.go b/routers/web/repo/actions/view.go index 3ce4337fbcd1e..ed2d46e89767a 100644 --- a/routers/web/repo/actions/view.go +++ b/routers/web/repo/actions/view.go @@ -321,6 +321,11 @@ type ViewJob struct { CanRerun bool `json:"canRerun"` Duration string `json:"duration"` Needs []string `json:"needs,omitempty"` + + // Reusable workflow fields. All zero/empty for plain (non-call) jobs. + IsReusableCaller bool `json:"isReusableCaller"` + ParentCallJobID int64 `json:"parentCallJobID"` + CallUses string `json:"callUses,omitempty"` } type ViewRunAttempt struct { @@ -445,6 +450,10 @@ func fillViewRunResponseSummary(ctx *context_module.Context, resp *ViewResponse, CanRerun: resp.State.Run.CanRerun, Duration: v.Duration().String(), Needs: v.Needs, + + IsReusableCaller: v.IsReusableCaller, + ParentCallJobID: v.ParentCallJobID, + CallUses: v.CallUses, }) } diff --git a/services/actions/concurrency.go b/services/actions/concurrency.go index 990c0e9a0bda9..66df4bd98a496 100644 --- a/services/actions/concurrency.go +++ b/services/actions/concurrency.go @@ -9,8 +9,6 @@ import ( actions_model "code.gitea.io/gitea/models/actions" "code.gitea.io/gitea/modules/actions/jobparser" - "code.gitea.io/gitea/modules/json" - api "code.gitea.io/gitea/modules/structs" act_model "gitea.com/gitea/runner/act/model" "go.yaml.in/yaml/v4" @@ -29,7 +27,7 @@ func EvaluateRunConcurrencyFillModel(ctx context.Context, run *actions_model.Act jobResults := map[string]*jobparser.JobResult{"": {}} if inputs == nil { var err error - inputs, err = getInputsFromRun(run) + inputs, err = getWorkflowDispatchInputsFromRun(run) if err != nil { return fmt.Errorf("get inputs: %w", err) } @@ -43,25 +41,6 @@ func EvaluateRunConcurrencyFillModel(ctx context.Context, run *actions_model.Act return nil } -func findJobNeedsAndFillJobResults(ctx 
context.Context, job *actions_model.ActionRunJob) (map[string]*jobparser.JobResult, error) { - taskNeeds, err := FindTaskNeeds(ctx, job) - if err != nil { - return nil, fmt.Errorf("find task needs: %w", err) - } - jobResults := make(map[string]*jobparser.JobResult, len(taskNeeds)) - for jobID, taskNeed := range taskNeeds { - jobResult := &jobparser.JobResult{ - Result: taskNeed.Result.String(), - Outputs: taskNeed.Outputs, - } - jobResults[jobID] = jobResult - } - jobResults[job.JobID] = &jobparser.JobResult{ - Needs: job.Needs, - } - return jobResults, nil -} - // EvaluateJobConcurrencyFillModel evaluates the expressions in a job-level concurrency, // and fills the job's model fields with `concurrency.group` and `concurrency.cancel-in-progress`. // Job-level concurrency may depend on other job's outputs (via `needs`): `concurrency.group: my-group-${{ needs.job1.outputs.out1 }}` @@ -86,7 +65,7 @@ func EvaluateJobConcurrencyFillModel(ctx context.Context, run *actions_model.Act if inputs == nil { var err error - inputs, err = getInputsFromRun(run) + inputs, err = getInputsForJob(ctx, run, actionRunJob) if err != nil { return fmt.Errorf("get inputs: %w", err) } @@ -104,14 +83,3 @@ func EvaluateJobConcurrencyFillModel(ctx context.Context, run *actions_model.Act actionRunJob.IsConcurrencyEvaluated = true return nil } - -func getInputsFromRun(run *actions_model.ActionRun) (map[string]any, error) { - if run.Event != "workflow_dispatch" { - return map[string]any{}, nil - } - var payload api.WorkflowDispatchPayload - if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil { - return nil, err - } - return payload.Inputs, nil -} diff --git a/services/actions/context.go b/services/actions/context.go index 1d4a08459d456..dc7223d19d04a 100644 --- a/services/actions/context.go +++ b/services/actions/context.go @@ -11,11 +11,13 @@ import ( actions_model "code.gitea.io/gitea/models/actions" "code.gitea.io/gitea/models/db" actions_module 
"code.gitea.io/gitea/modules/actions" + "code.gitea.io/gitea/modules/actions/jobparser" "code.gitea.io/gitea/modules/container" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/json" "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/setting" + api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" "gitea.com/gitea/runner/act/model" @@ -96,6 +98,27 @@ func GenerateGiteaContext(ctx context.Context, run *actions_model.ActionRun, att if job != nil { gitContext["job"] = job.JobID gitContext["run_attempt"] = strconv.FormatInt(job.Attempt, 10) + + if job.ParentCallJobID > 0 { + // Inject the caller's resolved workflow_call inputs into gitea.event.inputs. + // The rest of gitea.event stays as the caller's actual trigger event (push/pull_request/etc.) + // to match GitHub's semantics (see https://docs.github.com/en/actions/reference/workflows-and-actions/reusing-workflow-configurations#github-context). + // FIXME: If the run is triggered by "workflow_dispatch", the original inputs of "workflow_dispatch" will be overridden. + // If necessary, the caller can send these values to the called workflow via `with:`. + caller, err := actions_model.GetRunJobByRunAndID(ctx, job.RunID, job.ParentCallJobID) + if err == nil && caller.CallPayload != "" { + var cp api.WorkflowCallPayload + if err := json.Unmarshal([]byte(caller.CallPayload), &cp); err == nil && cp.Inputs != nil { + event["inputs"] = cp.Inputs + } + } + + // Override gitea.event_name to "workflow_call", so that the runner-side `getEvaluatorInputs` can get inputs from event["inputs"]. + // See https://gitea.com/gitea/act_runner/src/commit/35834bf8178b7330b4f17315cdb35fa95d4b4f1e/act/runner/expression.go#L511 + // FIXME: The trade-off is that `${{ gitea.event_name }}` inside a reusable workflow's child job reads "workflow_call" + // instead of the caller's real trigger event name (push/pull_request/etc.) This is a small deviation from GitHub spec. 
+ gitContext["event_name"] = "workflow_call" + } } if attempt == nil { @@ -125,7 +148,8 @@ type TaskNeed struct { Outputs map[string]string } -// FindTaskNeeds finds the `needs` for the task by the task's job +// FindTaskNeeds finds the `needs` for the task by the task's job. +// Lookup is scoped to the same ParentCallJobID. func FindTaskNeeds(ctx context.Context, job *actions_model.ActionRunJob) (map[string]*TaskNeed, error) { if len(job.Needs) == 0 { return nil, nil //nolint:nilnil // return nil when the job has no needs @@ -144,8 +168,12 @@ func FindTaskNeeds(ctx context.Context, job *actions_model.ActionRunJob) (map[st } jobIDJobs := make(map[string][]*actions_model.ActionRunJob) - for _, job := range jobs { - jobIDJobs[job.JobID] = append(jobIDJobs[job.JobID], job) + for _, candidate := range jobs { + // `needs` references are scope-bound: only candidates in the same caller scope match. + if candidate.ParentCallJobID != job.ParentCallJobID { + continue + } + jobIDJobs[candidate.JobID] = append(jobIDJobs[candidate.JobID], candidate) } ret := make(map[string]*TaskNeed, len(needs)) @@ -154,19 +182,19 @@ func FindTaskNeeds(ctx context.Context, job *actions_model.ActionRunJob) (map[st continue } var jobOutputs map[string]string - for _, job := range jobsWithSameID { - taskID := job.EffectiveTaskID() - if taskID == 0 || !job.Status.IsDone() { - // it shouldn't happen + for _, candidate := range jobsWithSameID { + if !candidate.Status.IsDone() { continue } - got, err := actions_model.FindTaskOutputByTaskID(ctx, taskID) - if err != nil { - return nil, fmt.Errorf("FindTaskOutputByTaskID: %w", err) + var outputs map[string]string + var err error + if candidate.IsReusableCaller { + outputs, err = computeReusableCallerOutputs(ctx, candidate, jobs) + } else { + outputs, err = loadJobTaskOutputs(ctx, candidate) } - outputs := make(map[string]string, len(got)) - for _, v := range got { - outputs[v.OutputKey] = v.OutputValue + if err != nil { + return nil, err } if 
len(jobOutputs) == 0 { jobOutputs = outputs @@ -182,6 +210,86 @@ func FindTaskNeeds(ctx context.Context, job *actions_model.ActionRunJob) (map[st return ret, nil } +// computeReusableCallerOutputs returns the workflow_call outputs of a reusable caller by recursing into its child subtree. +func computeReusableCallerOutputs(ctx context.Context, caller *actions_model.ActionRunJob, allJobs []*actions_model.ActionRunJob) (map[string]string, error) { + directChildren := make([]*actions_model.ActionRunJob, 0) + for _, j := range allJobs { + if j.ParentCallJobID == caller.ID { + directChildren = append(directChildren, j) + } + } + + if err := caller.LoadRun(ctx); err != nil { + return nil, err + } + wcSpec, err := jobparser.ParseWorkflowCallSpec(caller.ReusableWorkflowContent) + if err != nil { + return nil, err + } + if len(wcSpec.Outputs) == 0 { + return map[string]string{}, nil + } + + // Per-job outputs over the children of this caller. + jobOutputs := make(jobparser.JobOutputs, len(directChildren)) + for _, child := range directChildren { + var outs map[string]string + switch { + case child.IsReusableCaller: + outs, err = computeReusableCallerOutputs(ctx, child, allJobs) + default: + outs, err = loadJobTaskOutputs(ctx, child) + } + if err != nil { + return nil, err + } + if existing, ok := jobOutputs[child.JobID]; ok { + jobOutputs[child.JobID] = mergeTwoOutputs(outs, existing) + } else { + jobOutputs[child.JobID] = outs + } + } + + // build contexts for evaluating outputs + if err := caller.Run.LoadAttributes(ctx); err != nil { + return nil, err + } + gitCtx := GenerateGiteaContext(ctx, caller.Run, nil, caller) + vars, err := actions_model.GetVariablesOfRun(ctx, caller.Run) + if err != nil { + return nil, err + } + inputs := map[string]any{} + if caller.CallPayload != "" { + var p api.WorkflowCallPayload + if err := json.Unmarshal([]byte(caller.CallPayload), &p); err != nil { + return nil, fmt.Errorf("decode caller payload: %w", err) + } + if p.Inputs != nil { + 
inputs = p.Inputs + } + } + + return jobparser.EvaluateWorkflowCallOutputs(wcSpec, gitCtx.ToGitHubContext(), vars, inputs, jobOutputs) +} + +// loadJobTaskOutputs returns the task-output map of `job`. +func loadJobTaskOutputs(ctx context.Context, job *actions_model.ActionRunJob) (map[string]string, error) { + tid := job.EffectiveTaskID() + if tid == 0 { + return map[string]string{}, nil + } + rows, err := actions_model.FindTaskOutputByTaskID(ctx, tid) + if err != nil { + return nil, fmt.Errorf("FindTaskOutputByTaskID: %w", err) + } + out := make(map[string]string, len(rows)) + for _, r := range rows { + out[r.OutputKey] = r.OutputValue + } + return out, nil +} + // mergeTwoOutputs merges two outputs from two different ActionRunJobs // Values with the same output name may be overridden. The user should ensure the output names are unique. // See https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#using-job-outputs-in-a-matrix-job diff --git a/services/actions/context_test.go b/services/actions/context_test.go index d86ec47a3c17e..1bac7f8674f9e 100644 --- a/services/actions/context_test.go +++ b/services/actions/context_test.go @@ -8,7 +8,10 @@ import ( "testing" actions_model "code.gitea.io/gitea/models/actions" + "code.gitea.io/gitea/models/db" "code.gitea.io/gitea/models/unittest" + "code.gitea.io/gitea/modules/json" + api "code.gitea.io/gitea/modules/structs" act_model "gitea.com/gitea/runner/act/model" "github.com/stretchr/testify/assert" @@ -90,6 +93,191 @@ jobs: assert.NotEmpty(t, persisted.RawConcurrency) } +func TestComputeReusableCallerOutputs(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + ctx := t.Context() + + var nextRunIndex int64 = 9001 + insertRun := func(t *testing.T, workflowID string) *actions_model.ActionRun { + t.Helper() + run := &actions_model.ActionRun{ + Title: "reusable-out", + RepoID: 4, + Index: nextRunIndex, + OwnerID: 1, + WorkflowID: workflowID, + TriggerUserID: 1, + Ref: 
"refs/heads/master", + CommitSHA: "c2d72f548424103f01ee1dc02889c1e2bff816b0", + Event: "push", + TriggerEvent: "push", + EventPayload: "{}", + Status: actions_model.StatusSuccess, + } + nextRunIndex++ + require.NoError(t, db.Insert(ctx, run)) + return run + } + + insertCaller := func(t *testing.T, run *actions_model.ActionRun, jobID string, parentID int64, content, callPayload string) *actions_model.ActionRunJob { + t.Helper() + job := &actions_model.ActionRunJob{ + RunID: run.ID, + RepoID: run.RepoID, + OwnerID: run.OwnerID, + CommitSHA: run.CommitSHA, + Name: jobID, + JobID: jobID, + Attempt: 1, + Status: actions_model.StatusSuccess, + ParentCallJobID: parentID, + IsReusableCaller: true, + IsCallerExpanded: true, + ReusableWorkflowContent: []byte(content), + CallPayload: callPayload, + } + require.NoError(t, db.Insert(ctx, job)) + return job + } + + // Each call to insertChildJobAndTask with non-empty outputs allocates a fresh TaskID + // so its action_task_output rows stay isolated per subtest. 
+ var nextTaskID int64 = 90001 + insertChildJobAndTask := func(t *testing.T, run *actions_model.ActionRun, jobID string, parentID int64, outputs map[string]string) *actions_model.ActionRunJob { + t.Helper() + var taskID int64 + if len(outputs) > 0 { + taskID = nextTaskID + nextTaskID++ + } + job := &actions_model.ActionRunJob{ + RunID: run.ID, + RepoID: run.RepoID, + OwnerID: run.OwnerID, + CommitSHA: run.CommitSHA, + Name: jobID, + JobID: jobID, + Attempt: 1, + Status: actions_model.StatusSuccess, + ParentCallJobID: parentID, + TaskID: taskID, + } + require.NoError(t, db.Insert(ctx, job)) + for k, v := range outputs { + require.NoError(t, db.Insert(ctx, &actions_model.ActionTaskOutput{ + TaskID: taskID, + OutputKey: k, + OutputValue: v, + })) + } + return job + } + + allJobsOfRun := func(t *testing.T, runID int64) []*actions_model.ActionRunJob { + t.Helper() + all, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: runID}) + require.NoError(t, err) + return all + } + + t.Run("returns empty when callee declares no outputs", func(t *testing.T) { + run := insertRun(t, "no-outputs.yaml") + caller := insertCaller(t, run, "caller", 0, `on: + workflow_call: + outputs: {} +`, "") + out, err := computeReusableCallerOutputs(ctx, caller, allJobsOfRun(t, run.ID)) + require.NoError(t, err) + assert.Empty(t, out) + }) + + t.Run("literal output value passes through", func(t *testing.T) { + run := insertRun(t, "literal-out.yaml") + caller := insertCaller(t, run, "caller", 0, `on: + workflow_call: + outputs: + hello: + value: world +`, "") + out, err := computeReusableCallerOutputs(ctx, caller, allJobsOfRun(t, run.ID)) + require.NoError(t, err) + assert.Equal(t, map[string]string{"hello": "world"}, out) + }) + + t.Run("output expression reads child task outputs", func(t *testing.T) { + run := insertRun(t, "child-out.yaml") + caller := insertCaller(t, run, "caller", 0, `on: + workflow_call: + outputs: + result: + value: ${{ jobs.child.outputs.foo 
}} +`, "") + insertChildJobAndTask(t, run, "child", caller.ID, map[string]string{"foo": "bar"}) + + out, err := computeReusableCallerOutputs(ctx, caller, allJobsOfRun(t, run.ID)) + require.NoError(t, err) + assert.Equal(t, map[string]string{"result": "bar"}, out) + }) + + t.Run("CallPayload inputs reachable in output expression", func(t *testing.T) { + run := insertRun(t, "payload-out.yaml") + payload, err := json.Marshal(api.WorkflowCallPayload{ + Inputs: map[string]any{"env": "staging"}, + }) + require.NoError(t, err) + caller := insertCaller(t, run, "caller", 0, `on: + workflow_call: + inputs: + env: + type: string + outputs: + env: + value: ${{ inputs.env }} +`, string(payload)) + + out, err := computeReusableCallerOutputs(ctx, caller, allJobsOfRun(t, run.ID)) + require.NoError(t, err) + assert.Equal(t, map[string]string{"env": "staging"}, out) + }) + + t.Run("nested caller outputs propagate to outer", func(t *testing.T) { + run := insertRun(t, "nested-out.yaml") + outer := insertCaller(t, run, "outer", 0, `on: + workflow_call: + outputs: + bubbled: + value: ${{ jobs.inner.outputs.up }} +`, "") + inner := insertCaller(t, run, "inner", outer.ID, `on: + workflow_call: + outputs: + up: + value: ${{ jobs.leaf.outputs.foo }} +`, "") + insertChildJobAndTask(t, run, "leaf", inner.ID, map[string]string{"foo": "bubble-value"}) + + out, err := computeReusableCallerOutputs(ctx, outer, allJobsOfRun(t, run.ID)) + require.NoError(t, err) + assert.Equal(t, map[string]string{"bubbled": "bubble-value"}, out) + }) + + t.Run("matrix children with same JobID prefer non-empty values", func(t *testing.T) { + run := insertRun(t, "matrix-out.yaml") + caller := insertCaller(t, run, "caller", 0, `on: + workflow_call: + outputs: + foo: + value: ${{ jobs.matrix.outputs.foo }} +`, "") + insertChildJobAndTask(t, run, "matrix", caller.ID, map[string]string{"foo": ""}) + insertChildJobAndTask(t, run, "matrix", caller.ID, map[string]string{"foo": "filled"}) + + out, err := 
computeReusableCallerOutputs(ctx, caller, allJobsOfRun(t, run.ID)) + require.NoError(t, err) + assert.Equal(t, map[string]string{"foo": "filled"}, out) + }) +} + func TestFindTaskNeeds(t *testing.T) { assert.NoError(t, unittest.PrepareTestDatabase()) diff --git a/services/actions/helper.go b/services/actions/helper.go new file mode 100644 index 0000000000000..1b53a7b538adc --- /dev/null +++ b/services/actions/helper.go @@ -0,0 +1,92 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package actions + +import ( + "context" + "fmt" + + actions_model "code.gitea.io/gitea/models/actions" + "code.gitea.io/gitea/modules/actions/jobparser" + "code.gitea.io/gitea/modules/json" + api "code.gitea.io/gitea/modules/structs" +) + +func getWorkflowDispatchInputsFromRun(run *actions_model.ActionRun) (map[string]any, error) { + if run.Event != "workflow_dispatch" { + return map[string]any{}, nil + } + var payload api.WorkflowDispatchPayload + if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil { + return nil, err + } + return payload.Inputs, nil +} + +// getInputsForJob returns the `inputs.*` top-level expression context for a job's evaluation. 
+// - For top-level jobs, it falls back to the run's dispatch inputs (empty for non-dispatch events) +// - For reusable workflow children (and nested callers), this is the direct parent caller's CallPayload.Inputs +func getInputsForJob(ctx context.Context, run *actions_model.ActionRun, job *actions_model.ActionRunJob) (map[string]any, error) { + if job.ParentCallJobID == 0 { + return getWorkflowDispatchInputsFromRun(run) + } + + caller, err := actions_model.GetRunJobByRunAndID(ctx, run.ID, job.ParentCallJobID) + if err != nil { + return nil, fmt.Errorf("load caller job %d: %w", job.ParentCallJobID, err) + } + if caller.CallPayload == "" { + // should not happen - a child job cannot reach this point if its caller's CallPayload hasn't been evaluated + return map[string]any{}, nil + } + var p api.WorkflowCallPayload + if err := json.Unmarshal([]byte(caller.CallPayload), &p); err != nil { + return nil, fmt.Errorf("decode caller %d payload: %w", caller.ID, err) + } + if p.Inputs == nil { + return map[string]any{}, nil + } + return p.Inputs, nil +} + +// evaluateJobIf evaluates a job's `if:` +func evaluateJobIf(ctx context.Context, run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt, job *actions_model.ActionRunJob, vars map[string]string, allNeedsSucceed bool) (bool, error) { + parsedJob, err := job.ParseJob() + if err != nil { + return false, err + } + // Empty `if:` reduces to implicit `success()` - true iff every need finished as Success. 
+ if len(parsedJob.If.Value) == 0 { + return allNeedsSucceed, nil + } + jobResults, err := findJobNeedsAndFillJobResults(ctx, job) + if err != nil { + return false, err + } + inputs, err := getInputsForJob(ctx, run, job) + if err != nil { + return false, err + } + gitCtx := GenerateGiteaContext(ctx, run, attempt, job) + return jobparser.EvaluateJobIfExpression(job.JobID, parsedJob, gitCtx, jobResults, vars, inputs) +} + +func findJobNeedsAndFillJobResults(ctx context.Context, job *actions_model.ActionRunJob) (map[string]*jobparser.JobResult, error) { + taskNeeds, err := FindTaskNeeds(ctx, job) + if err != nil { + return nil, fmt.Errorf("find task needs: %w", err) + } + jobResults := make(map[string]*jobparser.JobResult, len(taskNeeds)) + for jobID, taskNeed := range taskNeeds { + jobResult := &jobparser.JobResult{ + Result: taskNeed.Result.String(), + Outputs: taskNeed.Outputs, + } + jobResults[jobID] = jobResult + } + jobResults[job.JobID] = &jobparser.JobResult{ + Needs: job.Needs, + } + return jobResults, nil +} diff --git a/services/actions/job_emitter.go b/services/actions/job_emitter.go index b81ec9fe6c1e0..6154ebb0dc641 100644 --- a/services/actions/job_emitter.go +++ b/services/actions/job_emitter.go @@ -252,6 +252,14 @@ func checkJobsOfCurrentRunAttempt(ctx context.Context, run *actions_model.Action } resolver := newJobStatusResolver(jobs, vars) + var attempt *actions_model.ActionRunAttempt + if run.LatestAttemptID > 0 { + attempt, err = actions_model.GetRunAttemptByRepoAndID(ctx, run.RepoID, run.LatestAttemptID) + if err != nil { + return nil, nil, nil, err + } + } + if err = db.WithTx(ctx, func(ctx context.Context) error { for _, job := range jobs { job.Run = run @@ -259,15 +267,38 @@ func checkJobsOfCurrentRunAttempt(ctx context.Context, run *actions_model.Action updates := resolver.Resolve(ctx) for _, job := range jobs { - if status, ok := updates[job.ID]; ok { - job.Status = status - if n, err := actions_model.UpdateRunJob(ctx, job, 
builder.Eq{"status": actions_model.StatusBlocked}, "status"); err != nil { - return err - } else if n != 1 { - return fmt.Errorf("no affected for updating blocked job %v", job.ID) + status, ok := updates[job.ID] + if !ok { + continue + } + + if job.IsReusableCaller { + switch status { + case actions_model.StatusWaiting: + if err := expandReusableWorkflowCaller(ctx, run, attempt, job, vars); err != nil { + return fmt.Errorf("trigger caller-ready %d: %w", job.ID, err) + } + // expandReusableWorkflowCaller inserts children as Blocked, so emit run.ID again to resolve child jobs. + if err := EmitJobsIfReadyByRun(run.ID); err != nil { + return fmt.Errorf("emit run %d after caller %d ready: %w", run.ID, job.ID, err) + } + case actions_model.StatusSkipped: + job.Status = actions_model.StatusSkipped + if _, err := actions_model.UpdateRunJob(ctx, job, nil, "status"); err != nil { + return err + } } - updatedJobs = append(updatedJobs, job) + continue } + + // Non-caller: standard status update. + job.Status = status + if n, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"status": actions_model.StatusBlocked}, "status"); err != nil { + return err + } else if n != 1 { + return fmt.Errorf("no affected for updating blocked job %v", job.ID) + } + updatedJobs = append(updatedJobs, job) } return nil }); err != nil { @@ -286,10 +317,17 @@ type jobStatusResolver struct { } func newJobStatusResolver(jobs actions_model.ActionJobList, vars map[string]string) *jobStatusResolver { - idToJobs := make(map[string][]*actions_model.ActionRunJob, len(jobs)) + // Scope-aware: needs are resolved within the same ParentCallJobID scope so the same + // JobID in different reusable workflow calls does not cross-link. 
+ scopedIDToJobs := make(map[int64]map[string][]*actions_model.ActionRunJob) jobMap := make(map[int64]*actions_model.ActionRunJob) for _, job := range jobs { - idToJobs[job.JobID] = append(idToJobs[job.JobID], job) + scope := scopedIDToJobs[job.ParentCallJobID] + if scope == nil { + scope = make(map[string][]*actions_model.ActionRunJob) + scopedIDToJobs[job.ParentCallJobID] = scope + } + scope[job.JobID] = append(scope[job.JobID], job) jobMap[job.ID] = job } @@ -297,8 +335,9 @@ func newJobStatusResolver(jobs actions_model.ActionJobList, vars map[string]stri needs := make(map[int64][]int64, len(jobs)) for _, job := range jobs { statuses[job.ID] = job.Status + scope := scopedIDToJobs[job.ParentCallJobID] for _, need := range job.Needs { - for _, v := range idToJobs[need] { + for _, v := range scope[need] { needs[job.ID] = append(needs[job.ID], v.ID) } } @@ -340,14 +379,6 @@ func (r *jobStatusResolver) resolveCheckNeeds(id int64) (allDone, allSucceed boo return allDone, allSucceed } -func (r *jobStatusResolver) resolveJobHasIfCondition(actionRunJob *actions_model.ActionRunJob) (hasIf bool) { - // FIXME evaluate this on the server side - if job, err := actionRunJob.ParseJob(); err == nil { - return len(job.If.Value) > 0 - } - return hasIf -} - func (r *jobStatusResolver) resolve(ctx context.Context) map[int64]actions_model.Status { ret := map[int64]actions_model.Status{} for id, status := range r.statuses { @@ -355,6 +386,12 @@ func (r *jobStatusResolver) resolve(ctx context.Context) map[int64]actions_model if status != actions_model.StatusBlocked { continue } + // A child of a caller cannot start until the caller has become "ready" (children inserted, CallPayload populated). 
+ if actionRunJob.ParentCallJobID > 0 { + if parent, ok := r.jobMap[actionRunJob.ParentCallJobID]; ok && !parent.IsCallerExpanded { + continue + } + } allDone, allSucceed := r.resolveCheckNeeds(id) if !allDone { continue @@ -365,18 +402,15 @@ func (r *jobStatusResolver) resolve(ctx context.Context) map[int64]actions_model if err != nil { // The err can be caused by different cases: database error, or syntax error, or the needed jobs haven't completed // At the moment there is no way to distinguish them. - // Actually, for most cases, the error is caused by "syntax error" / "the needed jobs haven't completed (skipped?)" // TODO: if workflow or concurrency expression has syntax error, there should be a user error message, need to show it to end users log.Debug("updateConcurrencyEvaluationForJobWithNeeds failed, this job will stay blocked: job: %d, err: %v", id, err) continue } - shouldStartJob := true - if !allSucceed { - // Not all dependent jobs completed successfully: - // * if the job has "if" condition, it can be started, then the act_runner will evaluate the "if" condition. - // * otherwise, the job should be skipped. 
- shouldStartJob = r.resolveJobHasIfCondition(actionRunJob) + shouldStartJob, err := evaluateJobIf(ctx, actionRunJob.Run, nil, actionRunJob, r.vars, allSucceed) + if err != nil { + log.Debug("evaluateJobIf failed, job will stay blocked: job: %d, err: %v", id, err) + continue } newStatus := util.Iif(shouldStartJob, actions_model.StatusWaiting, actions_model.StatusSkipped) diff --git a/services/actions/job_emitter_test.go b/services/actions/job_emitter_test.go index 9a40927e06594..842f8ff83eed1 100644 --- a/services/actions/job_emitter_test.go +++ b/services/actions/job_emitter_test.go @@ -4,11 +4,14 @@ package actions import ( + "fmt" "testing" actions_model "code.gitea.io/gitea/models/actions" "code.gitea.io/gitea/models/db" + repo_model "code.gitea.io/gitea/models/repo" "code.gitea.io/gitea/models/unittest" + user_model "code.gitea.io/gitea/models/user" "github.com/stretchr/testify/assert" ) @@ -129,10 +132,49 @@ jobs: want: map[int64]actions_model.Status{2: actions_model.StatusSkipped}, }, } - for _, tt := range tests { + assert.NoError(t, unittest.PrepareTestDatabase()) + ctx := t.Context() + stubRun := &actions_model.ActionRun{TriggerUser: &user_model.User{}, Repo: &repo_model.Repository{}} + for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { + // Each subtest gets a unique RunID / RunAttemptID so jobs from different + // subtests don't bleed into each other's FindTaskNeeds queries (DB rows + // from earlier subtests stay around but are filtered out by run scope). + runID := int64(9001 + i) + attemptID := int64(9001 + i) + + // Server-side `if:` evaluation routes through findJobNeedsAndFillJobResults + // → FindTaskNeeds → DB query, so the resolver only sees needs status when + // jobs are persisted. Insert each test job (letting the DB assign IDs) and + // remember the test→DB ID mapping so we can translate the expected map. 
+ idMap := make(map[int64]int64, len(tt.jobs)) + for _, j := range tt.jobs { + origID := j.ID + j.ID = 0 + j.RunID = runID + j.RunAttemptID = attemptID + j.Run = stubRun + if j.Status == actions_model.StatusBlocked && len(j.WorkflowPayload) == 0 { + j.WorkflowPayload = fmt.Appendf(nil, `name: test +on: push +jobs: + %s: + runs-on: ubuntu-latest + steps: + - run: echo +`, j.JobID) + } + assert.NoError(t, db.Insert(ctx, j)) + idMap[origID] = j.ID + } + + want := make(map[int64]actions_model.Status, len(tt.want)) + for k, v := range tt.want { + want[idMap[k]] = v + } + r := newJobStatusResolver(tt.jobs, nil) - assert.Equal(t, tt.want, r.Resolve(t.Context())) + assert.Equal(t, want, r.Resolve(ctx)) }) } } diff --git a/services/actions/rerun.go b/services/actions/rerun.go index d4027b7c02761..9f7a85835f030 100644 --- a/services/actions/rerun.go +++ b/services/actions/rerun.go @@ -14,6 +14,7 @@ import ( "code.gitea.io/gitea/models/unit" user_model "code.gitea.io/gitea/models/user" "code.gitea.io/gitea/modules/container" + "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" @@ -112,8 +113,20 @@ type rerunPlan struct { run *actions_model.ActionRun templateAttempt *actions_model.ActionRunAttempt templateJobs actions_model.ActionJobList - rerunJobIDs container.Set[string] - triggerUser *user_model.User + + // rerunAttemptJobIDs holds the AttemptJobIDs of jobs that will actually be re-run in the new attempt. + // If a job here is a reusable caller, the whole subtree under it will be re-run. + rerunAttemptJobIDs container.Set[int64] + + // ancestorAttemptJobIDs holds the AttemptJobIDs of reusable caller jobs that have only some of their descendants being re-run: + // the caller itself is NOT re-run as a whole, it stays pass-through and its non-rerun children stay pass-through too. 
+ ancestorAttemptJobIDs container.Set[int64] + + // skipCloneTemplateJobIDs holds the template-attempt DB row IDs of descendants of any reusable caller in rerunAttemptJobIDs. + // These jobs should not be cloned, since the caller's lazy expansion will re-insert them fresh. + skipCloneTemplateJobIDs container.Set[int64] + + triggerUser *user_model.User } // buildRerunPlan constructs a rerunPlan for the given workflow run without writing to the database. @@ -151,6 +164,7 @@ func buildRerunPlan(ctx context.Context, run *actions_model.ActionRun, triggerUs if err := plan.expandRerunJobIDs(jobsToRerun); err != nil { return nil, err } + plan.skipCloneTemplateJobIDs = plan.collectResetCallerDescendants() return plan, nil } @@ -188,6 +202,7 @@ func execRerunPlan(ctx context.Context, plan *rerunPlan) (*actions_model.ActionR var newJobs, newJobsToRerun actions_model.ActionJobList var cancelledConcurrencyJobs []*actions_model.ActionRunJob + var hasWaitingCallerJobs bool err = db.WithTx(ctx, func(ctx context.Context) error { newAttemptStatus, jobsToCancel, err := PrepareToStartRunWithConcurrency(ctx, newAttempt) @@ -212,10 +227,30 @@ func execRerunPlan(ctx context.Context, plan *rerunPlan) (*actions_model.ActionR hasWaitingJobs := false newJobs = make(actions_model.ActionJobList, 0, len(plan.templateJobs)) - newJobsToRerun = make(actions_model.ActionJobList, 0, len(plan.rerunJobIDs)) + newJobsToRerun = make(actions_model.ActionJobList, 0, len(plan.rerunAttemptJobIDs)) + + // templateIDToNewID maps each template-attempt job's DB ID to its newly-inserted clone's DB ID + templateIDToNewID := make(map[int64]int64, len(plan.templateJobs)) + for _, templateJob := range plan.templateJobs { + // descendants of a reset reusable caller are not cloned at all, the caller will re-insert them + if plan.skipCloneTemplateJobIDs.Contains(templateJob.ID) { + continue + } + newJob := cloneRunJobForAttempt(templateJob, newAttempt) - if plan.rerunJobIDs.Contains(templateJob.JobID) { + + // Remap 
ParentCallJobID from template attempts's DB ID -> new attempt's DB ID. + if templateJob.ParentCallJobID != 0 { + newParentID, ok := templateIDToNewID[templateJob.ParentCallJobID] + if !ok { + return fmt.Errorf("clone order violation: parent job %d not yet cloned for child %d", + templateJob.ParentCallJobID, templateJob.ID) + } + newJob.ParentCallJobID = newParentID + } + + if plan.rerunAttemptJobIDs.Contains(templateJob.AttemptJobID) { shouldBlockJob := shouldBlock || plan.hasRerunDependency(templateJob) newJob.Status = util.Iif(shouldBlockJob, actions_model.StatusBlocked, actions_model.StatusWaiting) @@ -227,6 +262,11 @@ func execRerunPlan(ctx context.Context, plan *rerunPlan) (*actions_model.ActionR newJob.ConcurrencyCancel = false newJob.IsConcurrencyEvaluated = false + if templateJob.IsReusableCaller { + newJob.IsCallerExpanded = false + newJob.CallPayload = "" + } + if newJob.RawConcurrency != "" && !shouldBlockJob { if err := EvaluateJobConcurrencyFillModel(ctx, plan.run, newAttempt, newJob, vars, nil); err != nil { return fmt.Errorf("evaluate job concurrency: %w", err) @@ -242,13 +282,25 @@ func execRerunPlan(ctx context.Context, plan *rerunPlan) (*actions_model.ActionR } else { newJob.TaskID = 0 newJob.SourceTaskID = templateJob.EffectiveTaskID() - newJob.Started = templateJob.Started - newJob.Stopped = templateJob.Stopped + + isAncestor := plan.ancestorAttemptJobIDs.Contains(templateJob.AttemptJobID) + newJob.Started = util.Iif(isAncestor, 0, templateJob.Started) + newJob.Stopped = util.Iif(isAncestor, 0, templateJob.Stopped) } if err := db.Insert(ctx, newJob); err != nil { return err } + templateIDToNewID[templateJob.ID] = newJob.ID + + // expand reusable caller + if newJob.IsReusableCaller && newJob.Status == actions_model.StatusWaiting && !newJob.IsCallerExpanded { + if err := expandReusableWorkflowCaller(ctx, plan.run, newAttempt, newJob, vars); err != nil { + return fmt.Errorf("inline trigger caller %d ready: %w", newJob.ID, err) + } + 
hasWaitingCallerJobs = true + } + hasWaitingJobs = hasWaitingJobs || newJob.Status == actions_model.StatusWaiting newJobs = append(newJobs, newJob) } @@ -280,60 +332,149 @@ func execRerunPlan(ctx context.Context, plan *rerunPlan) (*actions_model.ActionR CreateCommitStatusForRunJobs(ctx, plan.run, newJobs...) NotifyWorkflowJobsAndRunsStatusUpdate(ctx, newJobsToRerun) + // Post-commit kick for expanded callers: let job_emitter resolve its child jobs + if hasWaitingCallerJobs { + if err := EmitJobsIfReadyByRun(plan.run.ID); err != nil { + log.Error("emit run %d after rerun: %v", plan.run.ID, err) + } + } + return newAttempt, nil } +// expandRerunJobIDs computes rerunAttemptJobIDs and ancestorAttemptJobIDs from the user-selected jobsToRerun. func (p *rerunPlan) expandRerunJobIDs(jobsToRerun []*actions_model.ActionRunJob) error { - templateJobIDs := make(container.Set[string]) - for _, job := range p.templateJobs { - templateJobIDs.Add(job.JobID) - } - + // Empty jobsToRerun: rerun the whole latest attempt if len(jobsToRerun) == 0 { - p.rerunJobIDs = templateJobIDs + all := make(container.Set[int64], len(p.templateJobs)) + for _, job := range p.templateJobs { + all.Add(job.AttemptJobID) + } + p.rerunAttemptJobIDs = all + p.ancestorAttemptJobIDs = make(container.Set[int64]) return nil } - rerunJobIDs := make(container.Set[string]) + byID := make(map[int64]*actions_model.ActionRunJob, len(p.templateJobs)) + byAttemptJobID := make(map[int64]*actions_model.ActionRunJob, len(p.templateJobs)) + for _, job := range p.templateJobs { + byID[job.ID] = job + byAttemptJobID[job.AttemptJobID] = job + } + for _, job := range jobsToRerun { - if !templateJobIDs.Contains(job.JobID) { + if _, ok := byID[job.ID]; !ok { return util.NewInvalidArgumentErrorf("job %q does not exist in the latest attempt", job.JobID) } - rerunJobIDs.Add(job.JobID) } - for { - found := false - for _, job := range p.templateJobs { - if rerunJobIDs.Contains(job.JobID) { + rerunSet := make(container.Set[int64]) + 
ancestorSet := make(container.Set[int64]) + queue := make([]*actions_model.ActionRunJob, 0, len(jobsToRerun)) + + for _, job := range jobsToRerun { + j := byID[job.ID] + rerunSet.Add(j.AttemptJobID) + queue = append(queue, j) + } + + for len(queue) > 0 { + cur := queue[0] + queue = queue[1:] + + // same-scope downstream: siblings whose Needs reference cur.JobID join the rerun set + for _, candidate := range p.templateJobs { + if candidate.ParentCallJobID != cur.ParentCallJobID { continue } - for _, need := range job.Needs { - if rerunJobIDs.Contains(need) { - found = true - rerunJobIDs.Add(job.JobID) - break - } + if rerunSet.Contains(candidate.AttemptJobID) || ancestorSet.Contains(candidate.AttemptJobID) { + continue + } + if !slices.Contains(candidate.Needs, cur.JobID) { + continue } + rerunSet.Add(candidate.AttemptJobID) + queue = append(queue, candidate) } - if !found { - break + + // escalate to parent caller as an ancestor so its own siblings get checked next round + if cur.ParentCallJobID == 0 { + continue } + parent, ok := byID[cur.ParentCallJobID] + if !ok { + continue + } + if rerunSet.Contains(parent.AttemptJobID) || ancestorSet.Contains(parent.AttemptJobID) { + continue + } + ancestorSet.Add(parent.AttemptJobID) + queue = append(queue, parent) } - p.rerunJobIDs = rerunJobIDs + // remove entries whose parent-caller chain already has a rerunSet member + for atID := range ancestorSet { + cur := byAttemptJobID[atID] + for cur.ParentCallJobID != 0 { + parent, ok := byID[cur.ParentCallJobID] + if !ok { + break + } + if rerunSet.Contains(parent.AttemptJobID) { + delete(ancestorSet, atID) + break + } + cur = parent + } + } + + p.rerunAttemptJobIDs = rerunSet + p.ancestorAttemptJobIDs = ancestorSet return nil } +// hasRerunDependency reports whether `job` has a needs-reference that points to a job which is itself being rerun (in rerunAttemptJobIDs) +// or is an ancestor caller whose subtree is being rerun (in ancestorAttemptJobIDs). 
+// Either case means `job` should start in Blocked status. func (p *rerunPlan) hasRerunDependency(job *actions_model.ActionRunJob) bool { - for _, need := range job.Needs { - if p.rerunJobIDs.Contains(need) { + if len(job.Needs) == 0 { + return false + } + needSet := container.SetOf(job.Needs...) + for _, sibling := range p.templateJobs { + if sibling.ParentCallJobID != job.ParentCallJobID { + continue + } + if !needSet.Contains(sibling.JobID) { + continue + } + if p.rerunAttemptJobIDs.Contains(sibling.AttemptJobID) || p.ancestorAttemptJobIDs.Contains(sibling.AttemptJobID) { return true } } return false } +// collectResetCallerDescendants walks p.templateJobs and returns the DB IDs of every transitive descendant of any reusable caller whose AttemptJobID is in p.rerunAttemptJobIDs. +// These descendants must NOT be cloned by execRerunPlan: the reset caller will re-insert them with template-matched AttemptJobIDs. +func (p *rerunPlan) collectResetCallerDescendants() container.Set[int64] { + out := make(container.Set[int64]) + for _, tj := range p.templateJobs { + if !tj.IsReusableCaller || !p.rerunAttemptJobIDs.Contains(tj.AttemptJobID) { + continue + } + // If this caller's row ID is already in `out`, it means an outer caller has already covered its whole subtree. + // Skip the redundant walk. 
+ if out.Contains(tj.ID) { + continue + } + for _, child := range actions_model.CollectReusableCallerAllChildJobs(tj, p.templateJobs) { + out.Add(child.ID) + } + } + return out +} + func cloneRunJobForAttempt(templateJob *actions_model.ActionRunJob, attempt *actions_model.ActionRunAttempt) *actions_model.ActionRunJob { return &actions_model.ActionRunJob{ RunID: templateJob.RunID, @@ -355,6 +496,15 @@ func cloneRunJobForAttempt(templateJob *actions_model.ActionRunJob, attempt *act ConcurrencyGroup: templateJob.ConcurrencyGroup, ConcurrencyCancel: templateJob.ConcurrencyCancel, TokenPermissions: templateJob.TokenPermissions, + + // reusable workflow fields + IsReusableCaller: templateJob.IsReusableCaller, + CallUses: templateJob.CallUses, + ReusableWorkflowContent: slices.Clone(templateJob.ReusableWorkflowContent), + CallSecrets: templateJob.CallSecrets, + CallPayload: templateJob.CallPayload, + IsCallerExpanded: templateJob.IsCallerExpanded, + ParentCallJobID: templateJob.ParentCallJobID, // remapped by execRerunPlan } } diff --git a/services/actions/rerun_test.go b/services/actions/rerun_test.go index 30772980619b8..539c9db857cb3 100644 --- a/services/actions/rerun_test.go +++ b/services/actions/rerun_test.go @@ -8,6 +8,7 @@ import ( actions_model "code.gitea.io/gitea/models/actions" user_model "code.gitea.io/gitea/models/user" + "code.gitea.io/gitea/modules/container" "code.gitea.io/gitea/modules/util" "github.com/stretchr/testify/assert" @@ -100,3 +101,244 @@ func TestRerunValidation(t *testing.T) { assert.ErrorIs(t, err, util.ErrInvalidArgument) }) } + +func TestRerunPlan(t *testing.T) { + // "verify" deliberately appears in two scopes (inner caller under deploy, and top-level) + // so any scope-blind matching in expandRerunJobIDs / hasRerunDependency would surface as a test failure here + + // build id=101, attemptJobID=1 + // test id=102, attemptJobID=2, needs=[build] + // deploy id=103, attemptJobID=3, caller + // ├── validate id=104, attemptJobID=4, 
parent=103 + // ├── push id=105, attemptJobID=5, parent=103, needs=[validate] + // ├── verify id=106, attemptJobID=6, parent=103, caller, needs=[push] + // │ ├── smoke-test id=107, attemptJobID=7, parent=106 + // │ └── cleanup id=108, attemptJobID=8, parent=106, needs=[smoke-test] + // └── finish-deploy id=109, attemptJobID=9, parent=103, needs=[verify] + // verify id=110, attemptJobID=10, needs=[deploy] (top-level, same JobID) + + buildJob := templateJob(101, 1, "build", 0, false) + testJob := templateJob(102, 2, "test", 0, false, "build") + deployJob := templateJob(103, 3, "deploy", 0, true) + validateJob := templateJob(104, 4, "validate", 103, false) + pushJob := templateJob(105, 5, "push", 103, false, "validate") + verifyInnerJob := templateJob(106, 6, "verify", 103, true, "push") + smokeTestJob := templateJob(107, 7, "smoke-test", 106, false) + cleanupJob := templateJob(108, 8, "cleanup", 106, false, "smoke-test") + finishDeployJob := templateJob(109, 9, "finish-deploy", 103, false, "verify") + verifyTopJob := templateJob(110, 10, "verify", 0, false, "deploy") + + jobs := []*actions_model.ActionRunJob{ + buildJob, testJob, deployJob, validateJob, pushJob, + verifyInnerJob, smokeTestJob, cleanupJob, + finishDeployJob, verifyTopJob, + } + + t.Run("ExpandRerunJobIDs", func(t *testing.T) { + t.Run("empty jobsToRerun reruns every template job, no ancestors", func(t *testing.T) { + plan := &rerunPlan{templateJobs: jobs} + require.NoError(t, plan.expandRerunJobIDs(nil)) + + assert.ElementsMatch(t, attemptJobIDsOf(jobs...), plan.rerunAttemptJobIDs.Values()) + assert.Empty(t, plan.ancestorAttemptJobIDs) + }) + + t.Run("same-scope downstream BFS pulls in dependents", func(t *testing.T) { + // a -> b -> c (chain), d unrelated. 
+ a := templateJob(101, 1, "a", 0, false) + b := templateJob(102, 2, "b", 0, false, "a") + c := templateJob(103, 3, "c", 0, false, "b") + d := templateJob(104, 4, "d", 0, false) + plan := &rerunPlan{templateJobs: []*actions_model.ActionRunJob{a, b, c, d}} + require.NoError(t, plan.expandRerunJobIDs([]*actions_model.ActionRunJob{a})) + + assert.ElementsMatch(t, attemptJobIDsOf(a, b, c), plan.rerunAttemptJobIDs.Values()) + assert.Empty(t, plan.ancestorAttemptJobIDs) + }) + + t.Run("rerun a deep child escalates across reusable scopes", func(t *testing.T) { + plan := &rerunPlan{templateJobs: jobs} + require.NoError(t, plan.expandRerunJobIDs([]*actions_model.ActionRunJob{smokeTestJob})) + + // rerun: smoke-test (selected), cleanup (same-scope downstream), + // finish-deploy (deploy-scope sibling of inner verify ancestor), + // top-level verify (top-scope sibling of deploy ancestor). + assert.ElementsMatch(t, + attemptJobIDsOf(smokeTestJob, cleanupJob, finishDeployJob, verifyTopJob), + plan.rerunAttemptJobIDs.Values()) + + // ancestors: inner verify and deploy + assert.ElementsMatch(t, attemptJobIDsOf(verifyInnerJob, deployJob), plan.ancestorAttemptJobIDs.Values()) + }) + + t.Run("rerun a top-level caller resets only itself and same-scope dependents", func(t *testing.T) { + plan := &rerunPlan{templateJobs: jobs} + require.NoError(t, plan.expandRerunJobIDs([]*actions_model.ActionRunJob{deployJob})) + + // rerun: deploy (selected) + top-level verify (needs:[deploy]). + assert.ElementsMatch(t, attemptJobIDsOf(deployJob, verifyTopJob), plan.rerunAttemptJobIDs.Values()) + // deploy is top-level so no ancestors. 
+ assert.Empty(t, plan.ancestorAttemptJobIDs) + }) + + t.Run("rerun a nested caller escalates one level", func(t *testing.T) { + plan := &rerunPlan{templateJobs: jobs} + require.NoError(t, plan.expandRerunJobIDs([]*actions_model.ActionRunJob{verifyInnerJob})) + + // inner verify (selected) -> finish-deploy (deploy-scope dep) -> top-level verify (top-scope dep of deploy). + assert.ElementsMatch(t, + attemptJobIDsOf(verifyInnerJob, finishDeployJob, verifyTopJob), + plan.rerunAttemptJobIDs.Values()) + // deploy is the only ancestor (one level up from inner verify). + assert.ElementsMatch(t, attemptJobIDsOf(deployJob), plan.ancestorAttemptJobIDs.Values()) + }) + + t.Run("selecting one same-name job leaves the other-scope same-name job alone", func(t *testing.T) { + // The fixture has two "verify" jobs in different scopes. + // Selecting only the top-level one must NOT pull in the inner one or its descendants. + plan := &rerunPlan{templateJobs: jobs} + require.NoError(t, plan.expandRerunJobIDs([]*actions_model.ActionRunJob{verifyTopJob})) + + // Only the top-level verify is rerun. + assert.ElementsMatch(t, attemptJobIDsOf(verifyTopJob), plan.rerunAttemptJobIDs.Values()) + assert.Empty(t, plan.ancestorAttemptJobIDs) + }) + + t.Run("a caller is rerun when a sibling it needs is selected", func(t *testing.T) { + plan := &rerunPlan{templateJobs: jobs} + require.NoError(t, plan.expandRerunJobIDs([]*actions_model.ActionRunJob{pushJob})) + + assert.ElementsMatch(t, + attemptJobIDsOf(pushJob, verifyInnerJob, finishDeployJob, verifyTopJob), + plan.rerunAttemptJobIDs.Values()) + assert.ElementsMatch(t, attemptJobIDsOf(deployJob), plan.ancestorAttemptJobIDs.Values()) + + // Confirm the downstream effect: verify(inner) is a reset caller, so its children's DB row IDs are marked for skip-clone. 
+ assert.ElementsMatch(t, rowIDsOf(smokeTestJob, cleanupJob), plan.collectResetCallerDescendants().Values()) + }) + + t.Run("multiple selections converge", func(t *testing.T) { + plan := &rerunPlan{templateJobs: jobs} + require.NoError(t, plan.expandRerunJobIDs([]*actions_model.ActionRunJob{deployJob, smokeTestJob})) + + assert.ElementsMatch(t, attemptJobIDsOf(deployJob, smokeTestJob, cleanupJob, finishDeployJob, verifyTopJob), plan.rerunAttemptJobIDs.Values()) + assert.Empty(t, plan.ancestorAttemptJobIDs) + assert.ElementsMatch(t, + rowIDsOf(validateJob, pushJob, verifyInnerJob, smokeTestJob, cleanupJob, finishDeployJob), + plan.collectResetCallerDescendants().Values()) + }) + }) + + t.Run("CollectResetCallerDescendants", func(t *testing.T) { + planWith := func(rerunJobs ...*actions_model.ActionRunJob) *rerunPlan { + set := make(container.Set[int64]) + for _, j := range rerunJobs { + set.Add(j.AttemptJobID) + } + return &rerunPlan{templateJobs: jobs, rerunAttemptJobIDs: set} + } + + t.Run("non-caller in reset set is ignored", func(t *testing.T) { + assert.Empty(t, planWith(smokeTestJob).collectResetCallerDescendants()) + }) + + t.Run("caller in reset set returns transitive descendants", func(t *testing.T) { + out := planWith(deployJob).collectResetCallerDescendants() + assert.ElementsMatch(t, + rowIDsOf(validateJob, pushJob, verifyInnerJob, smokeTestJob, cleanupJob, finishDeployJob), + out.Values()) + }) + + t.Run("multiple reset callers union their descendants", func(t *testing.T) { + out := planWith(deployJob, verifyInnerJob).collectResetCallerDescendants() + assert.ElementsMatch(t, + rowIDsOf(validateJob, pushJob, verifyInnerJob, smokeTestJob, cleanupJob, finishDeployJob), + out.Values()) + }) + + t.Run("nested-only reset returns just the nested subtree", func(t *testing.T) { + out := planWith(verifyInnerJob).collectResetCallerDescendants() + assert.ElementsMatch(t, rowIDsOf(smokeTestJob, cleanupJob), out.Values()) + }) + }) + + t.Run("HasRerunDependency", 
func(t *testing.T) { + t.Run("no needs returns false", func(t *testing.T) { + plan := &rerunPlan{ + templateJobs: []*actions_model.ActionRunJob{buildJob}, + rerunAttemptJobIDs: make(container.Set[int64]), + ancestorAttemptJobIDs: make(container.Set[int64]), + } + assert.False(t, plan.hasRerunDependency(buildJob)) + }) + + t.Run("dependency in rerun set returns true", func(t *testing.T) { + plan := &rerunPlan{ + templateJobs: jobs, + rerunAttemptJobIDs: container.SetOf(smokeTestJob.AttemptJobID), + ancestorAttemptJobIDs: make(container.Set[int64]), + } + // cleanup `needs: [smoke-test]`, both in inner verify scope. + assert.True(t, plan.hasRerunDependency(cleanupJob)) + }) + + t.Run("dependency in ancestor set returns true", func(t *testing.T) { + plan := &rerunPlan{ + templateJobs: jobs, + rerunAttemptJobIDs: container.SetOf(attemptJobIDsOf(smokeTestJob, cleanupJob)...), + ancestorAttemptJobIDs: container.SetOf(verifyInnerJob.AttemptJobID), + } + assert.True(t, plan.hasRerunDependency(finishDeployJob)) + }) + + t.Run("dependency on unrelated sibling returns false", func(t *testing.T) { + plan := &rerunPlan{ + templateJobs: jobs, + rerunAttemptJobIDs: container.SetOf(smokeTestJob.AttemptJobID), + ancestorAttemptJobIDs: make(container.Set[int64]), + } + assert.False(t, plan.hasRerunDependency(pushJob)) + }) + + t.Run("scope-bound: same JobID in another scope does not match", func(t *testing.T) { + plan := &rerunPlan{ + templateJobs: jobs, + rerunAttemptJobIDs: container.SetOf(verifyTopJob.AttemptJobID), + ancestorAttemptJobIDs: make(container.Set[int64]), + } + assert.False(t, plan.hasRerunDependency(finishDeployJob)) + + // Sanity: swap to the inner verify and the same target now sees it. + plan.rerunAttemptJobIDs = container.SetOf(verifyInnerJob.AttemptJobID) + assert.True(t, plan.hasRerunDependency(finishDeployJob)) + }) + }) +} + +// templateJob is a small constructor for fixture jobs used by the rerunPlan unit tests. 
+func templateJob(id, attemptJobID int64, jobID string, parentID int64, isCaller bool, needs ...string) *actions_model.ActionRunJob { + return &actions_model.ActionRunJob{ + ID: id, + AttemptJobID: attemptJobID, + JobID: jobID, + ParentCallJobID: parentID, + IsReusableCaller: isCaller, + Needs: needs, + } +} + +func attemptJobIDsOf(jobs ...*actions_model.ActionRunJob) []int64 { + out := make([]int64, len(jobs)) + for i, j := range jobs { + out[i] = j.AttemptJobID + } + return out +} + +func rowIDsOf(jobs ...*actions_model.ActionRunJob) []int64 { + out := make([]int64, len(jobs)) + for i, j := range jobs { + out[i] = j.ID + } + return out +} diff --git a/services/actions/reusable_workflow.go b/services/actions/reusable_workflow.go new file mode 100644 index 0000000000000..36845a344c474 --- /dev/null +++ b/services/actions/reusable_workflow.go @@ -0,0 +1,314 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package actions + +import ( + "context" + "fmt" + + actions_model "code.gitea.io/gitea/models/actions" + "code.gitea.io/gitea/models/db" + perm_model "code.gitea.io/gitea/models/perm" + access_model "code.gitea.io/gitea/models/perm/access" + repo_model "code.gitea.io/gitea/models/repo" + "code.gitea.io/gitea/modules/actions/jobparser" + "code.gitea.io/gitea/modules/container" + "code.gitea.io/gitea/modules/gitrepo" + "code.gitea.io/gitea/modules/json" + api "code.gitea.io/gitea/modules/structs" + "code.gitea.io/gitea/modules/util" + "code.gitea.io/gitea/services/convert" + + "xorm.io/builder" +) + +// MaxReusableCallDepth caps reusable workflow nesting at MaxReusableCallDepth-1 levels. +// A top-level caller is depth 0; checkCallerChain rejects when the caller's own depth reaches MaxReusableCallDepth. +const MaxReusableCallDepth = 10 + +// loadReusableWorkflowSource resolves the workflow file referenced by a caller and returns its raw bytes. 
// loadReusableWorkflowSource resolves the workflow file referenced by a caller job
// and returns its raw YAML bytes. Same-repo references are pinned to the run's
// commit SHA; cross-repo references are permission-checked before reading.
func loadReusableWorkflowSource(ctx context.Context, run *actions_model.ActionRun, ref *jobparser.UsesRef) ([]byte, error) {
	// Ensure run.Repo / run.TriggerUser etc. are populated before we use them below.
	if err := run.LoadAttributes(ctx); err != nil {
		return nil, err
	}

	switch ref.Kind {
	case jobparser.UsesKindLocalSameRepo:
		// Same-repo: pin to the run's commit SHA, no @ref support.
		return readWorkflowFromRepo(ctx, run.Repo, run.CommitSHA, ref.Path)

	case jobparser.UsesKindLocalCrossRepo:
		repo, err := repo_model.GetRepositoryByOwnerAndName(ctx, ref.Owner, ref.Repo)
		if err != nil {
			return nil, fmt.Errorf("look up cross-repo workflow source %q: %w", ref.Owner+"/"+ref.Repo, err)
		}
		// Cross-repo reads are gated by an explicit permission check on the *source* run.
		ok, err := access_model.CanReadWorkflowCrossRepo(ctx, repo, run)
		if err != nil {
			return nil, err
		}
		if !ok {
			return nil, fmt.Errorf("no permission to read reusable workflow from %s/%s", ref.Owner, ref.Repo)
		}
		// Cross-repo callers may target an arbitrary ref (branch/tag/SHA) from the `uses:` string.
		return readWorkflowFromRepo(ctx, repo, ref.Ref, ref.Path)
	}
	return nil, fmt.Errorf("unsupported uses kind %d", ref.Kind)
}

// readWorkflowFromRepo reads the file at `path` from `repo` at `refOrSHA` and
// returns its contents. The read is capped at 1 MiB.
func readWorkflowFromRepo(ctx context.Context, repo *repo_model.Repository, refOrSHA, path string) ([]byte, error) {
	gitRepo, err := gitrepo.OpenRepository(ctx, repo)
	if err != nil {
		return nil, fmt.Errorf("open repo %s: %w", repo.FullName(), err)
	}
	defer gitRepo.Close()

	commit, err := gitRepo.GetCommit(refOrSHA)
	if err != nil {
		return nil, fmt.Errorf("get commit %q in %s: %w", refOrSHA, repo.FullName(), err)
	}
	// 1 MiB size limit on the workflow file content.
	str, err := commit.GetFileContent(path, 1024*1024)
	if err != nil {
		return nil, fmt.Errorf("read %s@%s:%s: %w", repo.FullName(), refOrSHA, path, err)
	}
	return []byte(str), nil
}

// checkCallerChain walks `caller`'s ancestor chain (via ParentCallJobID) and:
//   - rejects cycles (caller.CallUses appearing in any ancestor's CallUses)
//   - enforces MaxReusableCallDepth on caller's depth (top-level = 0)
func checkCallerChain(ctx context.Context, caller *actions_model.ActionRunJob) error {
	if caller.ParentCallJobID == 0 {
		return nil // top-level caller: depth 0, no ancestors to walk
	}

	// `visited` tracks the `uses:` strings seen on the way up; seeing the
	// caller's own target again means the chain revisits a workflow (a cycle).
	visited := make(container.Set[string])
	visited.Add(caller.CallUses)

	depth := 0
	current := caller
	for current.ParentCallJobID != 0 {
		next, err := actions_model.GetRunJobByRunAndID(ctx, current.RunID, current.ParentCallJobID)
		if err != nil {
			return fmt.Errorf("walk caller chain: %w", err)
		}
		current = next
		depth++
		// depth counts ancestors climbed so far; reject once it reaches the cap.
		if depth >= MaxReusableCallDepth {
			return fmt.Errorf("reusable workflow call depth exceeds limit (%d) at %q", MaxReusableCallDepth, caller.CallUses)
		}
		if current.IsReusableCaller && current.CallUses != "" {
			if visited.Contains(current.CallUses) {
				return fmt.Errorf("reusable workflow call cycle detected: %q", current.CallUses)
			}
			visited.Add(current.CallUses)
		}
	}
	return nil
}

// expandReusableWorkflowCaller drives a reusable workflow caller through its "ready" transition:
// load source -> eval secrets/with -> build payload -> insert children -> atomically flip IsCallerExpanded.
// It does NOT schedule a follow-up resolver pass; the caller of this function is responsible for emitting.
func expandReusableWorkflowCaller(ctx context.Context, run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt, caller *actions_model.ActionRunJob, vars map[string]string) error {
	// Already expanded by an earlier call, skip
	if caller.IsCallerExpanded {
		return nil
	}

	// 1. Cycle + depth check via the ParentCallJobID chain.
	if err := checkCallerChain(ctx, caller); err != nil {
		return err
	}

	// 2. Parse the caller's own job (Uses, With, RawSecrets) from its WorkflowPayload.
	parsedJob, err := caller.ParseJob()
	if err != nil {
		return fmt.Errorf("parse caller job %d: %w", caller.ID, err)
	}

	// 3. Load called-workflow source.
	ref, err := jobparser.ParseUses(parsedJob.Uses)
	if err != nil {
		return fmt.Errorf("parse uses %q: %w", parsedJob.Uses, err)
	}
	content, err := loadReusableWorkflowSource(ctx, run, ref)
	if err != nil {
		return err
	}

	// 4. Parse the called workflow's spec (used by both secret validation and input evaluation).
	wcSpec, err := jobparser.ParseWorkflowCallSpec(content)
	if err != nil {
		return fmt.Errorf("parse called workflow spec: %w", err)
	}

	// 5. Resolve caller's `secrets:` and validate it against the callee's schema.
	// `secrets: inherit` bypasses validation by design (the callee gets everything).
	inherit, secretsMap, err := jobparser.ParseCallerSecrets(parsedJob.RawSecrets)
	if err != nil {
		return fmt.Errorf("caller secrets %q: %w", caller.JobID, err)
	}
	if !inherit {
		if err := jobparser.ValidateCallerSecrets(wcSpec, secretsMap); err != nil {
			return fmt.Errorf("caller %q secrets: %w", caller.JobID, err)
		}
	}
	switch {
	case inherit:
		caller.CallSecrets = jobparser.SecretsInherit
	case len(secretsMap) > 0:
		mapBytes, err := json.Marshal(secretsMap)
		if err != nil {
			return fmt.Errorf("marshal caller secret map: %w", err)
		}
		caller.CallSecrets = string(mapBytes)
	}
	caller.ReusableWorkflowContent = content

	// 6. Evaluate caller's `with:`, then match against the callee schema.
	// When the callee declares no inputs, `with:` is ignored and the inputs map stays empty.
	workflowCallInputs := map[string]any{}
	if len(wcSpec.Inputs) > 0 {
		jobResults, err := findJobNeedsAndFillJobResults(ctx, caller)
		if err != nil {
			return fmt.Errorf("find caller needs: %w", err)
		}
		parentInputs, err := getInputsForJob(ctx, run, caller)
		if err != nil {
			return err
		}
		callerGitCtx := GenerateGiteaContext(ctx, run, attempt, caller)
		evaluated, err := jobparser.EvaluateCallerWith(
			caller.JobID, parsedJob,
			callerGitCtx, jobResults, vars, parentInputs,
		)
		if err != nil {
			return fmt.Errorf("evaluate caller with: %w", err)
		}
		workflowCallInputs, err = jobparser.MatchCallerInputsAgainstSpec(wcSpec, evaluated)
		if err != nil {
			return fmt.Errorf("caller %q inputs: %w", caller.JobID, err)
		}
	}

	// 7. Build CallPayload (persisted in step 9).
	// Repo/Sender are serialized with AccessModeNone so the payload leaks no permission info.
	callPayload, err := (&api.WorkflowCallPayload{
		Workflow:   run.WorkflowID,
		Ref:        run.Ref,
		Repository: convert.ToRepo(ctx, run.Repo, access_model.Permission{AccessMode: perm_model.AccessModeNone}),
		Sender:     convert.ToUserWithAccessMode(ctx, run.TriggerUser, perm_model.AccessModeNone),
		Inputs:     workflowCallInputs,
	}).JSONPayload()
	if err != nil {
		return fmt.Errorf("build call payload: %w", err)
	}

	// 8. Insert direct children of this caller.
	// NOTE(review): the check-then-insert here and the guarded update in step 9 are not
	// one atomic unit from this view; two concurrent expanders could in principle both pass
	// the empty-children check before either flips the flag. Presumably the caller runs this
	// inside a transaction (InsertRun wraps it in db.WithTx) — confirm for other call sites.
	existingChildren, err := actions_model.GetReusableCallerDirectChildJobs(ctx, caller)
	if err != nil {
		return fmt.Errorf("get existing children of caller %d: %w", caller.ID, err)
	}
	if len(existingChildren) > 0 {
		// Should not happen - child jobs cannot be expanded before the caller gets ready
		return fmt.Errorf("invariant violation: caller %d has %d pre-existing children", caller.ID, len(existingChildren))
	}
	if err := insertCallerChildren(ctx, run, attempt, caller, content, vars, workflowCallInputs); err != nil {
		return err
	}

	// 9. Update caller-related cols.
	// The builder.Eq condition makes the flag flip compare-and-set: n == 0 means another
	// writer won the race and this expansion must not be treated as committed.
	caller.CallPayload = string(callPayload)
	caller.IsCallerExpanded = true
	n, err := actions_model.UpdateRunJob(ctx, caller,
		builder.Eq{"is_caller_expanded": false},
		"call_secrets", "reusable_workflow_content", "call_payload", "is_caller_expanded")
	if err != nil {
		return fmt.Errorf("commit caller %d expansion: %w", caller.ID, err)
	}
	if n == 0 {
		return fmt.Errorf("caller %d already expanded by another writer", caller.ID)
	}
	return nil
}

// insertCallerChildren parses the called workflow with the caller's resolved inputs and inserts each parsed job.
func insertCallerChildren(ctx context.Context, run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt, caller *actions_model.ActionRunJob, content []byte, vars map[string]string, inputs map[string]any) error {
	// Parse the called workflow with the caller's `inputs`
	// The child runs under the `workflow_call` event, with inputs exposed via the event context.
	gitCtx := GenerateGiteaContext(ctx, run, attempt, nil)
	if event, ok := gitCtx["event"].(map[string]any); ok {
		event["inputs"] = inputs
	}
	gitCtx["event_name"] = "workflow_call"

	childWorkflows, err := jobparser.Parse(content,
		jobparser.WithVars(vars),
		jobparser.WithGitContext(gitCtx.ToGitHubContext()),
		jobparser.WithInputs(inputs),
	)
	if err != nil {
		return fmt.Errorf("parse called workflow for caller %d: %w", caller.ID, err)
	}
	if len(childWorkflows) == 0 {
		return fmt.Errorf("called workflow for caller %d (uses %q) has no jobs", caller.ID, caller.CallUses)
	}

	// Used below to keep AttemptJobID stable across reruns of the same logical child job.
	priorChildren, err := actions_model.GetReusableCallerPriorAttemptChildren(ctx, run.ID, attempt.Attempt, caller.AttemptJobID)
	if err != nil {
		return fmt.Errorf("lookup prior-attempt children of caller %d: %w", caller.ID, err)
	}

	for _, sw := range childWorkflows {
		jobID, parsedChild := sw.Job()
		if parsedChild == nil {
			continue
		}
		// Needs are persisted on the row and erased from the payload, mirroring how
		// top-level jobs are stored (the scheduler resolves them from the DB column).
		needs := parsedChild.Needs()
		if err := sw.SetJob(jobID, parsedChild.EraseNeeds()); err != nil {
			return err
		}
		payload, err := sw.Marshal()
		if err != nil {
			return fmt.Errorf("marshal child %q under caller %d: %w", jobID, caller.ID, err)
		}

		parsedChild.Name = util.EllipsisDisplayString(parsedChild.Name, 255)

		// AttemptJobID: prefer a prior-attempt match by (JobID, Name) and fall back to a fresh allocator value for newly-appearing logical jobs.
		// The two-level key disambiguates matrix instances (same JobID, different Names) and distinct jobs that legally share the same Name (different JobIDs).
		var attemptJobID int64
		if priorChild, ok := priorChildren[jobID][parsedChild.Name]; ok {
			attemptJobID = priorChild.AttemptJobID
		} else {
			attemptJobID, err = actions_model.GetNextAttemptJobID(ctx, run.ID)
			if err != nil {
				return fmt.Errorf("alloc attempt_job_id for child %q: %w", jobID, err)
			}
		}
		// Children start Blocked; the job emitter promotes them once their needs resolve.
		child := &actions_model.ActionRunJob{
			RunID:             run.ID,
			RunAttemptID:      attempt.ID,
			RepoID:            run.RepoID,
			OwnerID:           run.OwnerID,
			CommitSHA:         run.CommitSHA,
			IsForkPullRequest: run.IsForkPullRequest,
			Name:              parsedChild.Name,
			Attempt:           attempt.Attempt,
			WorkflowPayload:   payload,
			JobID:             jobID,
			AttemptJobID:      attemptJobID,
			Needs:             needs,
			RunsOn:            parsedChild.RunsOn(),
			Status:            actions_model.StatusBlocked,
			ParentCallJobID:   caller.ID,
		}
		if perms := ExtractJobPermissionsFromWorkflow(sw, parsedChild); perms != nil {
			child.TokenPermissions = perms
		}
		// A child that itself has `uses:` becomes a nested caller; checkCallerChain
		// bounds the recursion when that child is later expanded.
		if parsedChild.Uses != "" {
			child.IsReusableCaller = true
			child.CallUses = parsedChild.Uses
		}
		if err := db.Insert(ctx, child); err != nil {
			return fmt.Errorf("insert child %q under caller %d: %w", jobID, caller.ID, err)
		}
	}
	return nil
}
diff --git a/services/actions/run.go b/services/actions/run.go
index ed3b3728f73fc..ad32689ab5c2b 100644
--- a/services/actions/run.go
+++ b/services/actions/run.go
@@ -10,6 +10,7 @@ import (
 	actions_model "code.gitea.io/gitea/models/actions"
 	"code.gitea.io/gitea/models/db"
 	"code.gitea.io/gitea/modules/actions/jobparser"
+	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/util"

 	act_model
"gitea.com/gitea/runner/act/model" @@ -55,6 +56,7 @@ func PrepareRunAndInsert(ctx context.Context, content []byte, run *actions_model // The title will be cut off at 255 characters if it's longer than 255 characters. func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte, vars map[string]string, inputs map[string]any, wfRawConcurrency *act_model.RawConcurrency) error { var cancelledConcurrencyJobs []*actions_model.ActionRunJob + var hasWaitingCallerJobs bool if err := db.WithTx(ctx, func(ctx context.Context) error { index, err := db.GetNextResourceIndex(ctx, "action_run_index", run.RepoID) if err != nil { @@ -128,7 +130,7 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte runJobs := make([]*actions_model.ActionRunJob, 0, len(jobs)) var hasWaitingJobs bool - for i, v := range jobs { + for _, v := range jobs { id, job := v.Job() needs := job.Needs() if err := v.SetJob(id, job.EraseNeeds()); err != nil { @@ -136,8 +138,14 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte } payload, _ := v.Marshal() + isReusableWorkflowCaller := job.Uses != "" shouldBlockJob := runAttempt.Status == actions_model.StatusBlocked || len(needs) > 0 || run.NeedApproval + attemptJobID, err := actions_model.GetNextAttemptJobID(ctx, run.ID) + if err != nil { + return fmt.Errorf("alloc attempt_job_id: %w", err) + } + job.Name = util.EllipsisDisplayString(job.Name, 255) runJob := &actions_model.ActionRunJob{ RunID: run.ID, @@ -150,7 +158,7 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte Attempt: runAttempt.Attempt, WorkflowPayload: payload, JobID: id, - AttemptJobID: int64(i + 1), + AttemptJobID: attemptJobID, Needs: needs, RunsOn: job.RunsOn(), Status: util.Iif(shouldBlockJob, actions_model.StatusBlocked, actions_model.StatusWaiting), @@ -160,6 +168,11 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte runJob.TokenPermissions = perms } 
+ if isReusableWorkflowCaller { + runJob.IsReusableCaller = true + runJob.CallUses = job.Uses + } + // check job concurrency if job.RawConcurrency != nil { rawConcurrency, err := yaml.Marshal(job.RawConcurrency) @@ -193,6 +206,14 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte return err } + // expand reusable caller + if isReusableWorkflowCaller && runJob.Status == actions_model.StatusWaiting { + if err := expandReusableWorkflowCaller(ctx, run, runAttempt, runJob, vars); err != nil { + return fmt.Errorf("inline trigger caller %d ready: %w", runJob.ID, err) + } + hasWaitingCallerJobs = true + } + runJobs = append(runJobs, runJob) } @@ -216,5 +237,12 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte NotifyWorkflowJobsAndRunsStatusUpdate(ctx, cancelledConcurrencyJobs) EmitJobsIfReadyByJobs(cancelledConcurrencyJobs) + // Post-commit kick for expanded callers: let job_emitter resolve its child jobs + if hasWaitingCallerJobs { + if err := EmitJobsIfReadyByRun(run.ID); err != nil { + log.Error("emit run %d after InsertRun: %v", run.ID, err) + } + } + return nil } diff --git a/web_src/js/components/ActionRunJobView.vue b/web_src/js/components/ActionRunJobView.vue index bce1d079c7e35..80db4fa192ab4 100644 --- a/web_src/js/components/ActionRunJobView.vue +++ b/web_src/js/components/ActionRunJobView.vue @@ -1,20 +1,22 @@