diff --git a/contrib/backport/backport.go b/contrib/backport/backport.go
index 2af980e5e3cae..0c10c59bd8874 100644
--- a/contrib/backport/backport.go
+++ b/contrib/backport/backport.go
@@ -18,7 +18,7 @@ import (
 	"github.com/google/go-github/v84/github"
 	"github.com/urfave/cli/v3"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 const defaultVersion = "v1.18" // to backport to
diff --git a/go.mod b/go.mod
index d67eaaec2733a..72d85dcb02159 100644
--- a/go.mod
+++ b/go.mod
@@ -121,7 +121,6 @@ require (
 	google.golang.org/grpc v1.80.0
 	google.golang.org/protobuf v1.36.11
 	gopkg.in/ini.v1 v1.67.1
-	gopkg.in/yaml.v3 v3.0.1
 	mvdan.cc/xurls/v2 v2.6.0
 	strk.kbt.io/projects/go/libravatar v0.0.0-20260301104140-add494e31dab
 	xorm.io/builder v0.3.13
@@ -282,6 +281,7 @@ require (
 	golang.org/x/tools v0.43.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20260401020348-3a24fdc17823 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
 ignore (
diff --git a/models/actions/run_job.go b/models/actions/run_job.go
index d1e5d1e938084..e70dbc3b447ee 100644
--- a/models/actions/run_job.go
+++ b/models/actions/run_job.go
@@ -61,6 +61,13 @@ type ActionRunJob struct {
 	// It is JSON-encoded repo_model.ActionsTokenPermissions and may be empty if not specified.
 	TokenPermissions *repo_model.ActionsTokenPermissions `xorm:"JSON TEXT"`
 
+	RawStrategy string `xorm:"TEXT"` // raw strategy from job YAML's "strategy" section (stored before matrix expansion for deferred evaluation)
+
+	// IsMatrixEvaluated is only valid/needed when this job's RawStrategy is not empty and contains a matrix that depends on job outputs.
+	// If the matrix can't be evaluated yet (e.g. a needed job hasn't completed), this field will be false.
+	// If the matrix has been successfully evaluated with job outputs, this field will be true.
+	IsMatrixEvaluated bool
+
 	Started timeutil.TimeStamp
 	Stopped timeutil.TimeStamp
 	Created timeutil.TimeStamp `xorm:"created"`
@@ -289,3 +296,11 @@ func CancelPreviousJobsByJobConcurrency(ctx context.Context, job *ActionRunJob)
 	return CancelJobs(ctx, jobsToCancel)
 }
+
+// InsertActionRunJobs inserts multiple ActionRunJob records into the database
+func InsertActionRunJobs(ctx context.Context, jobs []*ActionRunJob) error {
+	if len(jobs) == 0 {
+		return nil
+	}
+	return db.Insert(ctx, jobs)
+}
diff --git a/models/migrations/migrations.go b/models/migrations/migrations.go
index db74ff78d5040..2992e42007082 100644
--- a/models/migrations/migrations.go
+++ b/models/migrations/migrations.go
@@ -405,6 +405,7 @@ func prepareMigrationTasks() []*migration {
 		newMigration(328, "Add TokenPermissions column to ActionRunJob", v1_26.AddTokenPermissionsToActionRunJob),
 		newMigration(329, "Add unique constraint for user badge", v1_26.AddUniqueIndexForUserBadge),
 		newMigration(330, "Add name column to webhook", v1_26.AddNameToWebhook),
+		newMigration(331, "Add support for matrix actions evaluation", v1_26.AddMatrixEvaluationColumnsToActionRunJob),
 	}
 	return preparedMigrations
 }
diff --git a/models/migrations/v1_26/v326.go b/models/migrations/v1_26/v326.go
index 76532e2f858b5..5e4a26c2242ba 100644
--- a/models/migrations/v1_26/v326.go
+++ b/models/migrations/v1_26/v326.go
@@ -4,312 +4,26 @@
 package v1_26
 
 import (
-	"errors"
-	"fmt"
-	"net/url"
-	"strconv"
-	"strings"
-
-	"code.gitea.io/gitea/modules/json"
-	"code.gitea.io/gitea/modules/log"
-	"code.gitea.io/gitea/modules/setting"
-	api "code.gitea.io/gitea/modules/structs"
-	webhook_module "code.gitea.io/gitea/modules/webhook"
-
 	"xorm.io/xorm"
 )
 
-const (
-	actionsRunPath = "/actions/runs/"
-
-	// Only commit status target URLs whose resolved run ID is smaller than this threshold are rewritten by this partial migration.
-	// The fixed value 1000 is a conservative cutoff chosen to cover the smaller legacy run indexes that are most likely to be confused with ID-based URLs at runtime.
-	// Larger legacy {run} or {job} numbers are usually easier to disambiguate. For example:
-	// * /actions/runs/1200/jobs/1420 is most likely an ID-based URL, because a run should not contain more than 256 jobs.
-	// * /actions/runs/1500/jobs/3 is most likely an index-based URL, because a job ID cannot be smaller than its run ID.
-	// But URLs with small numbers, such as /actions/runs/5/jobs/6, are much harder to distinguish reliably.
-	// This migration therefore prioritizes rewriting target URLs for runs in that lower range.
-	legacyURLIDThreshold int64 = 1000
-)
-
-type migrationRepository struct {
-	ID        int64
-	OwnerName string
-	Name      string
-}
-
-type migrationActionRun struct {
-	ID           int64
-	RepoID       int64
-	Index        int64
-	CommitSHA    string `xorm:"commit_sha"`
-	Event        webhook_module.HookEventType
-	TriggerEvent string
-	EventPayload string
-}
-
-type migrationActionRunJob struct {
-	ID    int64
-	RunID int64
-}
-
-type migrationCommitStatus struct {
-	ID        int64
-	RepoID    int64
-	TargetURL string
-}
-
-type commitSHAAndRuns struct {
-	commitSHA string
-	runs      map[int64]*migrationActionRun
-}
-
-// FixCommitStatusTargetURLToUseRunAndJobID partially migrates legacy Actions
-// commit status target URLs to the new run/job ID-based form.
-//
-// Only rows whose resolved run ID is below legacyURLIDThreshold are rewritten.
-// This is because smaller legacy run indexes are more likely to collide with run ID URLs during runtime resolution,
-// so this migration prioritizes that lower range and leaves the remaining legacy target URLs to the web compatibility logic.
-func FixCommitStatusTargetURLToUseRunAndJobID(x *xorm.Engine) error {
-	jobsByRunIDCache := make(map[int64][]int64)
-	repoLinkCache := make(map[int64]string)
-	groups, err := loadLegacyMigrationRunGroups(x)
-	if err != nil {
+func AddMatrixEvaluationColumnsToActionRunJob(x *xorm.Engine) error {
+	if _, err := x.SyncWithOptions(xorm.SyncOptions{
+		IgnoreDropIndices: true,
+	}, new(ActionRunJobWithMatrixSupport)); err != nil {
 		return err
 	}
-
-	for repoID, groupsBySHA := range groups {
-		for _, group := range groupsBySHA {
-			if err := migrateCommitStatusTargetURLForGroup(x, "commit_status", repoID, group.commitSHA, group.runs, jobsByRunIDCache, repoLinkCache); err != nil {
-				return err
-			}
-			if err := migrateCommitStatusTargetURLForGroup(x, "commit_status_summary", repoID, group.commitSHA, group.runs, jobsByRunIDCache, repoLinkCache); err != nil {
-				return err
-			}
-		}
-	}
 	return nil
 }
 
-func loadLegacyMigrationRunGroups(x *xorm.Engine) (map[int64]map[string]*commitSHAAndRuns, error) {
-	var runs []migrationActionRun
-	if err := x.Table("action_run").
-		Where("id < ?", legacyURLIDThreshold).
-		Cols("id", "repo_id", "`index`", "commit_sha", "event", "trigger_event", "event_payload").
-		Find(&runs); err != nil {
-		return nil, fmt.Errorf("query action_run: %w", err)
-	}
-
-	groups := make(map[int64]map[string]*commitSHAAndRuns)
-	for i := range runs {
-		run := runs[i]
-		commitID, err := getCommitStatusCommitID(&run)
-		if err != nil {
-			log.Warn("skip action_run id=%d when resolving commit status commit SHA: %v", run.ID, err)
-			continue
-		}
-		if commitID == "" {
-			// empty commitID means the run didn't create any commit status records, just skip
-			continue
-		}
-		if groups[run.RepoID] == nil {
-			groups[run.RepoID] = make(map[string]*commitSHAAndRuns)
-		}
-		if groups[run.RepoID][commitID] == nil {
-			groups[run.RepoID][commitID] = &commitSHAAndRuns{
-				commitSHA: commitID,
-				runs:      make(map[int64]*migrationActionRun),
-			}
-		}
-		groups[run.RepoID][commitID].runs[run.Index] = &run
-	}
-	return groups, nil
+// ActionRunJobWithMatrixSupport is a temporary struct for migration purposes
+// It only defines the new columns we need to add
+type ActionRunJobWithMatrixSupport struct {
+	RawStrategy       string `xorm:"TEXT"` // raw strategy from job YAML's "strategy" section
+	IsMatrixEvaluated bool   // whether the matrix has been evaluated with job outputs
 }
 
-func migrateCommitStatusTargetURLForGroup(
-	x *xorm.Engine,
-	table string,
-	repoID int64,
-	sha string,
-	runs map[int64]*migrationActionRun,
-	jobsByRunIDCache map[int64][]int64,
-	repoLinkCache map[int64]string,
-) error {
-	var rows []migrationCommitStatus
-	if err := x.Table(table).
-		Where("repo_id = ?", repoID).
-		And("sha = ?", sha).
-		Cols("id", "repo_id", "target_url").
-		Find(&rows); err != nil {
-		return fmt.Errorf("query %s for repo_id=%d sha=%s: %w", table, repoID, sha, err)
-	}
-
-	for _, row := range rows {
-		repoLink, err := getRepoLinkCached(x, repoLinkCache, row.RepoID)
-		if err != nil || repoLink == "" {
-			if err != nil {
-				log.Warn("convert %s id=%d getRepoLinkCached: %v", table, row.ID, err)
-			} else {
-				log.Warn("convert %s id=%d: repo=%d not found", table, row.ID, row.RepoID)
-			}
-			continue
-		}
-
-		runNum, jobNum, ok := parseTargetURL(row.TargetURL, repoLink)
-		if !ok {
-			continue
-		}
-
-		run, ok := runs[runNum]
-		if !ok {
-			continue
-		}
-
-		jobID, ok, err := getJobIDByIndexCached(x, jobsByRunIDCache, run.ID, jobNum)
-		if err != nil || !ok {
-			if err != nil {
-				log.Warn("convert %s id=%d getJobIDByIndexCached: %v", table, row.ID, err)
-			} else {
-				log.Warn("convert %s id=%d: job not found for run_id=%d job_index=%d", table, row.ID, run.ID, jobNum)
-			}
-			continue
-		}
-
-		oldURL := row.TargetURL
-		newURL := fmt.Sprintf("%s%s%d/jobs/%d", repoLink, actionsRunPath, run.ID, jobID)
-		if oldURL == newURL {
-			continue
-		}
-
-		if _, err := x.Table(table).ID(row.ID).Cols("target_url").Update(&migrationCommitStatus{TargetURL: newURL}); err != nil {
-			return fmt.Errorf("update %s id=%d target_url from %s to %s: %w", table, row.ID, oldURL, newURL, err)
-		}
-	}
-	return nil
-}
-
-func getRepoLinkCached(x *xorm.Engine, cache map[int64]string, repoID int64) (string, error) {
-	if link, ok := cache[repoID]; ok {
-		return link, nil
-	}
-	repo := &migrationRepository{}
-	has, err := x.Table("repository").Where("id=?", repoID).Get(repo)
-	if err != nil {
-		return "", err
-	}
-	if !has {
-		cache[repoID] = ""
-		return "", nil
-	}
-	link := setting.AppSubURL + "/" + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
-	cache[repoID] = link
-	return link, nil
-}
-
-func getJobIDByIndexCached(x *xorm.Engine, cache map[int64][]int64, runID, jobIndex int64) (int64, bool, error) {
-	jobIDs, ok := cache[runID]
-	if !ok {
-		var jobs []migrationActionRunJob
-		if err := x.Table("action_run_job").Where("run_id=?", runID).Asc("id").Cols("id").Find(&jobs); err != nil {
-			return 0, false, err
-		}
-		jobIDs = make([]int64, 0, len(jobs))
-		for _, job := range jobs {
-			jobIDs = append(jobIDs, job.ID)
-		}
-		cache[runID] = jobIDs
-	}
-	if jobIndex < 0 || jobIndex >= int64(len(jobIDs)) {
-		return 0, false, nil
-	}
-	return jobIDs[jobIndex], true, nil
-}
-
-func parseTargetURL(targetURL, repoLink string) (runNum, jobNum int64, ok bool) {
-	prefix := repoLink + actionsRunPath
-	if !strings.HasPrefix(targetURL, prefix) {
-		return 0, 0, false
-	}
-	rest := targetURL[len(prefix):]
-
-	parts := strings.Split(rest, "/")
-	if len(parts) == 3 && parts[1] == "jobs" {
-		runNum, err1 := strconv.ParseInt(parts[0], 10, 64)
-		jobNum, err2 := strconv.ParseInt(parts[2], 10, 64)
-		if err1 != nil || err2 != nil {
-			return 0, 0, false
-		}
-		return runNum, jobNum, true
-	}
-
-	return 0, 0, false
-}
-
-func getCommitStatusCommitID(run *migrationActionRun) (string, error) {
-	switch run.Event {
-	case webhook_module.HookEventPush:
-		payload, err := getPushEventPayload(run)
-		if err != nil {
-			return "", fmt.Errorf("getPushEventPayload: %w", err)
-		}
-		if payload.HeadCommit == nil {
-			return "", errors.New("head commit is missing in event payload")
-		}
-		return payload.HeadCommit.ID, nil
-	case webhook_module.HookEventPullRequest,
-		webhook_module.HookEventPullRequestSync,
-		webhook_module.HookEventPullRequestAssign,
-		webhook_module.HookEventPullRequestLabel,
-		webhook_module.HookEventPullRequestReviewRequest,
-		webhook_module.HookEventPullRequestMilestone:
-		payload, err := getPullRequestEventPayload(run)
-		if err != nil {
-			return "", fmt.Errorf("getPullRequestEventPayload: %w", err)
-		}
-		if payload.PullRequest == nil {
-			return "", errors.New("pull request is missing in event payload")
-		} else if payload.PullRequest.Head == nil {
-			return "", errors.New("head of pull request is missing in event payload")
-		}
-		return payload.PullRequest.Head.Sha, nil
-	case webhook_module.HookEventPullRequestReviewApproved,
-		webhook_module.HookEventPullRequestReviewRejected,
-		webhook_module.HookEventPullRequestReviewComment:
-		payload, err := getPullRequestEventPayload(run)
-		if err != nil {
-			return "", fmt.Errorf("getPullRequestEventPayload: %w", err)
-		}
-		if payload.PullRequest == nil {
-			return "", errors.New("pull request is missing in event payload")
-		} else if payload.PullRequest.Head == nil {
-			return "", errors.New("head of pull request is missing in event payload")
-		}
-		return payload.PullRequest.Head.Sha, nil
-	case webhook_module.HookEventRelease:
-		return run.CommitSHA, nil
-	default:
-		return "", nil
-	}
-}
-
-func getPushEventPayload(run *migrationActionRun) (*api.PushPayload, error) {
-	if run.Event != webhook_module.HookEventPush {
-		return nil, fmt.Errorf("event %s is not a push event", run.Event)
-	}
-	var payload api.PushPayload
-	if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
-		return nil, err
-	}
-	return &payload, nil
-}
-
-func getPullRequestEventPayload(run *migrationActionRun) (*api.PullRequestPayload, error) {
-	if !run.Event.IsPullRequest() && !run.Event.IsPullRequestReview() {
-		return nil, fmt.Errorf("event %s is not a pull request event", run.Event)
-	}
-	var payload api.PullRequestPayload
-	if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
-		return nil, err
-	}
-	return &payload, nil
+// TableName returns the table name for xorm to sync
+func (ActionRunJobWithMatrixSupport) TableName() string {
+	return "action_run_job"
 }
diff --git a/models/migrations/v1_26/v331.go b/models/migrations/v1_26/v331.go
new file mode 100644
index 0000000000000..76532e2f858b5
--- /dev/null
+++ b/models/migrations/v1_26/v331.go
@@ -0,0 +1,315 @@
+// Copyright 2026 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1_26
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"code.gitea.io/gitea/modules/json"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/setting"
+	api "code.gitea.io/gitea/modules/structs"
+	webhook_module "code.gitea.io/gitea/modules/webhook"
+
+	"xorm.io/xorm"
+)
+
+const (
+	actionsRunPath = "/actions/runs/"
+
+	// Only commit status target URLs whose resolved run ID is smaller than this threshold are rewritten by this partial migration.
+	// The fixed value 1000 is a conservative cutoff chosen to cover the smaller legacy run indexes that are most likely to be confused with ID-based URLs at runtime.
+	// Larger legacy {run} or {job} numbers are usually easier to disambiguate. For example:
+	// * /actions/runs/1200/jobs/1420 is most likely an ID-based URL, because a run should not contain more than 256 jobs.
+	// * /actions/runs/1500/jobs/3 is most likely an index-based URL, because a job ID cannot be smaller than its run ID.
+	// But URLs with small numbers, such as /actions/runs/5/jobs/6, are much harder to distinguish reliably.
+	// This migration therefore prioritizes rewriting target URLs for runs in that lower range.
+	legacyURLIDThreshold int64 = 1000
+)
+
+type migrationRepository struct {
+	ID        int64
+	OwnerName string
+	Name      string
+}
+
+type migrationActionRun struct {
+	ID           int64
+	RepoID       int64
+	Index        int64
+	CommitSHA    string `xorm:"commit_sha"`
+	Event        webhook_module.HookEventType
+	TriggerEvent string
+	EventPayload string
+}
+
+type migrationActionRunJob struct {
+	ID    int64
+	RunID int64
+}
+
+type migrationCommitStatus struct {
+	ID        int64
+	RepoID    int64
+	TargetURL string
+}
+
+type commitSHAAndRuns struct {
+	commitSHA string
+	runs      map[int64]*migrationActionRun
+}
+
+// FixCommitStatusTargetURLToUseRunAndJobID partially migrates legacy Actions
+// commit status target URLs to the new run/job ID-based form.
+//
+// Only rows whose resolved run ID is below legacyURLIDThreshold are rewritten.
+// This is because smaller legacy run indexes are more likely to collide with run ID URLs during runtime resolution,
+// so this migration prioritizes that lower range and leaves the remaining legacy target URLs to the web compatibility logic.
+func FixCommitStatusTargetURLToUseRunAndJobID(x *xorm.Engine) error {
+	jobsByRunIDCache := make(map[int64][]int64)
+	repoLinkCache := make(map[int64]string)
+	groups, err := loadLegacyMigrationRunGroups(x)
+	if err != nil {
+		return err
+	}
+
+	for repoID, groupsBySHA := range groups {
+		for _, group := range groupsBySHA {
+			if err := migrateCommitStatusTargetURLForGroup(x, "commit_status", repoID, group.commitSHA, group.runs, jobsByRunIDCache, repoLinkCache); err != nil {
+				return err
+			}
+			if err := migrateCommitStatusTargetURLForGroup(x, "commit_status_summary", repoID, group.commitSHA, group.runs, jobsByRunIDCache, repoLinkCache); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func loadLegacyMigrationRunGroups(x *xorm.Engine) (map[int64]map[string]*commitSHAAndRuns, error) {
+	var runs []migrationActionRun
+	if err := x.Table("action_run").
+		Where("id < ?", legacyURLIDThreshold).
+		Cols("id", "repo_id", "`index`", "commit_sha", "event", "trigger_event", "event_payload").
+		Find(&runs); err != nil {
+		return nil, fmt.Errorf("query action_run: %w", err)
+	}
+
+	groups := make(map[int64]map[string]*commitSHAAndRuns)
+	for i := range runs {
+		run := runs[i]
+		commitID, err := getCommitStatusCommitID(&run)
+		if err != nil {
+			log.Warn("skip action_run id=%d when resolving commit status commit SHA: %v", run.ID, err)
+			continue
+		}
+		if commitID == "" {
+			// empty commitID means the run didn't create any commit status records, just skip
+			continue
+		}
+		if groups[run.RepoID] == nil {
+			groups[run.RepoID] = make(map[string]*commitSHAAndRuns)
+		}
+		if groups[run.RepoID][commitID] == nil {
+			groups[run.RepoID][commitID] = &commitSHAAndRuns{
+				commitSHA: commitID,
+				runs:      make(map[int64]*migrationActionRun),
+			}
+		}
+		groups[run.RepoID][commitID].runs[run.Index] = &run
+	}
+	return groups, nil
+}
+
+func migrateCommitStatusTargetURLForGroup(
+	x *xorm.Engine,
+	table string,
+	repoID int64,
+	sha string,
+	runs map[int64]*migrationActionRun,
+	jobsByRunIDCache map[int64][]int64,
+	repoLinkCache map[int64]string,
+) error {
+	var rows []migrationCommitStatus
+	if err := x.Table(table).
+		Where("repo_id = ?", repoID).
+		And("sha = ?", sha).
+		Cols("id", "repo_id", "target_url").
+		Find(&rows); err != nil {
+		return fmt.Errorf("query %s for repo_id=%d sha=%s: %w", table, repoID, sha, err)
+	}
+
+	for _, row := range rows {
+		repoLink, err := getRepoLinkCached(x, repoLinkCache, row.RepoID)
+		if err != nil || repoLink == "" {
+			if err != nil {
+				log.Warn("convert %s id=%d getRepoLinkCached: %v", table, row.ID, err)
+			} else {
+				log.Warn("convert %s id=%d: repo=%d not found", table, row.ID, row.RepoID)
+			}
+			continue
+		}
+
+		runNum, jobNum, ok := parseTargetURL(row.TargetURL, repoLink)
+		if !ok {
+			continue
+		}
+
+		run, ok := runs[runNum]
+		if !ok {
+			continue
+		}
+
+		jobID, ok, err := getJobIDByIndexCached(x, jobsByRunIDCache, run.ID, jobNum)
+		if err != nil || !ok {
+			if err != nil {
+				log.Warn("convert %s id=%d getJobIDByIndexCached: %v", table, row.ID, err)
+			} else {
+				log.Warn("convert %s id=%d: job not found for run_id=%d job_index=%d", table, row.ID, run.ID, jobNum)
+			}
+			continue
+		}
+
+		oldURL := row.TargetURL
+		newURL := fmt.Sprintf("%s%s%d/jobs/%d", repoLink, actionsRunPath, run.ID, jobID)
+		if oldURL == newURL {
+			continue
+		}
+
+		if _, err := x.Table(table).ID(row.ID).Cols("target_url").Update(&migrationCommitStatus{TargetURL: newURL}); err != nil {
+			return fmt.Errorf("update %s id=%d target_url from %s to %s: %w", table, row.ID, oldURL, newURL, err)
+		}
+	}
+	return nil
+}
+
+func getRepoLinkCached(x *xorm.Engine, cache map[int64]string, repoID int64) (string, error) {
+	if link, ok := cache[repoID]; ok {
+		return link, nil
+	}
+	repo := &migrationRepository{}
+	has, err := x.Table("repository").Where("id=?", repoID).Get(repo)
+	if err != nil {
+		return "", err
+	}
+	if !has {
+		cache[repoID] = ""
+		return "", nil
+	}
+	link := setting.AppSubURL + "/" + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
+	cache[repoID] = link
+	return link, nil
+}
+
+func getJobIDByIndexCached(x *xorm.Engine, cache map[int64][]int64, runID, jobIndex int64) (int64, bool, error) {
+	jobIDs, ok := cache[runID]
+	if !ok {
+		var jobs []migrationActionRunJob
+		if err := x.Table("action_run_job").Where("run_id=?", runID).Asc("id").Cols("id").Find(&jobs); err != nil {
+			return 0, false, err
+		}
+		jobIDs = make([]int64, 0, len(jobs))
+		for _, job := range jobs {
+			jobIDs = append(jobIDs, job.ID)
+		}
+		cache[runID] = jobIDs
+	}
+	if jobIndex < 0 || jobIndex >= int64(len(jobIDs)) {
+		return 0, false, nil
+	}
+	return jobIDs[jobIndex], true, nil
+}
+
+func parseTargetURL(targetURL, repoLink string) (runNum, jobNum int64, ok bool) {
+	prefix := repoLink + actionsRunPath
+	if !strings.HasPrefix(targetURL, prefix) {
+		return 0, 0, false
+	}
+	rest := targetURL[len(prefix):]
+
+	parts := strings.Split(rest, "/")
+	if len(parts) == 3 && parts[1] == "jobs" {
+		runNum, err1 := strconv.ParseInt(parts[0], 10, 64)
+		jobNum, err2 := strconv.ParseInt(parts[2], 10, 64)
+		if err1 != nil || err2 != nil {
+			return 0, 0, false
+		}
+		return runNum, jobNum, true
+	}
+
+	return 0, 0, false
+}
+
+func getCommitStatusCommitID(run *migrationActionRun) (string, error) {
+	switch run.Event {
+	case webhook_module.HookEventPush:
+		payload, err := getPushEventPayload(run)
+		if err != nil {
+			return "", fmt.Errorf("getPushEventPayload: %w", err)
+		}
+		if payload.HeadCommit == nil {
+			return "", errors.New("head commit is missing in event payload")
+		}
+		return payload.HeadCommit.ID, nil
+	case webhook_module.HookEventPullRequest,
+		webhook_module.HookEventPullRequestSync,
+		webhook_module.HookEventPullRequestAssign,
+		webhook_module.HookEventPullRequestLabel,
+		webhook_module.HookEventPullRequestReviewRequest,
+		webhook_module.HookEventPullRequestMilestone:
+		payload, err := getPullRequestEventPayload(run)
+		if err != nil {
+			return "", fmt.Errorf("getPullRequestEventPayload: %w", err)
+		}
+		if payload.PullRequest == nil {
+			return "", errors.New("pull request is missing in event payload")
+		} else if payload.PullRequest.Head == nil {
+			return "", errors.New("head of pull request is missing in event payload")
+		}
+		return payload.PullRequest.Head.Sha, nil
+	case webhook_module.HookEventPullRequestReviewApproved,
+		webhook_module.HookEventPullRequestReviewRejected,
+		webhook_module.HookEventPullRequestReviewComment:
+		payload, err := getPullRequestEventPayload(run)
+		if err != nil {
+			return "", fmt.Errorf("getPullRequestEventPayload: %w", err)
+		}
+		if payload.PullRequest == nil {
+			return "", errors.New("pull request is missing in event payload")
+		} else if payload.PullRequest.Head == nil {
+			return "", errors.New("head of pull request is missing in event payload")
+		}
+		return payload.PullRequest.Head.Sha, nil
+	case webhook_module.HookEventRelease:
+		return run.CommitSHA, nil
+	default:
+		return "", nil
+	}
+}
+
+func getPushEventPayload(run *migrationActionRun) (*api.PushPayload, error) {
+	if run.Event != webhook_module.HookEventPush {
+		return nil, fmt.Errorf("event %s is not a push event", run.Event)
+	}
+	var payload api.PushPayload
+	if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
+		return nil, err
+	}
+	return &payload, nil
+}
+
+func getPullRequestEventPayload(run *migrationActionRun) (*api.PullRequestPayload, error) {
+	if !run.Event.IsPullRequest() && !run.Event.IsPullRequestReview() {
+		return nil, fmt.Errorf("event %s is not a pull request event", run.Event)
+	}
+	var payload api.PullRequestPayload
+	if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
+		return nil, err
+	}
+	return &payload, nil
+}
diff --git a/models/unittest/fixtures_loader.go b/models/unittest/fixtures_loader.go
index 5b79cb5643bf2..3bfa5f18fa682 100644
--- a/models/unittest/fixtures_loader.go
+++ b/models/unittest/fixtures_loader.go
@@ -14,7 +14,7 @@ import (
 
 	"code.gitea.io/gitea/models/db"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 	"xorm.io/xorm"
 	"xorm.io/xorm/schemas"
 )
diff --git a/modules/actions/jobparser/interpeter.go b/modules/actions/jobparser/interpeter.go
index 512b6f02ab6b2..bdff039f91ebd 100644
--- a/modules/actions/jobparser/interpeter.go
+++ b/modules/actions/jobparser/interpeter.go
@@ -33,6 +33,10 @@ func NewInterpeter(
 		},
 		JobID: jobID,
 	}
+
+	// Add the current job to the workflow so run.Job() doesn't return nil
+	run.Workflow.Jobs[jobID] = job
+
 	for id, result := range results {
 		need := yaml.Node{}
 		_ = need.Encode(result.Needs)
diff --git a/modules/actions/jobparser/jobparser.go b/modules/actions/jobparser/jobparser.go
index 1d4c4c1756aaa..f49b0057a6ee3 100644
--- a/modules/actions/jobparser/jobparser.go
+++ b/modules/actions/jobparser/jobparser.go
@@ -14,6 +14,23 @@ import (
 	"go.yaml.in/yaml/v4"
 )
 
+// deepCopyYamlNode creates a deep copy of a yaml.Node to prevent mutations
+// from affecting the original. This is important because yaml.Node.Content
+// is a slice of pointers, and a shallow copy would share the same child nodes.
+func deepCopyYamlNode(node *yaml.Node) *yaml.Node {
+	if node == nil {
+		return nil
+	}
+	nodeCopy := *node
+	if node.Content != nil {
+		nodeCopy.Content = make([]*yaml.Node, len(node.Content))
+		for i, child := range node.Content {
+			nodeCopy.Content[i] = deepCopyYamlNode(child)
+		}
+	}
+	return &nodeCopy
+}
+
 func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
 	origin, err := model.ReadWorkflow(bytes.NewReader(content))
 	if err != nil {
@@ -31,10 +48,11 @@ func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
 	}
 	results := map[string]*JobResult{}
 	for id, job := range origin.Jobs {
+		outputs := pc.jobOutputs[id]
 		results[id] = &JobResult{
 			Needs:   job.Needs(),
 			Result:  pc.jobResults[id],
-			Outputs: nil, // not supported yet
+			Outputs: outputs,
 		}
 	}
 
@@ -49,7 +67,32 @@ func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
 
 	for i, id := range ids {
 		job := jobs[i]
-		matricxes, err := getMatrixes(origin.GetJob(id))
+		originJob := origin.GetJob(id)
+
+		if originJob == nil {
+			return nil, fmt.Errorf("job %s not found in origin workflow", id)
+		}
+
+		// Clone the origin job to avoid modifying the shared object
+		evaluatedJob := *originJob
+		if originJob.Strategy != nil {
+			stratCopy := *originJob.Strategy
+			// Deep copy the RawMatrix yaml.Node to prevent mutations from affecting the original
+			stratCopy.RawMatrix = *deepCopyYamlNode(&originJob.Strategy.RawMatrix)
+			evaluatedJob.Strategy = &stratCopy
+		}
+
+		// Create an evaluator with access to needs/outputs for matrix evaluation
+		matrixEvaluator := NewExpressionEvaluator(NewInterpeter(id, &evaluatedJob, nil, pc.gitContext, results, pc.vars, pc.inputs))
+
+		// Evaluate the matrix before expanding it
+		if evaluatedJob.Strategy != nil && evaluatedJob.Strategy.RawMatrix.Kind != 0 {
+			if err := matrixEvaluator.EvaluateYamlNode(&evaluatedJob.Strategy.RawMatrix); err != nil {
+				return nil, fmt.Errorf("error evaluating matrix for job %s: %w", id, err)
+			}
+		}
+
+		matricxes, err := getMatrixes(&evaluatedJob)
 		if err != nil {
 			return nil, fmt.Errorf("getMatrixes: %w", err)
 		}
@@ -59,9 +102,9 @@ func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
 			job.Name = id
 		}
 		job.Strategy.RawMatrix = encodeMatrix(matrix)
-		evaluator := NewExpressionEvaluator(NewInterpeter(id, origin.GetJob(id), matrix, pc.gitContext, results, pc.vars, pc.inputs))
+		evaluator := NewExpressionEvaluator(NewInterpeter(id, &evaluatedJob, matrix, pc.gitContext, results, pc.vars, pc.inputs))
 		job.Name = nameWithMatrix(job.Name, matrix, evaluator)
-		runsOn := origin.GetJob(id).RunsOn()
+		runsOn := evaluatedJob.RunsOn()
 		for i, v := range runsOn {
 			runsOn[i] = evaluator.Interpolate(v)
 		}
@@ -89,6 +132,12 @@ func WithJobResults(results map[string]string) ParseOption {
 	}
 }
 
+func WithJobOutputs(outputs map[string]map[string]string) ParseOption {
+	return func(c *parseContext) {
+		c.jobOutputs = outputs
+	}
+}
+
 func WithGitContext(context *model.GithubContext) ParseOption {
 	return func(c *parseContext) {
 		c.gitContext = context
@@ -109,6 +158,7 @@ func WithInputs(inputs map[string]any) ParseOption {
 
 type parseContext struct {
 	jobResults map[string]string
+	jobOutputs map[string]map[string]string
 	gitContext *model.GithubContext
 	vars       map[string]string
 	inputs     map[string]any
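Note for reviewers: the deepCopyYamlNode change above exists because yaml.Node.Content is a slice of child pointers, so copying the Node struct still aliases the same tree. A minimal standalone sketch of the aliasing problem (illustrative only, not part of this patch; it assumes go.yaml.in/yaml/v4 decodes into yaml.Node the same way gopkg.in/yaml.v3 does):

	package main

	import (
		"fmt"

		"go.yaml.in/yaml/v4"
	)

	func main() {
		var doc yaml.Node
		_ = yaml.Unmarshal([]byte("version: [1.17, 1.18]"), &doc)

		// Struct copy: the Node value is copied, but Content still points
		// at the same child nodes as doc.
		shallow := doc
		shallow.Content[0].Content[1].Content[0].Value = "mutated"

		// The "original" observes the mutation, which is how the shared
		// Strategy.RawMatrix was corrupted before this fix.
		fmt.Println(doc.Content[0].Content[1].Content[0].Value) // prints "mutated"
	}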
diff --git a/modules/actions/jobparser/jobparser_test.go b/modules/actions/jobparser/jobparser_test.go
index 51ba70fc2adf1..2e82dfc034260 100644
--- a/modules/actions/jobparser/jobparser_test.go
+++ b/modules/actions/jobparser/jobparser_test.go
@@ -7,6 +7,8 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/nektos/act/pkg/exprparser"
+	"github.com/nektos/act/pkg/model"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.yaml.in/yaml/v4"
@@ -53,6 +55,20 @@ func TestParse(t *testing.T) {
 			options: nil,
 			wantErr: false,
 		},
+		{
+			name: "job_name_with_matrix_dynamic",
+			options: []ParseOption{
+				WithJobResults(map[string]string{
+					"job1": "success",
+				}),
+				WithJobOutputs(map[string]map[string]string{
+					"job1": {
+						"versions": "[1.17, 1.18, 1.19]",
+					},
+				}),
+			},
+			wantErr: false,
+		},
 		{
 			name:    "prefixed_newline",
 			options: nil,
@@ -85,3 +101,418 @@ func TestParse(t *testing.T) {
 		})
 	}
 }
+
+func TestDeepCopyYamlNode(t *testing.T) {
+	t.Run("deep_copy_preserves_isolation", func(t *testing.T) {
+		// Create original node with nested content
+		original := &yaml.Node{
+			Kind:  yaml.MappingNode,
+			Tag:   "!!map",
+			Value: "",
+			Content: []*yaml.Node{
+				{Kind: yaml.ScalarNode, Tag: "!!str", Value: "key1"},
+				{Kind: yaml.ScalarNode, Tag: "!!str", Value: "value1"},
+				{Kind: yaml.ScalarNode, Tag: "!!str", Value: "key2"},
+				{Kind: yaml.ScalarNode, Tag: "!!str", Value: "value2"},
+			},
+		}
+
+		// Create deep copy
+		copied := deepCopyYamlNode(original)
+
+		// Verify copy is not nil
+		require.NotNil(t, copied)
+
+		// Verify values are equal
+		assert.Equal(t, original.Kind, copied.Kind)
+		assert.Equal(t, original.Tag, copied.Tag)
+		assert.Len(t, original.Content, len(copied.Content))
+
+		// Verify content pointers are different (isolation)
+		for i, node := range original.Content {
+			assert.NotSame(t, node, copied.Content[i], "Content[%d] should be different pointers", i)
+			assert.Equal(t, node.Value, copied.Content[i].Value, "Content[%d] values should be equal", i)
+		}
+
+		// Modify the copy and verify original is unaffected
+		copied.Content[0].Value = "modified"
+		assert.NotEqual(t, original.Content[0].Value, copied.Content[0].Value)
+	})
+
+	t.Run("deep_copy_handles_nil", func(t *testing.T) {
+		copied := deepCopyYamlNode(nil)
+		assert.Nil(t, copied)
+	})
+
+	t.Run("deep_copy_handles_recursive", func(t *testing.T) {
+		// Create a nested structure
+		original := &yaml.Node{
+			Kind:  yaml.MappingNode,
+			Tag:   "!!map",
+			Value: "",
+			Content: []*yaml.Node{
+				{Kind: yaml.ScalarNode, Tag: "!!str", Value: "nested"},
+				{
+					Kind:  yaml.MappingNode,
+					Tag:   "!!map",
+					Value: "",
+					Content: []*yaml.Node{
+						{Kind: yaml.ScalarNode, Tag: "!!str", Value: "inner"},
+						{Kind: yaml.ScalarNode, Tag: "!!str", Value: "data"},
+					},
+				},
+			},
+		}
+
+		copied := deepCopyYamlNode(original)
+
+		// Verify deep isolation at all levels
+		require.NotNil(t, copied)
+		assert.NotSame(t, original.Content[1], copied.Content[1])
+		assert.NotSame(t, original.Content[1].Content[0], copied.Content[1].Content[0])
+
+		// Modify nested copy and verify original is unaffected
+		copied.Content[1].Content[0].Value = "modified"
+		assert.NotEqual(t, original.Content[1].Content[0].Value, copied.Content[1].Content[0].Value)
+	})
+}
+
+func TestStrategyIsolationAfterEvaluation(t *testing.T) {
+	// This test verifies that EvaluateYamlNode mutations on a copied Strategy
+	// do not affect the original Strategy. This was the root cause of the issue.
+	t.Run("evaluation_does_not_mutate_original", func(t *testing.T) {
+		// Create an original job with a matrix
+		originalJob := &model.Job{
+			Strategy: &model.Strategy{
+				RawMatrix: yaml.Node{
+					Kind:  yaml.MappingNode,
+					Tag:   "!!map",
+					Value: "",
+					Content: []*yaml.Node{
+						{Kind: yaml.ScalarNode, Tag: "!!str", Value: "version"},
+						{
+							Kind:  yaml.SequenceNode,
+							Tag:   "!!seq",
+							Value: "",
+							Content: []*yaml.Node{
+								{Kind: yaml.ScalarNode, Tag: "!!str", Value: "${{ fromJson(needs.setup.outputs.versions) }}"},
+							},
+						},
+					},
+				},
+			},
+		}
+
+		// Save the original Content pointer for verification
+		originalContentPtr := originalJob.Strategy.RawMatrix.Content[1].Content[0]
+		originalValue := originalContentPtr.Value
+
+		// Simulate what happens in Parse(): shallow copy followed by evaluation
+		evaluatedJob := *originalJob
+		if originalJob.Strategy != nil {
+			stratCopy := *originalJob.Strategy
+			// This is the fix: deep copy the RawMatrix
+			stratCopy.RawMatrix = *deepCopyYamlNode(&originalJob.Strategy.RawMatrix)
+			evaluatedJob.Strategy = &stratCopy
+		}
+
+		// Create an evaluator and evaluate the matrix
+		// (In real usage, this would have job outputs and other context)
+		evaluator := NewExpressionEvaluator(exprparser.NewInterpeter(
+			&exprparser.EvaluationEnvironment{
+				Github: &model.GithubContext{},
+				Vars:   map[string]string{},
+				Inputs: map[string]any{},
+			},
+			exprparser.Config{},
+		))
+
+		// Evaluate the copied node
+		_ = evaluator.EvaluateYamlNode(&evaluatedJob.Strategy.RawMatrix)
+
+		// Verify that the original job's matrix is unchanged
+		assert.Equal(t, originalValue, originalJob.Strategy.RawMatrix.Content[1].Content[0].Value,
+			"Original job's matrix should not be mutated by evaluation")
+
+		// Verify that they are now different pointers (isolation)
+		assert.NotSame(t, originalJob.Strategy.RawMatrix.Content[1].Content[0],
+			evaluatedJob.Strategy.RawMatrix.Content[1].Content[0],
+			"Evaluated job should have different node pointers")
+	})
+}
+
+func TestParseWithMissingJobOutputs(t *testing.T) {
+	// Test graceful degradation when job outputs are missing
+	t.Run("missing_job_outputs_degrades_gracefully", func(t *testing.T) {
+		workflowYAML := `
+name: test-missing-outputs
+on: push
+
+jobs:
+  setup:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        version: [1.0, 2.0]
+    steps:
+      - run: echo setup
+
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest]
+    steps:
+      - run: echo build
+`
+		// Parse without providing job outputs - should gracefully handle
+		result, err := Parse([]byte(workflowYAML))
+
+		// Should not error on parse
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+		assert.NotEmpty(t, result)
+	})
+
+	t.Run("empty_job_outputs_map", func(t *testing.T) {
+		workflowYAML := `
+name: test-empty-outputs
+on: push
+
+jobs:
+  setup:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo setup
+
+  build:
+    needs: setup
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        version: [1.0, 2.0]
+    steps:
+      - run: echo build
+`
+		// Parse with empty job outputs
+		result, err := Parse([]byte(workflowYAML),
+			WithJobOutputs(map[string]map[string]string{}))
+
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+
+		// Should still parse successfully
+		assert.NotEmpty(t, result)
+	})
+}
+
+func TestParseWithNeedsReferenceNoOutputs(t *testing.T) {
+	// Test references to jobs that have no outputs provided
+	t.Run("needs_reference_without_outputs", func(t *testing.T) {
+		workflowYAML := `
+name: test-needs-no-outputs
+on: push
+
+jobs:
+  setup:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo setup
+
+  build:
+    needs: setup
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest]
+    steps:
+      - run: echo build
+`
+		// Parse with a needs reference but static matrix only
+		result, err := Parse([]byte(workflowYAML),
+			WithJobResults(map[string]string{
+				"setup": "success",
+			}))
+
+		// Should not error on parse
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+		assert.NotEmpty(t, result)
+	})
+
+	t.Run("needs_reference_with_partial_outputs", func(t *testing.T) {
+		workflowYAML := `
+name: test-partial-outputs
+on: push
+
+jobs:
+  setup:
+    runs-on: ubuntu-latest
+    outputs:
+      versions: "[1.0, 2.0]"
+    steps:
+      - run: echo setup
+
+  build:
+    needs: setup
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        version: ${{ fromJson(needs.setup.outputs.versions) }}
+        os: [ubuntu-latest, windows-latest]
+    steps:
+      - run: echo build
+`
+		// Parse with partial outputs provided
+		result, err := Parse([]byte(workflowYAML),
+			WithJobOutputs(map[string]map[string]string{
+				"setup": {
+					"versions": "[1.0, 2.0]",
+				},
+			}))
+
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+
+		// Should parse successfully
+		assert.NotEmpty(t, result)
+	})
+}
+
+func TestParseWithMixedMatrixValues(t *testing.T) {
+	// Test matrix with both static arrays and dynamic template expressions
+	t.Run("static_and_dynamic_matrix_values", func(t *testing.T) {
+		workflowYAML := `
+name: test-mixed-matrix
+on: push
+
+jobs:
+  setup:
+    runs-on: ubuntu-latest
+    outputs:
+      versions: "[1.0, 2.0]"
+    steps:
+      - run: echo setup
+
+  build:
+    needs: setup
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-latest]
+        version: ${{ fromJson(needs.setup.outputs.versions) }}
+        node: [14, 16, 18]
+    steps:
+      - run: echo build
+`
+		// Parse with dynamic matrix values
+		result, err := Parse([]byte(workflowYAML),
+			WithJobOutputs(map[string]map[string]string{
+				"setup": {
+					"versions": "[1.0, 2.0]",
+				},
+			}))
+
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+
+		// Verify we have workflows
+		assert.NotEmpty(t, result)
+
+		// Check that all three matrix dimensions are present
+		hasAllDimensions := false
+		for _, workflow := range result {
+			id, swfJob := workflow.Job()
+			if id == "build" {
+				// In jobparser, we just verify the job was parsed successfully
+				if swfJob != nil {
+					// Check strategy has matrix
+					if swfJob.Strategy.RawMatrix.Kind != 0 {
+						// All three dimensions should be defined
+						hasAllDimensions = true
+					}
+				}
+				break
+			}
+		}
+
+		assert.True(t, hasAllDimensions, "should have all matrix dimensions")
+	})
+
+	t.Run("multiple_dynamic_matrix_values", func(t *testing.T) {
+		workflowYAML := `
+name: test-multiple-dynamic
+on: push
+
+jobs:
+  setup:
+    runs-on: ubuntu-latest
+    outputs:
+      versions: "[1.0, 2.0]"
+      platforms: "[\"linux\", \"darwin\"]"
+    steps:
+      - run: echo setup
+
+  build:
+    needs: setup
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        version: ${{ fromJson(needs.setup.outputs.versions) }}
+        platform: ${{ fromJson(needs.setup.outputs.platforms) }}
+        static: [a, b]
+    steps:
+      - run: echo build
+`
+		// Parse with multiple dynamic values
+		result, err := Parse([]byte(workflowYAML),
+			WithJobOutputs(map[string]map[string]string{
+				"setup": {
+					"versions":  "[1.0, 2.0]",
+					"platforms": "[\"linux\", \"darwin\"]",
+				},
+			}))
+
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+		assert.NotEmpty(t, result)
+	})
+
+	t.Run("all_static_arrays_no_dynamic", func(t *testing.T) {
+		workflowYAML := `
+name: test-all-static
+on: push
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest]
+        version: [1.18, 1.19, 1.20]
+        node: [14, 16]
+    steps:
+      - run: echo build
+`
+		// Parse with all static arrays, no dynamic values
+		result, err := Parse([]byte(workflowYAML))
+
+		assert.NoError(t, err)
+		assert.NotNil(t, result)
+
+		// Should expand correctly
+		// 2 os * 3 versions * 2 node = 12 combinations
+		assert.NotEmpty(t, result)
+
+		// Verify matrix structure
+		for _, workflow := range result {
+			id, swfJob := workflow.Job()
+			if id == "build" {
+				// Verify the job was parsed with a matrix strategy
+				assert.NotNil(t, swfJob)
+				assert.NotEqual(t, 0, swfJob.Strategy.RawMatrix.Kind)
+				break
+			}
+		}
+	})
+}
diff --git a/modules/actions/jobparser/testdata/job_name_with_matrix_dynamic.in.yaml b/modules/actions/jobparser/testdata/job_name_with_matrix_dynamic.in.yaml
new file mode 100644
index 0000000000000..8d9efb9c59c8b
--- /dev/null
+++ b/modules/actions/jobparser/testdata/job_name_with_matrix_dynamic.in.yaml
@@ -0,0 +1,24 @@
+name: test
+jobs:
+  job1:
+    runs-on: ubuntu-latest
+    outputs:
+      versions: ${{ steps.version-map.outputs.versions }}
+    steps:
+      - name: Generate the version map
+        id: version-map
+        run: |
+          echo "versions=[1.17, 1.18, 1.19]" >> $GITHUB_OUTPUT
+  job2:
+    needs: job1
+    strategy:
+      matrix:
+        version: ${{ fromJSON(needs.job1.outputs.versions) }}
+        os: ubuntu-24.04
+    runs-on: ${{ matrix.os }}
+    name: test_version_${{ matrix.version }}_on_${{ matrix.os }}
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.version }}
+      - run: uname -a && go version
diff --git a/modules/actions/jobparser/testdata/job_name_with_matrix_dynamic.out.yaml b/modules/actions/jobparser/testdata/job_name_with_matrix_dynamic.out.yaml
new file mode 100644
index 0000000000000..58ea9535f5562
--- /dev/null
+++ b/modules/actions/jobparser/testdata/job_name_with_matrix_dynamic.out.yaml
@@ -0,0 +1,66 @@
+name: test
+jobs:
+  job1:
+    name: job1
+    runs-on: ubuntu-latest
+    steps:
+      - id: version-map
+        name: Generate the version map
+        run: |
+          echo "versions=[1.17, 1.18, 1.19]" >> $GITHUB_OUTPUT
+    outputs:
+      versions: ${{ steps.version-map.outputs.versions }}
+---
+name: test
+jobs:
+  job2:
+    name: test_version_1.17_on_ubuntu-24.04
+    needs: job1
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.version }}
+      - run: uname -a && go version
+    strategy:
+      matrix:
+        os:
+          - ubuntu-24.04
+        version:
+          - 1.17
+---
+name: test
+jobs:
+  job2:
+    name: test_version_1.18_on_ubuntu-24.04
+    needs: job1
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.version }}
+      - run: uname -a && go version
+    strategy:
+      matrix:
+        os:
+          - ubuntu-24.04
+        version:
+          - 1.18
+---
+name: test
+jobs:
+  job2:
+    name: test_version_1.19_on_ubuntu-24.04
+    needs: job1
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.version }}
+      - run: uname -a && go version
+    strategy:
+      matrix:
+        os:
+          - ubuntu-24.04
+        version:
+          - 1.19
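Note for reviewers: roughly how the new options drive the expansion shown in the testdata pair above (a sketch, not part of the patch; `content` stands for the .in.yaml bytes):

	workflows, err := jobparser.Parse(content,
		jobparser.WithJobResults(map[string]string{"job1": "success"}),
		jobparser.WithJobOutputs(map[string]map[string]string{
			"job1": {"versions": "[1.17, 1.18, 1.19]"},
		}),
	)
	// err is nil; workflows contains job1 plus one single-job workflow per
	// matrix combination of job2 (1.17, 1.18, 1.19), matching the .out.yaml.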
diff --git a/modules/issue/template/unmarshal.go b/modules/issue/template/unmarshal.go
index ceab6babf4c44..2abf72fc7c8a2 100644
--- a/modules/issue/template/unmarshal.go
+++ b/modules/issue/template/unmarshal.go
@@ -14,7 +14,7 @@ import (
 	api "code.gitea.io/gitea/modules/structs"
 	"code.gitea.io/gitea/modules/util"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 // CouldBe indicates a file with the filename could be a template,
diff --git a/modules/label/parser.go b/modules/label/parser.go
index 2a10152062649..635d319614b45 100644
--- a/modules/label/parser.go
+++ b/modules/label/parser.go
@@ -10,7 +10,7 @@ import (
 
 	"code.gitea.io/gitea/modules/options"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 type labelFile struct {
diff --git a/modules/markup/markdown/convertyaml.go b/modules/markup/markdown/convertyaml.go
index 04664a9c1db4f..3034fae2bcb8c 100644
--- a/modules/markup/markdown/convertyaml.go
+++ b/modules/markup/markdown/convertyaml.go
@@ -11,7 +11,7 @@ import (
 
 	"github.com/yuin/goldmark/ast"
 	east "github.com/yuin/goldmark/extension/ast"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 func nodeToTable(meta *yaml.Node) ast.Node {
diff --git a/modules/markup/markdown/meta.go b/modules/markup/markdown/meta.go
index 6ddd892110bc8..ecc0b81098c45 100644
--- a/modules/markup/markdown/meta.go
+++ b/modules/markup/markdown/meta.go
@@ -9,7 +9,7 @@ import (
 	"unicode"
 	"unicode/utf8"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 func isYAMLSeparator(line []byte) bool {
diff --git a/modules/markup/markdown/renderconfig.go b/modules/markup/markdown/renderconfig.go
index d8b1b10ce632a..19e15f4886295 100644
--- a/modules/markup/markdown/renderconfig.go
+++ b/modules/markup/markdown/renderconfig.go
@@ -10,7 +10,7 @@ import (
 	"code.gitea.io/gitea/modules/markup"
 
 	"github.com/yuin/goldmark/ast"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 // RenderConfig represents rendering configuration for this file
diff --git a/modules/markup/markdown/renderconfig_test.go b/modules/markup/markdown/renderconfig_test.go
index 53c52177a7512..84e4ac33f664d 100644
--- a/modules/markup/markdown/renderconfig_test.go
+++ b/modules/markup/markdown/renderconfig_test.go
@@ -9,7 +9,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 func TestRenderConfig_UnmarshalYAML(t *testing.T) {
diff --git a/modules/migration/file_format.go b/modules/migration/file_format.go
index fd6ac45a21073..86a1aa90a2af4 100644
--- a/modules/migration/file_format.go
+++ b/modules/migration/file_format.go
@@ -13,7 +13,7 @@ import (
 	"code.gitea.io/gitea/modules/log"
 
 	"github.com/santhosh-tekuri/jsonschema/v6"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 // schemaLoader implements jsonschema.URLLoader
diff --git a/modules/optional/serialization.go b/modules/optional/serialization.go
index 345ce562686ed..f9174053bfda8 100644
--- a/modules/optional/serialization.go
+++ b/modules/optional/serialization.go
@@ -6,7 +6,7 @@ package optional
 import (
 	"code.gitea.io/gitea/modules/json"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 func (o *Option[T]) UnmarshalJSON(data []byte) error {
diff --git a/modules/optional/serialization_test.go b/modules/optional/serialization_test.go
index c059294bbb99d..12bf747d11d05 100644
--- a/modules/optional/serialization_test.go
+++ b/modules/optional/serialization_test.go
@@ -11,7 +11,7 @@ import (
 	"code.gitea.io/gitea/modules/optional"
 
 	"github.com/stretchr/testify/assert"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 type testSerializationStruct struct {
diff --git a/modules/packages/helm/metadata.go b/modules/packages/helm/metadata.go
index 421fc5e7259ca..b5e9299458e79 100644
--- a/modules/packages/helm/metadata.go
+++ b/modules/packages/helm/metadata.go
@@ -13,7 +13,7 @@ import (
 	"code.gitea.io/gitea/modules/validation"
 
 	"github.com/hashicorp/go-version"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 var (
diff --git a/modules/packages/pub/metadata.go b/modules/packages/pub/metadata.go
index a2cf6b728ab9f..00d24e8483868 100644
--- a/modules/packages/pub/metadata.go
+++ b/modules/packages/pub/metadata.go
@@ -14,7 +14,7 @@ import (
 	"code.gitea.io/gitea/modules/validation"
 
 	"github.com/hashicorp/go-version"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 var (
diff --git a/modules/packages/rubygems/metadata.go b/modules/packages/rubygems/metadata.go
index 5c01abc74358b..fc6c75840f84f 100644
--- a/modules/packages/rubygems/metadata.go
+++ b/modules/packages/rubygems/metadata.go
@@ -14,7 +14,7 @@ import (
 	"code.gitea.io/gitea/modules/util"
 	"code.gitea.io/gitea/modules/validation"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 var (
diff --git a/modules/structs/issue.go b/modules/structs/issue.go
index fd29727a4365e..7c230bafa9f31 100644
--- a/modules/structs/issue.go
+++ b/modules/structs/issue.go
@@ -10,7 +10,7 @@ import (
 	"strings"
 	"time"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 // StateType issue state type
diff --git a/modules/structs/issue_test.go b/modules/structs/issue_test.go
index 55bd01df49631..d53077a151d57 100644
--- a/modules/structs/issue_test.go
+++ b/modules/structs/issue_test.go
@@ -7,7 +7,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 func TestIssueTemplate_Type(t *testing.T) {
@@ -95,7 +95,7 @@ labels:
 		t.Run(tt.name, func(t *testing.T) {
 			err := yaml.Unmarshal([]byte(tt.content), tt.tmpl)
 			if tt.wantErr != "" {
-				assert.EqualError(t, err, tt.wantErr)
+				assert.ErrorContains(t, err, tt.wantErr)
 			} else {
 				assert.NoError(t, err)
 				assert.Equal(t, tt.want, tt.tmpl)
diff --git a/routers/api/packages/helm/helm.go b/routers/api/packages/helm/helm.go
index 4c1b72d5c010b..f49130ad723df 100644
--- a/routers/api/packages/helm/helm.go
+++ b/routers/api/packages/helm/helm.go
@@ -23,7 +23,7 @@ import (
 	"code.gitea.io/gitea/services/context"
 	packages_service "code.gitea.io/gitea/services/packages"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 func apiError(ctx *context.Context, status int, obj any) {
diff --git a/routers/web/devtest/mail_preview.go b/routers/web/devtest/mail_preview.go
index 7b1787d52bc9e..9e2c51153ec1b 100644
--- a/routers/web/devtest/mail_preview.go
+++ b/routers/web/devtest/mail_preview.go
@@ -12,7 +12,7 @@ import (
 	"code.gitea.io/gitea/services/context"
 	"code.gitea.io/gitea/services/mailer"
 
-	"gopkg.in/yaml.v3"
+	"go.yaml.in/yaml/v4"
 )
 
 func MailPreviewRender(ctx *context.Context) {
diff --git a/routers/web/web.go b/routers/web/web.go
index 61d1fdc1421a8..facb70c37c078 100644
--- a/routers/web/web.go
+++ b/routers/web/web.go
@@ -41,6 +41,7 @@ import (
 	"code.gitea.io/gitea/routers/web/user"
 	user_setting "code.gitea.io/gitea/routers/web/user/setting"
 	"code.gitea.io/gitea/routers/web/user/setting/security"
+	actions_service "code.gitea.io/gitea/services/actions"
 	auth_service "code.gitea.io/gitea/services/auth"
 	"code.gitea.io/gitea/services/context"
 	"code.gitea.io/gitea/services/forms"
@@ -286,6 +287,8 @@ func Routes() *web.Router {
 
 	if setting.Metrics.Enabled {
 		prometheus.MustRegister(metrics.NewCollector())
+		// Register matrix re-evaluation metrics
+		prometheus.MustRegister(actions_service.NewMatrixMetricsCollector())
 		routes.Get("/metrics", append(mid, Metrics)...)
 	}
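Note for reviewers: NewMatrixMetricsCollector is registered here but implemented outside the hunks shown in this diff. For orientation, a minimal prometheus.Collector has roughly this shape (hypothetical metric name and value source; not the actual implementation):

	type matrixMetricsCollector struct {
		reevaluations *prometheus.Desc
	}

	func NewMatrixMetricsCollector() prometheus.Collector {
		return &matrixMetricsCollector{
			reevaluations: prometheus.NewDesc(
				"gitea_actions_matrix_reevaluations_total", // hypothetical name
				"Matrix re-evaluations performed", nil, nil),
		}
	}

	func (c *matrixMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
		ch <- c.reevaluations
	}

	func (c *matrixMetricsCollector) Collect(ch chan<- prometheus.Metric) {
		// The real collector would read the shared state behind GetMatrixMetrics().
		ch <- prometheus.MustNewConstMetric(c.reevaluations, prometheus.CounterValue, 0)
	}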
diff --git a/services/actions/job_emitter.go b/services/actions/job_emitter.go
index 20a4f81eabb91..d1099e207df1b 100644
--- a/services/actions/job_emitter.go
+++ b/services/actions/job_emitter.go
@@ -7,6 +7,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"time"
 
 	actions_model "code.gitea.io/gitea/models/actions"
 	"code.gitea.io/gitea/models/db"
@@ -202,6 +203,9 @@ func checkJobsOfRun(ctx context.Context, run *actions_model.ActionRun) (jobs, up
 	if err != nil {
 		return nil, nil, err
 	}
+
+	log.Debug("Checking %d jobs for run %d (status: %s)", len(jobs), run.ID, run.Status)
+
 	vars, err := actions_model.GetVariablesOfRun(ctx, run)
 	if err != nil {
 		return nil, nil, err
@@ -213,14 +217,18 @@
 	}
 	updates := newJobStatusResolver(jobs, vars).Resolve(ctx)
+	log.Debug("Job status resolver returned %d job status updates for run %d", len(updates), run.ID)
+
 	for _, job := range jobs {
 		if status, ok := updates[job.ID]; ok {
+			oldStatus := job.Status
 			job.Status = status
 			if n, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"status": actions_model.StatusBlocked}, "status"); err != nil {
 				return err
 			} else if n != 1 {
 				return fmt.Errorf("no affected for updating blocked job %v", job.ID)
 			}
+			log.Debug("Job %d (JobID: %s) status updated: %s -> %s", job.ID, job.JobID, oldStatus, status)
 			updatedJobs = append(updatedJobs, job)
 		}
 	}
@@ -229,6 +237,20 @@
 		return nil, nil, err
 	}
 
+	// Reload jobs from the database to pick up any newly created matrix jobs
+	oldJobCount := len(jobs)
+	jobs, err = db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID})
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if len(jobs) > oldJobCount {
+		log.Info("Matrix re-evaluation created %d new jobs for run %d (was %d, now %d)",
+			len(jobs)-oldJobCount, run.ID, oldJobCount, len(jobs))
+	}
+
+	log.Debug("Job check completed for run %d: %d jobs updated, %d total jobs", run.ID, len(updatedJobs), len(jobs))
+
 	return jobs, updatedJobs, nil
 }
 
@@ -313,47 +335,107 @@ func (r *jobStatusResolver) resolveJobHasIfCondition(actionRunJob *actions_model
 func (r *jobStatusResolver) resolve(ctx context.Context) map[int64]actions_model.Status {
 	ret := map[int64]actions_model.Status{}
+
+	resolveMetrics := struct {
+		totalBlocked       int
+		matrixReevaluated  int
+		concurrencyUpdated int
+		jobsStarted        int
+		jobsSkipped        int
+	}{}
+
 	for id, status := range r.statuses {
 		actionRunJob := r.jobMap[id]
 		if status != actions_model.StatusBlocked {
 			continue
 		}
+
+		resolveMetrics.totalBlocked++
+		log.Debug("Resolving blocked job %d (JobID: %s, RunID: %d)", id, actionRunJob.JobID, actionRunJob.RunID)
+
 		allDone, allSucceed := r.resolveCheckNeeds(id)
 		if !allDone {
+			log.Debug("Job %d: not all dependencies completed yet", id)
 			continue
 		}
 
+		log.Debug("Job %d: all dependencies completed (allSucceed: %v), checking matrix re-evaluation", id, allSucceed)
+
+		// Try to re-evaluate the matrix with job outputs if it depends on them
+		startTime := time.Now()
+		newMatrixJobs, err := ReEvaluateMatrixForJobWithNeeds(ctx, actionRunJob, r.vars)
+		duration := time.Since(startTime).Milliseconds()
+
+		if err != nil {
+			log.Error("Matrix re-evaluation error for job %d (JobID: %s): %v (duration: %dms)", id, actionRunJob.JobID, err, duration)
+			continue
+		}
+
+		// If new matrix jobs were created, add them to the resolver and continue
+		if len(newMatrixJobs) > 0 {
+			resolveMetrics.matrixReevaluated++
+			log.Info("Matrix re-evaluation succeeded for job %d (JobID: %s): created %d new jobs (duration: %dms)",
+				id, actionRunJob.JobID, len(newMatrixJobs), duration)
+
+			// Mark the original matrix placeholder job as skipped so it won't be run later.
+			actionRunJob.Status = actions_model.StatusSkipped
+			if _, err := db.GetEngine(ctx).ID(actionRunJob.ID).Cols("status").Update(actionRunJob); err != nil {
+				log.Error("Failed to mark matrix placeholder job %d (JobID: %s) as skipped after re-evaluation: %v", id, actionRunJob.JobID, err)
+			}
+			continue
+		}
+
+		log.Debug("Job %d: no matrix re-evaluation needed or result is empty", id)
+
 		// update concurrency and check whether the job can run now
-		err := updateConcurrencyEvaluationForJobWithNeeds(ctx, actionRunJob, r.vars)
+		err = updateConcurrencyEvaluationForJobWithNeeds(ctx, actionRunJob, r.vars)
 		if err != nil {
 			// The err can be caused by different cases: database error, or syntax error, or the needed jobs haven't completed
 			// At the moment there is no way to distinguish them.
 			// Actually, for most cases, the error is caused by "syntax error" / "the needed jobs haven't completed (skipped?)"
 			// TODO: if workflow or concurrency expression has syntax error, there should be a user error message, need to show it to end users
-			log.Debug("updateConcurrencyEvaluationForJobWithNeeds failed, this job will stay blocked: job: %d, err: %v", id, err)
+			log.Debug("Concurrency evaluation failed for job %d (JobID: %s): %v (job will stay blocked)", id, actionRunJob.JobID, err)
			continue
 		}
 
+		resolveMetrics.concurrencyUpdated++
+
 		shouldStartJob := true
 		if !allSucceed {
 			// Not all dependent jobs completed successfully:
 			// * if the job has "if" condition, it can be started, then the act_runner will evaluate the "if" condition.
 			// * otherwise, the job should be skipped.
 			shouldStartJob = r.resolveJobHasIfCondition(actionRunJob)
+			log.Debug("Job %d: not all dependencies succeeded, should start (if-condition result): %v", id, shouldStartJob)
 		}
 
 		newStatus := util.Iif(shouldStartJob, actions_model.StatusWaiting, actions_model.StatusSkipped)
 		if newStatus == actions_model.StatusWaiting {
 			newStatus, err = PrepareToStartJobWithConcurrency(ctx, actionRunJob)
 			if err != nil {
-				log.Error("ShouldBlockJobByConcurrency failed, this job will stay blocked: job: %d, err: %v", id, err)
+				log.Error("Concurrency check failed for job %d (JobID: %s): %v (job will stay blocked)", id, actionRunJob.JobID, err)
 			}
 		}
 
 		if newStatus != actions_model.StatusBlocked {
 			ret[id] = newStatus
+			switch newStatus {
+			case actions_model.StatusWaiting:
+				resolveMetrics.jobsStarted++
+				log.Info("Job %d (JobID: %s) transitioned to StatusWaiting", id, actionRunJob.JobID)
+			case actions_model.StatusSkipped:
+				resolveMetrics.jobsSkipped++
+				log.Info("Job %d (JobID: %s) transitioned to StatusSkipped", id, actionRunJob.JobID)
+			}
 		}
 	}
+
+	// Log resolution metrics summary
+	if resolveMetrics.totalBlocked > 0 {
+		log.Debug("Job resolution summary: total_blocked=%d, matrix_reevaluated=%d, concurrency_updated=%d, jobs_started=%d, jobs_skipped=%d",
+			resolveMetrics.totalBlocked, resolveMetrics.matrixReevaluated, resolveMetrics.concurrencyUpdated,
+			resolveMetrics.jobsStarted, resolveMetrics.jobsSkipped)
+	}
+
 	return ret
 }
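Note for reviewers: services/actions/matrix.go below gates deferred evaluation on whether the stored strategy references job outputs. The contract, in short (a sketch using the functions defined below, not additional patch code):

	// A matrix value referencing "needs." defers expansion until the needed jobs finish:
	HasMatrixWithNeeds("matrix:\n  version: ${{ fromJSON(needs.job1.outputs.versions) }}\n") // true
	// A static matrix expands immediately at parse time:
	HasMatrixWithNeeds("matrix:\n  os: [linux, windows]\n") // false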
+// SPDX-License-Identifier: MIT + +package actions + +import ( + "context" + "errors" + "fmt" + "maps" + "strings" + "time" + + actions_model "code.gitea.io/gitea/models/actions" + "code.gitea.io/gitea/modules/actions/jobparser" + "code.gitea.io/gitea/modules/log" + + "go.yaml.in/yaml/v4" +) + +// ExtractRawStrategies extracts strategy definitions from the raw workflow content +// Returns a map of jobID to strategy YAML for jobs that have matrix dependencies +func ExtractRawStrategies(content []byte) (map[string]string, error) { + var workflowDef struct { + Jobs map[string]struct { + Strategy any `yaml:"strategy"` + Needs any `yaml:"needs"` + } `yaml:"jobs"` + } + + if err := yaml.Unmarshal(content, &workflowDef); err != nil { + return nil, err + } + + strategies := make(map[string]string) + for jobID, jobDef := range workflowDef.Jobs { + if jobDef.Strategy == nil { + continue + } + + // Check if this job has needs (dependencies) + var needsList []string + switch needs := jobDef.Needs.(type) { + case string: + needsList = append(needsList, needs) + case []any: + for _, need := range needs { + if needStr, ok := need.(string); ok { + needsList = append(needsList, needStr) + } + } + } + + // Only store strategy for jobs with dependencies + if len(needsList) > 0 { + if strategyBytes, err := yaml.Marshal(jobDef.Strategy); err == nil { + strategies[jobID] = string(strategyBytes) + } + } + } + + return strategies, nil +} + +// HasMatrixWithNeeds checks if a job's strategy contains a matrix that depends on job outputs +func HasMatrixWithNeeds(rawStrategy string) bool { + if rawStrategy == "" { + return false + } + + var strategy map[string]any + if err := yaml.Unmarshal([]byte(rawStrategy), &strategy); err != nil { + return false + } + + matrix, ok := strategy["matrix"] + if !ok { + return false + } + + // Check if any matrix value contains "needs." reference + matrixStr := fmt.Sprintf("%v", matrix) + return strings.Contains(matrixStr, "needs.") +} + +// ReEvaluateMatrixForJobWithNeeds re-evaluates the matrix strategy of a job using outputs from dependent jobs +// If the matrix depends on job outputs and all dependent jobs are done, it will: +// 1. Evaluate the matrix with the job outputs +// 2. Create new ActionRunJobs for each matrix combination +// 3. 
Return the newly created jobs +func ReEvaluateMatrixForJobWithNeeds(ctx context.Context, job *actions_model.ActionRunJob, vars map[string]string) ([]*actions_model.ActionRunJob, error) { + startTime := time.Now() + + if job.IsMatrixEvaluated || job.RawStrategy == "" { + return nil, nil + } + + if !HasMatrixWithNeeds(job.RawStrategy) { + // Mark as evaluated since there's no needs-dependent matrix and persist to DB + job.IsMatrixEvaluated = true + if _, err := actions_model.UpdateRunJob(ctx, job, nil, "is_matrix_evaluated"); err != nil { + log.Error("Failed to persist is_matrix_evaluated flag for job %d: %v", job.ID, err) + } + log.Debug("Matrix re-evaluation skipped for job %d: no needs-dependent matrix found", job.ID) + return nil, nil + } + + log.Debug("Starting matrix re-evaluation for job %d (JobID: %s)", job.ID, job.JobID) + + // Get the outputs from dependent jobs + taskNeeds, err := FindTaskNeeds(ctx, job) + if err != nil { + errMsg := fmt.Sprintf("failed to find task needs for job %d (JobID: %s): %v", job.ID, job.JobID, err) + log.Error("Matrix re-evaluation error: %s", errMsg) + return nil, fmt.Errorf("find task needs: %w", err) + } + + log.Debug("Found %d task needs for job %d (JobID: %s)", len(taskNeeds), job.ID, job.JobID) + + // If any task needs are not done, we can't evaluate yet + pendingNeeds := []string{} + for jobID, taskNeed := range taskNeeds { + if !taskNeed.Result.IsDone() { + pendingNeeds = append(pendingNeeds, fmt.Sprintf("%s(%s)", jobID, taskNeed.Result)) + } + } + if len(pendingNeeds) > 0 { + log.Debug("Matrix re-evaluation deferred for job %d: pending needs: %v", job.ID, pendingNeeds) + GetMatrixMetrics().RecordDeferred() + return nil, nil + } + + // Merge vars with needs outputs + mergedVars := mergeNeedsIntoVars(vars, taskNeeds) + log.Debug("Merged %d variables with needs outputs for job %d", len(mergedVars), job.ID) + + // Load the original run to get workflow context + if job.Run == nil { + if err := job.LoadRun(ctx); err != nil { + errMsg := fmt.Sprintf("failed to load run for job %d (JobID: %s): %v", job.ID, job.JobID, err) + log.Error("Matrix re-evaluation error: %s", errMsg) + GetMatrixMetrics().RecordReevaluation(time.Since(startTime), false, 0) + return nil, fmt.Errorf("load run: %w", err) + } + } + + // Verify run is not nil after loading + if job.Run == nil { + errMsg := fmt.Sprintf("run is nil for job %d (JobID: %s) after loading", job.ID, job.JobID) + log.Error("Matrix re-evaluation error: %s", errMsg) + GetMatrixMetrics().RecordReevaluation(time.Since(startTime), false, 0) + return nil, errors.New("run not found: nil run") + } + + // Load run attributes (TriggerUser, Repo, etc.) 
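+	// so the gitea context generated below can resolve repository and trigger details during expression evaluation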
+ if err := job.Run.LoadAttributes(ctx); err != nil { + errMsg := fmt.Sprintf("failed to load run attributes for job %d (JobID: %s): %v", job.ID, job.JobID, err) + log.Error("Matrix re-evaluation error: %s", errMsg) + GetMatrixMetrics().RecordReevaluation(time.Since(startTime), false, 0) + return nil, fmt.Errorf("load run attributes: %w", err) + } + + // Create the giteaCtx for expression evaluation + giteaCtx := GenerateGiteaContext(job.Run, job) + + // Convert taskNeeds to job outputs format for jobparser + jobOutputs := make(map[string]map[string]string) + jobResults := make(map[string]string) + for jobID, taskNeed := range taskNeeds { + jobOutputs[jobID] = taskNeed.Outputs + jobResults[jobID] = taskNeed.Result.String() + } + + // We need to construct a workflow that includes both this job AND its dependencies + // so that the jobparser can resolve needs.*.outputs.* expressions + workflowYAML, err := constructWorkflowWithNeeds(job, taskNeeds) + if err != nil { + // If we can't construct the workflow, we can't expand the matrix + // Mark as evaluated and skip the job to prevent the placeholder from running unexpanded + job.IsMatrixEvaluated = true + job.Status = actions_model.StatusSkipped + if _, dbErr := actions_model.UpdateRunJob(ctx, job, nil, "is_matrix_evaluated", "status"); dbErr != nil { + log.Error("Failed to persist is_matrix_evaluated/status flag for job %d after workflow construction failure: %v", job.ID, dbErr) + } + log.Error("Failed to construct workflow for job %d (JobID: %s): %v, marking as skipped", job.ID, job.JobID, err) + GetMatrixMetrics().RecordReevaluation(time.Since(startTime), false, 0) + return nil, err + } + + // Parse the constructed workflow with job outputs to expand the matrix + parseStartTime := time.Now() + jobs, err := jobparser.Parse( + workflowYAML, + jobparser.WithVars(mergedVars), + jobparser.WithGitContext(giteaCtx.ToGitHubContext()), + jobparser.WithJobOutputs(jobOutputs), + jobparser.WithJobResults(jobResults), + ) + parseTime := time.Since(parseStartTime) + GetMatrixMetrics().RecordParseTime(parseTime) + + if err != nil { + // If parsing fails, we can't expand the matrix + // Mark as evaluated and persist to DB to avoid repeated parse attempts + job.IsMatrixEvaluated = true + if _, dbErr := actions_model.UpdateRunJob(ctx, job, nil, "is_matrix_evaluated"); dbErr != nil { + log.Error("Failed to persist is_matrix_evaluated flag for job %d after parse failure: %v", job.ID, dbErr) + } + errMsg := fmt.Sprintf("failed to parse workflow payload for job %d (JobID: %s) during matrix expansion. Error: %v. 
RawStrategy: %s", + job.ID, job.JobID, err, job.RawStrategy) + log.Error("Matrix parse error: %s", errMsg) + GetMatrixMetrics().RecordReevaluation(time.Since(startTime), false, 0) + return nil, nil + } + + if len(jobs) == 0 { + // No jobs generated - mark as evaluated and skip the placeholder job + job.IsMatrixEvaluated = true + job.Status = actions_model.StatusSkipped + if _, err := actions_model.UpdateRunJob(ctx, job, nil, "is_matrix_evaluated", "status"); err != nil { + log.Error("Failed to persist is_matrix_evaluated/status flag for job %d: %v", job.ID, err) + } + log.Debug("No jobs generated from matrix expansion for job %d (JobID: %s), marking as skipped", job.ID, job.JobID) + return nil, nil + } + + log.Debug("Parsed %d matrix combinations for job %d (JobID: %s)", len(jobs), job.ID, job.JobID) + + // Create new ActionRunJobs for each parsed workflow (each matrix combination) + newJobs := make([]*actions_model.ActionRunJob, 0) + + for i, parsedSingleWorkflow := range jobs { + id, jobDef := parsedSingleWorkflow.Job() + if jobDef == nil { + log.Warn("Skipped nil jobDef at index %d for job %d (JobID: %s)", i, job.ID, job.JobID) + continue + } + + // Skip the original job ID - we only want the matrix-expanded versions + if id == job.JobID { + log.Debug("Skipped original job ID %s in matrix expansion for job %d", id, job.ID) + continue + } + + // Erase needs from the payload before storing + needs := jobDef.Needs() + if err := parsedSingleWorkflow.SetJob(id, jobDef.EraseNeeds()); err != nil { + log.Error("Failed to erase needs from job %s (matrix expansion for job %d): %v", id, job.ID, err) + continue + } + + payload, _ := parsedSingleWorkflow.Marshal() + + newJob := &actions_model.ActionRunJob{ + RunID: job.RunID, + RepoID: job.RepoID, + OwnerID: job.OwnerID, + CommitSHA: job.CommitSHA, + IsForkPullRequest: job.IsForkPullRequest, + Name: jobDef.Name, + WorkflowPayload: payload, + JobID: id, + Needs: needs, + RunsOn: jobDef.RunsOn(), + Status: actions_model.StatusBlocked, + } + + newJobs = append(newJobs, newJob) + } + + // If no new jobs were created, mark as evaluated, skip the placeholder job, and persist to DB + if len(newJobs) == 0 { + job.IsMatrixEvaluated = true + job.Status = actions_model.StatusSkipped + if _, err := actions_model.UpdateRunJob(ctx, job, nil, "is_matrix_evaluated", "status"); err != nil { + log.Error("Failed to persist is_matrix_evaluated/status flag for job %d: %v", job.ID, err) + } + log.Warn("No valid jobs created from matrix expansion for job %d (JobID: %s). 
Parsed workflows: %d, marking as skipped", job.ID, job.JobID, len(jobs)) + return nil, nil + } + + // Insert the new jobs into the database + insertStartTime := time.Now() + if err := actions_model.InsertActionRunJobs(ctx, newJobs); err != nil { + insertTime := time.Since(insertStartTime) + GetMatrixMetrics().RecordInsertTime(insertTime) + errMsg := fmt.Sprintf("failed to insert %d new matrix jobs for job %d (JobID: %s): %v", len(newJobs), job.ID, job.JobID, err) + log.Error("Matrix insertion error: %s", errMsg) + GetMatrixMetrics().RecordReevaluation(time.Since(startTime), false, 0) + return nil, fmt.Errorf("insert new jobs: %w", err) + } + insertTime := time.Since(insertStartTime) + GetMatrixMetrics().RecordInsertTime(insertTime) + + // Mark the original placeholder job as evaluated and skipped so it is never run + job.IsMatrixEvaluated = true + job.Status = actions_model.StatusSkipped + if _, err := actions_model.UpdateRunJob(ctx, job, nil, "is_matrix_evaluated", "status"); err != nil { + log.Error("Failed to update job %d after matrix expansion (is_matrix_evaluated/status): %v", job.ID, err) + } + + totalTime := time.Since(startTime) + GetMatrixMetrics().RecordReevaluation(totalTime, true, int64(len(newJobs))) + + log.Info("Successfully completed matrix re-evaluation for job %d (JobID: %s): created %d new jobs from %d matrix combinations (total: %dms, parse: %dms, insert: %dms)", + job.ID, job.JobID, len(newJobs), len(jobs), totalTime.Milliseconds(), parseTime.Milliseconds(), insertTime.Milliseconds()) + + return newJobs, nil +} + +// mergeNeedsIntoVars converts task needs outputs into variables for expression evaluation +func mergeNeedsIntoVars(baseVars map[string]string, taskNeeds map[string]*TaskNeed) map[string]string { + merged := make(map[string]string) + + // Copy base vars + maps.Copy(merged, baseVars) + + // Add needs outputs as variables in the format "needs.<job_id>.outputs.<output_key>"
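+	// For example, an output named "matrix" from a dependency job "generate" becomes the key "needs.generate.outputs.matrix"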
+ for jobID, taskNeed := range taskNeeds { + for outputKey, outputValue := range taskNeed.Outputs { + key := fmt.Sprintf("needs.%s.outputs.%s", jobID, outputKey) + merged[key] = outputValue + } + } + + return merged +} + +// constructWorkflowWithNeeds creates a workflow YAML that includes the target job +// and stub definitions for its dependencies so the jobparser can resolve needs.*.outputs expressions +func constructWorkflowWithNeeds(job *actions_model.ActionRunJob, taskNeeds map[string]*TaskNeed) ([]byte, error) { + // Parse the original job's workflow payload to get the job definition + var jobWorkflow map[string]any + if err := yaml.Unmarshal(job.WorkflowPayload, &jobWorkflow); err != nil { + return nil, fmt.Errorf("unmarshal job workflow: %w", err) + } + + // Extract the job definition from the parsed workflow + jobsSection, ok := jobWorkflow["jobs"].(map[string]any) + if !ok { + return nil, errors.New("invalid jobs section in workflow") + } + + // Create a new workflow with the target job and stub jobs for dependencies + newJobs := make(map[string]any) + + // Add stub jobs for each dependency with their outputs + for needJobID, taskNeed := range taskNeeds { + stubJob := map[string]any{ + "runs-on": "ubuntu-latest", + "outputs": taskNeed.Outputs, + "steps": []any{}, + } + newJobs[needJobID] = stubJob + } + + // Add the actual job we want to expand (with matrix and needs) + maps.Copy(newJobs, jobsSection) + + // Construct the full workflow + workflow := map[string]any{ + "name": "matrix-expansion", + "on": "push", + "jobs": newJobs, + } + + return yaml.Marshal(workflow) +} diff --git a/services/actions/matrix_metrics.go b/services/actions/matrix_metrics.go new file mode 100644 index 0000000000000..7c9c120e56913 --- /dev/null +++ b/services/actions/matrix_metrics.go @@ -0,0 +1,161 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package actions + +import ( + "sync" + "time" +) + +// MatrixMetrics tracks performance metrics for matrix re-evaluation operations +type MatrixMetrics struct { + mu sync.RWMutex + + // Counters + TotalReevaluations int64 + SuccessfulReevaluations int64 + FailedReevaluations int64 + JobsCreatedTotal int64 + DeferredReevaluations int64 + ParseCount int64 + InsertCount int64 + + // Timing + TotalReevaluationTime time.Duration + TotalParseTime time.Duration + TotalInsertTime time.Duration + + // Histograms (for detailed analysis, capped at the last 1000 samples) + ReevaluationTimes []time.Duration + ParseTimes []time.Duration + InsertTimes []time.Duration +} + +var ( + matrixMetricsInstance *MatrixMetrics + metricsOnce sync.Once +) + +// GetMatrixMetrics returns the global matrix metrics instance +func GetMatrixMetrics() *MatrixMetrics { + metricsOnce.Do(func() { + matrixMetricsInstance = &MatrixMetrics{ + ReevaluationTimes: make([]time.Duration, 0, 1000), + ParseTimes: make([]time.Duration, 0, 1000), + InsertTimes: make([]time.Duration, 0, 1000), + } + }) + return matrixMetricsInstance +} + +// appendToHistogram appends a duration to a histogram with a rolling window (keep the last 1000) +func appendToHistogram(histogram *[]time.Duration, duration time.Duration) { + if len(*histogram) < 1000 { + *histogram = append(*histogram, duration) + } else { + // Shift left and append the new value + copy(*histogram, (*histogram)[1:]) + (*histogram)[len(*histogram)-1] = duration + } +} + +// RecordReevaluation records a matrix re-evaluation attempt +func (m *MatrixMetrics) RecordReevaluation(duration time.Duration, success bool, jobsCreated int64) { + m.mu.Lock() + defer m.mu.Unlock() + + m.TotalReevaluations++ + m.TotalReevaluationTime += duration + + if success { + m.SuccessfulReevaluations++ + m.JobsCreatedTotal += jobsCreated + } else { + m.FailedReevaluations++ + } + + appendToHistogram(&m.ReevaluationTimes, duration) +} + +// RecordDeferred records a deferred matrix re-evaluation +func (m *MatrixMetrics) RecordDeferred() { + m.mu.Lock() + defer m.mu.Unlock() + m.DeferredReevaluations++ +} + +// RecordParseTime records the time taken to parse a workflow +func (m *MatrixMetrics) RecordParseTime(duration time.Duration) { + m.mu.Lock() + defer m.mu.Unlock() + + m.ParseCount++ + m.TotalParseTime += duration + appendToHistogram(&m.ParseTimes, duration) +} + +// RecordInsertTime records the time taken to insert matrix jobs +func (m *MatrixMetrics) RecordInsertTime(duration time.Duration) { + m.mu.Lock() + defer m.mu.Unlock() + + m.InsertCount++ + m.TotalInsertTime += duration + appendToHistogram(&m.InsertTimes, duration) +} + +// GetStats returns a snapshot of the current metrics +func (m *MatrixMetrics) GetStats() map[string]any { + m.mu.RLock() + defer m.mu.RUnlock() + + avgReevaluationTime := time.Duration(0) + if m.TotalReevaluations > 0 { + avgReevaluationTime = m.TotalReevaluationTime / time.Duration(m.TotalReevaluations) + } + + // Divide by the full counts, not the capped histogram lengths, so the averages stay correct beyond 1000 samples + avgParseTime := time.Duration(0) + if m.ParseCount > 0 { + avgParseTime = m.TotalParseTime / time.Duration(m.ParseCount) + } + + avgInsertTime := time.Duration(0) + if m.InsertCount > 0 { + avgInsertTime = m.TotalInsertTime / time.Duration(m.InsertCount) + } + + successRate := 0.0 + if m.TotalReevaluations > 0 { + successRate = float64(m.SuccessfulReevaluations) / float64(m.TotalReevaluations) * 100 + } + + return map[string]any{ + "total_reevaluations": m.TotalReevaluations, + "successful_reevaluations": m.SuccessfulReevaluations, + "failed_reevaluations": m.FailedReevaluations, + "deferred_reevaluations": m.DeferredReevaluations, + "success_rate_percent": successRate, + "total_jobs_created": m.JobsCreatedTotal, + "total_reevaluation_time_ms": m.TotalReevaluationTime.Milliseconds(), + "avg_reevaluation_time_ms": avgReevaluationTime.Milliseconds(), + "total_parse_time_ms": m.TotalParseTime.Milliseconds(), + "avg_parse_time_ms": avgParseTime.Milliseconds(), + "total_insert_time_ms": m.TotalInsertTime.Milliseconds(), + "avg_insert_time_ms": avgInsertTime.Milliseconds(), + } +}
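+ +// A snapshot from GetStats is an independent copy, so callers may log or serialize it freely. +// Illustrative sketch only (assuming the modules/log logger): +// +//	stats := GetMatrixMetrics().GetStats() +//	log.Info("matrix re-evaluation metrics: %v", stats)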
"success_rate_percent": successRate, + "total_jobs_created": m.JobsCreatedTotal, + "total_reevaluation_time_ms": m.TotalReevaluationTime.Milliseconds(), + "avg_reevaluation_time_ms": avgReevaluationTime.Milliseconds(), + "total_parse_time_ms": m.TotalParseTime.Milliseconds(), + "avg_parse_time_ms": avgParseTime.Milliseconds(), + "total_insert_time_ms": m.TotalInsertTime.Milliseconds(), + "avg_insert_time_ms": avgInsertTime.Milliseconds(), + } +} + +// Reset clears all metrics +func (m *MatrixMetrics) Reset() { + m.mu.Lock() + defer m.mu.Unlock() + + m.TotalReevaluations = 0 + m.SuccessfulReevaluations = 0 + m.FailedReevaluations = 0 + m.JobsCreatedTotal = 0 + m.DeferredReevaluations = 0 + m.TotalReevaluationTime = 0 + m.TotalParseTime = 0 + m.TotalInsertTime = 0 + m.ReevaluationTimes = m.ReevaluationTimes[:0] + m.ParseTimes = m.ParseTimes[:0] + m.InsertTimes = m.InsertTimes[:0] +} diff --git a/services/actions/matrix_metrics_prometheus.go b/services/actions/matrix_metrics_prometheus.go new file mode 100644 index 0000000000000..eb27db3bb887a --- /dev/null +++ b/services/actions/matrix_metrics_prometheus.go @@ -0,0 +1,119 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package actions + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// MatrixMetricsCollector implements the prometheus.Collector interface +// and exposes matrix re-evaluation metrics for prometheus +type MatrixMetricsCollector struct { + // Counters + totalReevaluations prometheus.Gauge + successfulReevaluations prometheus.Gauge + failedReevaluations prometheus.Gauge + deferredReevaluations prometheus.Gauge + jobsCreatedTotal prometheus.Gauge + + // Timing (in milliseconds) + totalReevaluationTime prometheus.Gauge + avgReevaluationTime prometheus.Gauge + totalParseTime prometheus.Gauge + avgParseTime prometheus.Gauge + totalInsertTime prometheus.Gauge + avgInsertTime prometheus.Gauge + + // Rates + successRate prometheus.Gauge +} + +const ( + namespace = "gitea" + subsystem = "matrix" +) + +// newMatrixGauge creates a new Prometheus Gauge with standard matrix metrics naming +func newMatrixGauge(name, help string) prometheus.Gauge { + return prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: help, + }, + ) +} + +// NewMatrixMetricsCollector creates a new MatrixMetricsCollector +func NewMatrixMetricsCollector() *MatrixMetricsCollector { + return &MatrixMetricsCollector{ + totalReevaluations: newMatrixGauge("total_reevaluations", "Total number of matrix re-evaluation attempts"), + successfulReevaluations: newMatrixGauge("successful_reevaluations", "Number of successful matrix re-evaluations"), + failedReevaluations: newMatrixGauge("failed_reevaluations", "Number of failed matrix re-evaluations"), + deferredReevaluations: newMatrixGauge("deferred_reevaluations", "Number of deferred matrix re-evaluations (waiting for dependencies)"), + jobsCreatedTotal: newMatrixGauge("jobs_created_total", "Total number of jobs created from matrix expansion"), + totalReevaluationTime: newMatrixGauge("total_reevaluation_time_ms", "Total time spent on matrix re-evaluations in milliseconds"), + avgReevaluationTime: newMatrixGauge("avg_reevaluation_time_ms", "Average time per matrix re-evaluation in milliseconds"), + totalParseTime: newMatrixGauge("total_parse_time_ms", "Total time spent parsing workflow payloads in milliseconds"), + avgParseTime: newMatrixGauge("avg_parse_time_ms", "Average time per workflow parse in 
milliseconds"), + totalInsertTime: newMatrixGauge("total_insert_time_ms", "Total time spent inserting jobs into database in milliseconds"), + avgInsertTime: newMatrixGauge("avg_insert_time_ms", "Average time per database insert in milliseconds"), + successRate: newMatrixGauge("success_rate_percent", "Success rate of matrix re-evaluations as percentage (0-100)"), + } +} + +// Describe returns the metrics descriptions +func (c *MatrixMetricsCollector) Describe(ch chan<- *prometheus.Desc) { + c.totalReevaluations.Describe(ch) + c.successfulReevaluations.Describe(ch) + c.failedReevaluations.Describe(ch) + c.deferredReevaluations.Describe(ch) + c.jobsCreatedTotal.Describe(ch) + c.totalReevaluationTime.Describe(ch) + c.avgReevaluationTime.Describe(ch) + c.totalParseTime.Describe(ch) + c.avgParseTime.Describe(ch) + c.totalInsertTime.Describe(ch) + c.avgInsertTime.Describe(ch) + c.successRate.Describe(ch) +} + +// Collect collects the current metric values and sends them to the channel +func (c *MatrixMetricsCollector) Collect(ch chan<- prometheus.Metric) { + metrics := GetMatrixMetrics() + stats := metrics.GetStats() + + // Set counter values + c.totalReevaluations.Set(float64(stats["total_reevaluations"].(int64))) + c.successfulReevaluations.Set(float64(stats["successful_reevaluations"].(int64))) + c.failedReevaluations.Set(float64(stats["failed_reevaluations"].(int64))) + c.deferredReevaluations.Set(float64(stats["deferred_reevaluations"].(int64))) + c.jobsCreatedTotal.Set(float64(stats["total_jobs_created"].(int64))) + + // Set timing values (already in milliseconds) + c.totalReevaluationTime.Set(float64(stats["total_reevaluation_time_ms"].(int64))) + c.avgReevaluationTime.Set(float64(stats["avg_reevaluation_time_ms"].(int64))) + c.totalParseTime.Set(float64(stats["total_parse_time_ms"].(int64))) + c.avgParseTime.Set(float64(stats["avg_parse_time_ms"].(int64))) + c.totalInsertTime.Set(float64(stats["total_insert_time_ms"].(int64))) + c.avgInsertTime.Set(float64(stats["avg_insert_time_ms"].(int64))) + + // Set success rate + c.successRate.Set(stats["success_rate_percent"].(float64)) + + // Collect all metrics + c.totalReevaluations.Collect(ch) + c.successfulReevaluations.Collect(ch) + c.failedReevaluations.Collect(ch) + c.deferredReevaluations.Collect(ch) + c.jobsCreatedTotal.Collect(ch) + c.totalReevaluationTime.Collect(ch) + c.avgReevaluationTime.Collect(ch) + c.totalParseTime.Collect(ch) + c.avgParseTime.Collect(ch) + c.totalInsertTime.Collect(ch) + c.avgInsertTime.Collect(ch) + c.successRate.Collect(ch) +} diff --git a/services/actions/matrix_metrics_test.go b/services/actions/matrix_metrics_test.go new file mode 100644 index 0000000000000..db810004140df --- /dev/null +++ b/services/actions/matrix_metrics_test.go @@ -0,0 +1,88 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package actions + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" +) + +// Essential Prometheus Collector Tests + +func TestNewMatrixMetricsCollector(t *testing.T) { + collector := NewMatrixMetricsCollector() + assert.NotNil(t, collector) + assert.NotNil(t, collector.totalReevaluations) + assert.NotNil(t, collector.successRate) +} + +func TestMatrixMetricsCollectorDescribe(t *testing.T) { + collector := NewMatrixMetricsCollector() + ch := make(chan *prometheus.Desc, 100) + collector.Describe(ch) + assert.NotEmpty(t, ch) +} + +func TestMatrixMetricsCollectorCollect(t *testing.T) { + // Prime the singleton so its sync.Once has fired, then install a fresh instance. + // Setting matrixMetricsInstance to nil after the Once has fired would make GetMatrixMetrics return nil and panic. + GetMatrixMetrics() + metrics := &MatrixMetrics{ + ReevaluationTimes: make([]time.Duration, 0, 1000), + ParseTimes: make([]time.Duration, 0, 1000), + InsertTimes: make([]time.Duration, 0, 1000), + } + matrixMetricsInstance = metrics + metrics.RecordReevaluation(10*time.Millisecond, true, 5) + metrics.RecordParseTime(8 * time.Millisecond) + + collector := NewMatrixMetricsCollector() + ch := make(chan prometheus.Metric, 100) + collector.Collect(ch) + assert.NotEmpty(t, ch) +} + +func TestMatrixMetricsGetStats(t *testing.T) { + metrics := &MatrixMetrics{ + ReevaluationTimes: make([]time.Duration, 0, 1000), + ParseTimes: make([]time.Duration, 0, 1000), + InsertTimes: make([]time.Duration, 0, 1000), + } + + metrics.RecordReevaluation(10*time.Millisecond, true, 3) + metrics.RecordReevaluation(15*time.Millisecond, true, 2) + metrics.RecordReevaluation(5*time.Millisecond, false, 0) + + stats := metrics.GetStats() + assert.Equal(t, int64(3), stats["total_reevaluations"]) + assert.Equal(t, int64(2), stats["successful_reevaluations"]) + assert.Equal(t, int64(1), stats["failed_reevaluations"]) + assert.Greater(t, stats["success_rate_percent"].(float64), 60.0) +} + +func BenchmarkMatrixMetricsCollectorCollect(b *testing.B) { + // Prime the singleton first so the assignment below is not overwritten by the sync.Once inside GetMatrixMetrics + GetMatrixMetrics() + metrics := &MatrixMetrics{ + ReevaluationTimes: make([]time.Duration, 0, 1000), + ParseTimes: make([]time.Duration, 0, 1000), + InsertTimes: make([]time.Duration, 0, 1000), + } + matrixMetricsInstance = metrics + + for range 100 { + metrics.RecordReevaluation(10*time.Millisecond, true, 5) + metrics.RecordParseTime(5 * time.Millisecond) + } + + collector := NewMatrixMetricsCollector() + + b.ResetTimer() + for b.Loop() { + // Use a fresh channel each iteration to avoid filling up the buffer + ch := make(chan prometheus.Metric, 100) + collector.Collect(ch) + // Close and drain the channel so the buffered metrics are released + close(ch) + for range ch { + // discard metrics + } + } +} diff --git a/services/actions/run.go b/services/actions/run.go index e9fcdcaf43d60..b0b8c47a9d073 100644 --- a/services/actions/run.go +++ b/services/actions/run.go @@ -10,6 +10,7 @@ import ( actions_model "code.gitea.io/gitea/models/actions" "code.gitea.io/gitea/models/db" "code.gitea.io/gitea/modules/actions/jobparser" + "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/util" notify_service "code.gitea.io/gitea/services/notify" @@ -52,7 +53,7 @@ func PrepareRunAndInsert(ctx context.Context, content []byte, run *actions_model run.Title = jobs[0].RunName } - if err = InsertRun(ctx, run, jobs, vars, inputsWithDefaults); err != nil { + if err = InsertRun(ctx, run, content, jobs, vars, inputsWithDefaults); err != nil { return fmt.Errorf("InsertRun: %w", err) } @@ -74,7 +75,7 @@ // InsertRun inserts a run // The title will be cut off at 255 characters if it's longer than 255 characters.
-func InsertRun(ctx context.Context, run *actions_model.ActionRun, jobs []*jobparser.SingleWorkflow, vars map[string]string, inputs map[string]any) error { +func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte, jobs []*jobparser.SingleWorkflow, vars map[string]string, inputs map[string]any) error { return db.WithTx(ctx, func(ctx context.Context) error { index, err := db.GetNextResourceIndex(ctx, "action_run_index", run.RepoID) if err != nil { @@ -101,6 +102,14 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, jobs []*jobpar return err } + // Extract raw strategies from the original workflow before parsing + rawStrategies, err := ExtractRawStrategies(content) + if err != nil { + log.Warn("Failed to extract raw strategies from workflow: %v", err) + // Continue without raw strategies - jobs will work but dynamic matrix won't be supported + rawStrategies = nil + } + runJobs := make([]*actions_model.ActionRunJob, 0, len(jobs)) var hasWaitingJobs bool @@ -133,6 +142,13 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, jobs []*jobpar runJob.TokenPermissions = perms } + // Store raw strategy only if job has matrix that actually depends on job outputs (needs.*.outputs) + // This avoids unnecessary DB storage and later re-evaluation checks for purely static matrices + if rawStrategy, exists := rawStrategies[id]; exists && len(needs) > 0 && HasMatrixWithNeeds(rawStrategy) { + runJob.RawStrategy = rawStrategy + runJob.IsMatrixEvaluated = false + } + // check job concurrency if job.RawConcurrency != nil { rawConcurrency, err := yaml.Marshal(job.RawConcurrency) diff --git a/services/issue/template.go b/services/issue/template.go index 99977c67cf88b..f1a78c6ae0a0b 100644 --- a/services/issue/template.go +++ b/services/issue/template.go @@ -16,7 +16,7 @@ import ( api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v4" ) // templateDirCandidates issue templates directory diff --git a/services/migrations/dump.go b/services/migrations/dump.go index eb0367e9f94d7..18cab1715070f 100644 --- a/services/migrations/dump.go +++ b/services/migrations/dump.go @@ -26,7 +26,7 @@ import ( "code.gitea.io/gitea/modules/structs" "github.com/google/uuid" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v4" ) var _ base.Uploader = &RepositoryDumper{} diff --git a/services/migrations/restore.go b/services/migrations/restore.go index 5686285935065..bc4cc4b19d333 100644 --- a/services/migrations/restore.go +++ b/services/migrations/restore.go @@ -12,7 +12,7 @@ import ( base "code.gitea.io/gitea/modules/migration" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v4" ) // RepositoryRestorer implements an Downloader from the local directory diff --git a/tests/integration/actions_job_test.go b/tests/integration/actions_job_test.go index 3da290f1d3a6d..e71370a3ea60b 100644 --- a/tests/integration/actions_job_test.go +++ b/tests/integration/actions_job_test.go @@ -759,3 +759,97 @@ func getTaskJobNameByTaskID(t *testing.T, authToken, ownerName, repoName string, } return "" } + +func TestDynamicMatrixFromJobOutputs(t *testing.T) { + testCases := []struct { + treePath string + fileContent string + outcomes map[string]*mockTaskOutcome + }{ + { + treePath: ".gitea/workflows/dynamic-matrix.yml", + fileContent: `name: Dynamic Matrix from Job Outputs +on: + push: + paths: + - '.gitea/workflows/dynamic-matrix.yml' +jobs: + generate: + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.gen_matrix.outputs.matrix }} + steps: + - 
name: Generate matrix + id: gen_matrix + run: | + echo "matrix=[1,2,3]" >> "$GITHUB_OUTPUT" + + build: + needs: [generate] + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.generate.outputs.matrix) }} + steps: + - run: echo "Building version ${{ matrix.version }}" +`, + outcomes: map[string]*mockTaskOutcome{ + "generate": { + result: runnerv1.Result_RESULT_SUCCESS, + outputs: map[string]string{ + "matrix": "[1,2,3]", + }, + }, + "build (1)": { + result: runnerv1.Result_RESULT_SUCCESS, + }, + "build (2)": { + result: runnerv1.Result_RESULT_SUCCESS, + }, + "build (3)": { + result: runnerv1.Result_RESULT_SUCCESS, + }, + }, + }, + } + onGiteaRun(t, func(t *testing.T, u *url.URL) { + user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) + session := loginUser(t, user2.Name) + token := getTokenForLoggedInUser(t, session, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser) + + apiRepo := createActionsTestRepo(t, token, "actions-dynamic-jobs-outputs-with-matrix", false) + runner := newMockRunner() + runner.registerAsRepoRunner(t, user2.Name, apiRepo.Name, "mock-runner", []string{"ubuntu-latest"}, false) + + for _, tc := range testCases { + t.Run("test "+tc.treePath, func(t *testing.T) { + opts := getWorkflowCreateFileOptions(user2, apiRepo.DefaultBranch, "create "+tc.treePath, tc.fileContent) + createWorkflowFile(t, token, user2.Name, apiRepo.Name, tc.treePath, opts) + + // Execute the generate job first (use longer timeout for CI environments) + task := runner.fetchTask(t, 10*time.Second) + jobName := getTaskJobNameByTaskID(t, token, user2.Name, apiRepo.Name, task.Id) + assert.Equal(t, "generate", jobName) + outcome := tc.outcomes[jobName] + assert.NotNil(t, outcome) + runner.execTask(t, task, outcome) + + // Now the build job should be created with matrix expansion from the output + // We expect 3 tasks for build (1), build (2), build (3) + buildTasks := make([]int64, 0) + for range 3 { + buildTask := runner.fetchTask(t, 10*time.Second) + buildJobName := getTaskJobNameByTaskID(t, token, user2.Name, apiRepo.Name, buildTask.Id) + t.Logf("Fetched task: %s", buildJobName) + assert.Contains(t, []string{"build (1)", "build (2)", "build (3)"}, buildJobName, "Expected a build job with matrix index") + outcome := tc.outcomes[buildJobName] + assert.NotNil(t, outcome) + runner.execTask(t, buildTask, outcome) + buildTasks = append(buildTasks, buildTask.Id) + } + + assert.Len(t, buildTasks, 3, "Expected 3 build tasks from dynamic matrix") + }) + } + }) +} diff --git a/tests/integration/api_issue_config_test.go b/tests/integration/api_issue_config_test.go index f6045e1a80518..e8d91e5c7102c 100644 --- a/tests/integration/api_issue_config_test.go +++ b/tests/integration/api_issue_config_test.go @@ -15,7 +15,7 @@ import ( "code.gitea.io/gitea/tests" "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v4" ) func createIssueConfig(t *testing.T, user *user_model.User, repo *repo_model.Repository, issueConfig map[string]any) { diff --git a/tests/integration/api_packages_helm_test.go b/tests/integration/api_packages_helm_test.go index 02df4ae906dd9..be999940bd3cf 100644 --- a/tests/integration/api_packages_helm_test.go +++ b/tests/integration/api_packages_helm_test.go @@ -20,7 +20,7 @@ import ( "code.gitea.io/gitea/tests" "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v4" ) func TestPackageHelm(t *testing.T) { diff --git a/tests/integration/dump_restore_test.go 
b/tests/integration/dump_restore_test.go index d2d43075c3c00..1758a81d11722 100644 --- a/tests/integration/dump_restore_test.go +++ b/tests/integration/dump_restore_test.go @@ -23,7 +23,7 @@ import ( "code.gitea.io/gitea/services/migrations" "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v4" ) func TestDumpRestore(t *testing.T) {