Skip to content
Draft
2 changes: 1 addition & 1 deletion contrib/backport/backport.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ import (

"github.com/google/go-github/v84/github"
"github.com/urfave/cli/v3"
"gopkg.in/yaml.v3"
"go.yaml.in/yaml/v4"
)

const defaultVersion = "v1.18" // to backport to
Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,6 @@ require (
google.golang.org/grpc v1.80.0
google.golang.org/protobuf v1.36.11
gopkg.in/ini.v1 v1.67.1
gopkg.in/yaml.v3 v3.0.1
mvdan.cc/xurls/v2 v2.6.0
strk.kbt.io/projects/go/libravatar v0.0.0-20260301104140-add494e31dab
xorm.io/builder v0.3.13
Expand Down Expand Up @@ -282,6 +281,7 @@ require (
golang.org/x/tools v0.43.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401020348-3a24fdc17823 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

ignore (
Expand Down
15 changes: 15 additions & 0 deletions models/actions/run_job.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,13 @@ type ActionRunJob struct {
// It is JSON-encoded repo_model.ActionsTokenPermissions and may be empty if not specified.
TokenPermissions *repo_model.ActionsTokenPermissions `xorm:"JSON TEXT"`

RawStrategy string `xorm:"TEXT"` // raw strategy from job YAML's "strategy" section (stored before matrix expansion for deferred evaluation)

// IsMatrixEvaluated is only valid/needed when this job's RawStrategy is not empty and contains a matrix that depends on job outputs.
// If the matrix can't be evaluated yet (e.g. job hasn't completed), this field will be false.
// If the matrix has been successfully evaluated with job outputs, this field will be true.
IsMatrixEvaluated bool

Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Expand Down Expand Up @@ -289,3 +296,11 @@ func CancelPreviousJobsByJobConcurrency(ctx context.Context, job *ActionRunJob)

return CancelJobs(ctx, jobsToCancel)
}

// InsertActionRunJobs batch-inserts the given ActionRunJob records.
// An empty slice is a no-op and returns nil without touching the database.
func InsertActionRunJobs(ctx context.Context, jobs []*ActionRunJob) error {
	if len(jobs) > 0 {
		return db.Insert(ctx, jobs)
	}
	return nil
}
1 change: 1 addition & 0 deletions models/migrations/migrations.go
Original file line number Diff line number Diff line change
Expand Up @@ -405,6 +405,7 @@ func prepareMigrationTasks() []*migration {
newMigration(328, "Add TokenPermissions column to ActionRunJob", v1_26.AddTokenPermissionsToActionRunJob),
newMigration(329, "Add unique constraint for user badge", v1_26.AddUniqueIndexForUserBadge),
newMigration(330, "Add name column to webhook", v1_26.AddNameToWebhook),
newMigration(331, "Add support for matrix actions evaluation", v1_26.AddMatrixEvaluationColumnsToActionRunJob),
}
return preparedMigrations
}
Expand Down
310 changes: 12 additions & 298 deletions models/migrations/v1_26/v326.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,312 +4,26 @@
package v1_26

import (
"errors"
"fmt"
"net/url"
"strconv"
"strings"

"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
webhook_module "code.gitea.io/gitea/modules/webhook"

"xorm.io/xorm"
)

const (
	// actionsRunPath is the URL path segment between a repository's web link and the run identifier.
	actionsRunPath = "/actions/runs/"

	// Only commit status target URLs whose resolved run ID is smaller than this threshold are rewritten by this partial migration.
	// The fixed value 1000 is a conservative cutoff chosen to cover the smaller legacy run indexes that are most likely to be confused with ID-based URLs at runtime.
	// Larger legacy {run} or {job} numbers are usually easier to disambiguate. For example:
	// * /actions/runs/1200/jobs/1420 is most likely an ID-based URL, because a run should not contain more than 256 jobs.
	// * /actions/runs/1500/jobs/3 is most likely an index-based URL, because a job ID cannot be smaller than its run ID.
	// But URLs with small numbers, such as /actions/runs/5/jobs/6, are much harder to distinguish reliably.
	// This migration therefore prioritizes rewriting target URLs for runs in that lower range.
	legacyURLIDThreshold int64 = 1000
)

// migrationRepository is a minimal projection of the "repository" table,
// used only to build repository web links during this migration.
type migrationRepository struct {
	ID        int64
	OwnerName string
	Name      string
}

// migrationActionRun is a minimal projection of the "action_run" table,
// carrying just the columns needed to resolve a run's commit SHA.
type migrationActionRun struct {
	ID        int64
	RepoID    int64
	Index     int64
	CommitSHA string `xorm:"commit_sha"`
	Event     webhook_module.HookEventType
	TriggerEvent string
	EventPayload string
}

// migrationActionRunJob is a minimal projection of the "action_run_job" table.
type migrationActionRunJob struct {
	ID    int64
	RunID int64
}

// migrationCommitStatus is a minimal projection of the commit status tables
// ("commit_status" and "commit_status_summary" share these columns).
type migrationCommitStatus struct {
	ID        int64
	RepoID    int64
	TargetURL string
}

// commitSHAAndRuns groups the runs of one repository that resolved to the
// same commit SHA, keyed by each run's per-repository index.
type commitSHAAndRuns struct {
	commitSHA string
	runs      map[int64]*migrationActionRun
}

// FixCommitStatusTargetURLToUseRunAndJobID partially migrates legacy Actions
// commit status target URLs to the new run/job ID-based form.
//
// Only rows whose resolved run ID is below legacyURLIDThreshold are rewritten.
// This is because smaller legacy run indexes are more likely to collide with run ID URLs during runtime resolution,
// so this migration prioritizes that lower range and leaves the remaining legacy target URLs to the web compatibility logic.
func FixCommitStatusTargetURLToUseRunAndJobID(x *xorm.Engine) error {
jobsByRunIDCache := make(map[int64][]int64)
repoLinkCache := make(map[int64]string)
groups, err := loadLegacyMigrationRunGroups(x)
if err != nil {
func AddMatrixEvaluationColumnsToActionRunJob(x *xorm.Engine) error {
if _, err := x.SyncWithOptions(xorm.SyncOptions{
IgnoreDropIndices: true,
}, new(ActionRunJobWithMatrixSupport)); err != nil {
return err
}

for repoID, groupsBySHA := range groups {
for _, group := range groupsBySHA {
if err := migrateCommitStatusTargetURLForGroup(x, "commit_status", repoID, group.commitSHA, group.runs, jobsByRunIDCache, repoLinkCache); err != nil {
return err
}
if err := migrateCommitStatusTargetURLForGroup(x, "commit_status_summary", repoID, group.commitSHA, group.runs, jobsByRunIDCache, repoLinkCache); err != nil {
return err
}
}
}
return nil
}

func loadLegacyMigrationRunGroups(x *xorm.Engine) (map[int64]map[string]*commitSHAAndRuns, error) {
var runs []migrationActionRun
if err := x.Table("action_run").
Where("id < ?", legacyURLIDThreshold).
Cols("id", "repo_id", "`index`", "commit_sha", "event", "trigger_event", "event_payload").
Find(&runs); err != nil {
return nil, fmt.Errorf("query action_run: %w", err)
}

groups := make(map[int64]map[string]*commitSHAAndRuns)
for i := range runs {
run := runs[i]
commitID, err := getCommitStatusCommitID(&run)
if err != nil {
log.Warn("skip action_run id=%d when resolving commit status commit SHA: %v", run.ID, err)
continue
}
if commitID == "" {
// empty commitID means the run didn't create any commit status records, just skip
continue
}
if groups[run.RepoID] == nil {
groups[run.RepoID] = make(map[string]*commitSHAAndRuns)
}
if groups[run.RepoID][commitID] == nil {
groups[run.RepoID][commitID] = &commitSHAAndRuns{
commitSHA: commitID,
runs: make(map[int64]*migrationActionRun),
}
}
groups[run.RepoID][commitID].runs[run.Index] = &run
}
return groups, nil
// ActionRunJobWithMatrixSupport is a temporary struct used only by this
// migration: it declares just the new columns to add to the existing
// action_run_job table, so xorm's sync touches nothing else.
type ActionRunJobWithMatrixSupport struct {
	// RawStrategy holds the raw "strategy" section from the job YAML.
	RawStrategy string `xorm:"TEXT"`
	// IsMatrixEvaluated records whether the matrix has been evaluated with job outputs.
	IsMatrixEvaluated bool
}

// migrateCommitStatusTargetURLForGroup rewrites legacy index-based Actions
// target URLs to ID-based ones for all rows of `table` that match one
// (repoID, sha) group. Rows that cannot be resolved (missing repo, foreign
// URL shape, unknown run index, missing job) are skipped with a warning;
// only database errors abort the migration.
func migrateCommitStatusTargetURLForGroup(
	x *xorm.Engine,
	table string,
	repoID int64,
	sha string,
	runs map[int64]*migrationActionRun,
	jobsByRunIDCache map[int64][]int64,
	repoLinkCache map[int64]string,
) error {
	var rows []migrationCommitStatus
	if err := x.Table(table).
		Where("repo_id = ?", repoID).
		And("sha = ?", sha).
		Cols("id", "repo_id", "target_url").
		Find(&rows); err != nil {
		return fmt.Errorf("query %s for repo_id=%d sha=%s: %w", table, repoID, sha, err)
	}

	for _, row := range rows {
		// Resolve the repository's web link; "" means the repo no longer exists.
		repoLink, err := getRepoLinkCached(x, repoLinkCache, row.RepoID)
		if err != nil || repoLink == "" {
			if err != nil {
				log.Warn("convert %s id=%d getRepoLinkCached: %v", table, row.ID, err)
			} else {
				log.Warn("convert %s id=%d: repo=%d not found", table, row.ID, row.RepoID)
			}
			continue
		}

		// Extract the legacy run index and job index from the old URL;
		// URLs with a different prefix or shape are left untouched.
		runNum, jobNum, ok := parseTargetURL(row.TargetURL, repoLink)
		if !ok {
			continue
		}

		// The run index must belong to this (repo, sha) group.
		run, ok := runs[runNum]
		if !ok {
			continue
		}

		// Map the zero-based job index within the run to the job's database ID.
		jobID, ok, err := getJobIDByIndexCached(x, jobsByRunIDCache, run.ID, jobNum)
		if err != nil || !ok {
			if err != nil {
				log.Warn("convert %s id=%d getJobIDByIndexCached: %v", table, row.ID, err)
			} else {
				log.Warn("convert %s id=%d: job not found for run_id=%d job_index=%d", table, row.ID, run.ID, jobNum)
			}
			continue
		}

		oldURL := row.TargetURL
		newURL := fmt.Sprintf("%s%s%d/jobs/%d", repoLink, actionsRunPath, run.ID, jobID)
		// Skip the write when the URL already happens to be in the new form.
		if oldURL == newURL {
			continue
		}

		if _, err := x.Table(table).ID(row.ID).Cols("target_url").Update(&migrationCommitStatus{TargetURL: newURL}); err != nil {
			return fmt.Errorf("update %s id=%d target_url from %s to %s: %w", table, row.ID, oldURL, newURL, err)
		}
	}
	return nil
}

// getRepoLinkCached resolves a repository's web link (AppSubURL plus the
// path-escaped owner and repo name), memoizing results in cache.
// A repository that no longer exists is cached as "" so it is looked up
// only once; the caller treats "" as "not found".
func getRepoLinkCached(x *xorm.Engine, cache map[int64]string, repoID int64) (string, error) {
	if cached, hit := cache[repoID]; hit {
		return cached, nil
	}
	var repo migrationRepository
	found, err := x.Table("repository").Where("id=?", repoID).Get(&repo)
	if err != nil {
		return "", err
	}
	link := ""
	if found {
		link = setting.AppSubURL + "/" + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
	}
	cache[repoID] = link
	return link, nil
}

// getJobIDByIndexCached maps a zero-based job index within a run to the job's
// database ID, caching the run's job-ID list (ordered by ascending ID) per
// runID. ok is false when the index is out of range for the run.
func getJobIDByIndexCached(x *xorm.Engine, cache map[int64][]int64, runID, jobIndex int64) (int64, bool, error) {
	ids, cached := cache[runID]
	if !cached {
		var rows []migrationActionRunJob
		if err := x.Table("action_run_job").Where("run_id=?", runID).Asc("id").Cols("id").Find(&rows); err != nil {
			return 0, false, err
		}
		ids = make([]int64, 0, len(rows))
		for i := range rows {
			ids = append(ids, rows[i].ID)
		}
		cache[runID] = ids
	}
	if jobIndex >= 0 && jobIndex < int64(len(ids)) {
		return ids[jobIndex], true, nil
	}
	return 0, false, nil
}

// parseTargetURL extracts the legacy run number and job number from a commit
// status target URL of the form "<repoLink>/actions/runs/<run>/jobs/<job>".
// ok is false when the URL has a different prefix or shape, or when either
// number fails to parse as a base-10 int64.
func parseTargetURL(targetURL, repoLink string) (runNum, jobNum int64, ok bool) {
	rest, matched := strings.CutPrefix(targetURL, repoLink+"/actions/runs/")
	if !matched {
		return 0, 0, false
	}
	runPart, tail, hasRun := strings.Cut(rest, "/")
	jobsWord, jobPart, hasJobs := strings.Cut(tail, "/")
	// Require exactly three path segments: <run>/jobs/<job>.
	if !hasRun || !hasJobs || jobsWord != "jobs" || strings.Contains(jobPart, "/") {
		return 0, 0, false
	}
	run, errRun := strconv.ParseInt(runPart, 10, 64)
	job, errJob := strconv.ParseInt(jobPart, 10, 64)
	if errRun != nil || errJob != nil {
		return 0, 0, false
	}
	return run, job, true
}

// getCommitStatusCommitID resolves the commit SHA that the run's commit
// statuses were attached to, based on the run's trigger event and payload.
// It returns "" with a nil error for event types that never create commit
// statuses, so callers can skip those runs.
//
// Fix: the pull-request and pull-request-review case groups had byte-identical
// bodies; they are merged into a single case list to remove the duplication.
func getCommitStatusCommitID(run *migrationActionRun) (string, error) {
	switch run.Event {
	case webhook_module.HookEventPush:
		payload, err := getPushEventPayload(run)
		if err != nil {
			return "", fmt.Errorf("getPushEventPayload: %w", err)
		}
		if payload.HeadCommit == nil {
			return "", errors.New("head commit is missing in event payload")
		}
		return payload.HeadCommit.ID, nil
	case webhook_module.HookEventPullRequest,
		webhook_module.HookEventPullRequestSync,
		webhook_module.HookEventPullRequestAssign,
		webhook_module.HookEventPullRequestLabel,
		webhook_module.HookEventPullRequestReviewRequest,
		webhook_module.HookEventPullRequestMilestone,
		webhook_module.HookEventPullRequestReviewApproved,
		webhook_module.HookEventPullRequestReviewRejected,
		webhook_module.HookEventPullRequestReviewComment:
		// All pull-request and PR-review events carry the same payload shape;
		// the status SHA is the head commit of the pull request.
		payload, err := getPullRequestEventPayload(run)
		if err != nil {
			return "", fmt.Errorf("getPullRequestEventPayload: %w", err)
		}
		if payload.PullRequest == nil {
			return "", errors.New("pull request is missing in event payload")
		} else if payload.PullRequest.Head == nil {
			return "", errors.New("head of pull request is missing in event payload")
		}
		return payload.PullRequest.Head.Sha, nil
	case webhook_module.HookEventRelease:
		return run.CommitSHA, nil
	default:
		// Other event types don't produce commit statuses; skip silently.
		return "", nil
	}
}

// getPushEventPayload decodes the run's JSON event payload as a push event.
// It rejects runs whose event type is not push.
func getPushEventPayload(run *migrationActionRun) (*api.PushPayload, error) {
	if run.Event != webhook_module.HookEventPush {
		return nil, fmt.Errorf("event %s is not a push event", run.Event)
	}
	payload := &api.PushPayload{}
	if err := json.Unmarshal([]byte(run.EventPayload), payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func getPullRequestEventPayload(run *migrationActionRun) (*api.PullRequestPayload, error) {
if !run.Event.IsPullRequest() && !run.Event.IsPullRequestReview() {
return nil, fmt.Errorf("event %s is not a pull request event", run.Event)
}
var payload api.PullRequestPayload
if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
return nil, err
}
return &payload, nil
// TableName makes xorm sync this struct against the existing
// "action_run_job" table instead of deriving a table name from the struct name.
func (ActionRunJobWithMatrixSupport) TableName() string {
	return "action_run_job"
}
Loading
Loading