---
# GitHub actions workflow that runs the benchmark suite in benchmarks/
# from "commit_start" to "commit_end". It pushes the results to the
# pybamm-bench repo and updates the display website.
# This workflow is meant to be triggered manually, see
# https://docs.github.com/en/enterprise-server@3.0/actions/managing-workflow-runs/manually-running-a-workflow
name: Manual benchmarks
on:
  workflow_dispatch:
    inputs:
      # NOTE: workflow_dispatch inputs only support description/required/
      # default/type/options — a `pattern` key is not part of the schema and
      # is silently ignored. Input format validation is instead performed by
      # the actions/github-script steps in the "benchmarks" job below.
      commit_start:
        description: "Identifier of commit from which to start"
        default: "v0.1.0"
        type: string
      commit_end:
        description: "Identifier of commit at which to end"
        default: "main"
        type: string
      ncommits:
        description: "Number of commits to benchmark between commit_start and commit_end"
        default: "100"
        type: string
# No default token permissions; each job requests what it needs.
permissions: {}
env:
  # Kept as a quoted string — consumers read env vars as text, not booleans.
  PYBAMM_DISABLE_TELEMETRY: "true"
jobs:
  # Runs asv across the requested commit range and uploads raw results.
  benchmarks:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          persist-credentials: false
      - name: Set up Python 3.14
        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          # Quoted so YAML cannot mistype the version as a float
          # (e.g. 3.10 would otherwise parse as 3.1).
          python-version: "3.14"
      - name: Install nox and asv
        run: pip install -U pip nox asv
      - name: Fetch main branch
        # Not required when workflow triggered
        # on main, but useful when
        # experimenting/developing on another branch.
        if: github.ref != 'refs/heads/main'
        run: |
          git fetch origin main:main
      # The three steps below sanitise the workflow_dispatch inputs before
      # they are interpolated into shell commands (script-injection defence).
      - name: Validate commit_start
        id: validate_start
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
        with:
          script: |
            const input = context.payload.inputs.commit_start;
            if (!input || !/^[a-zA-Z0-9._-]+$/.test(input)) {
              core.setFailed('Invalid commit_start format');
              return;
            }
            core.setOutput('commit_start', input);
      - name: Validate commit_end
        id: validate_end
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
        with:
          script: |
            const input = context.payload.inputs.commit_end;
            if (!input || !/^[a-zA-Z0-9._-]+$/.test(input)) {
              core.setFailed('Invalid commit_end format');
              return;
            }
            core.setOutput('commit_end', input);
      - name: Validate ncommits
        id: validate_ncommits
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
        with:
          script: |
            const input = context.payload.inputs.ncommits;
            if (!input || !/^[0-9]+$/.test(input)) {
              core.setFailed('Invalid ncommits format');
              return;
            }
            const numValue = parseInt(input, 10);
            if (numValue < 1 || numValue > 10000) {
              core.setFailed('ncommits must be between 1 and 10000');
              return;
            }
            if (numValue > 5000) {
              core.warning('Processing a large number of commits. This may take a while....');
            }
            core.setOutput('ncommits', numValue.toString());
      # Expose the validated values to all later steps via GITHUB_ENV.
      - name: Set environment variables
        env:
          COMMIT_START: ${{ steps.validate_start.outputs.commit_start }}
          COMMIT_END: ${{ steps.validate_end.outputs.commit_end }}
          NCOMMITS: ${{ steps.validate_ncommits.outputs.ncommits }}
        run: |
          echo "COMMIT_START=$COMMIT_START" >> "$GITHUB_ENV"
          echo "COMMIT_END=$COMMIT_END" >> "$GITHUB_ENV"
          echo "NCOMMITS=$NCOMMITS" >> "$GITHUB_ENV"
      - name: Run benchmarks
        # Variables are double-quoted to avoid word-splitting/globbing
        # (ShellCheck SC2086), even though they are already validated.
        run: |
          asv machine --machine "GitHubRunner"
          asv run -m "GitHubRunner" -s "$NCOMMITS" \
            "$COMMIT_START..$COMMIT_END"
      - name: Upload results as artifact
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: asv_over_history_results
          path: results
          if-no-files-found: error
publish-results:
if: github.repository == 'pybamm-team/PyBaMM'
name: Push and publish results
needs: benchmarks
runs-on: ubuntu-latest
environment: benchmarks
permissions:
contents: write
steps:
- name: Set up Python 3.14
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: 3.14
- name: Install asv
run: pip install asv
- name: Checkout pybamm-bench repo
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: pybamm-team/pybamm-bench
token: ${{ secrets.BENCH_PAT }}
persist-credentials: false
- name: Download results artifact(s)
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v4.6.2
with:
name: asv_over_history_results
path: results
- name: Copy new results and push to pybamm-bench repo
env:
PUSH_BENCH_EMAIL: ${{ secrets.PUSH_BENCH_EMAIL }}
PUSH_BENCH_NAME: ${{ secrets.PUSH_BENCH_NAME }}
run: |
git config --global user.email "$PUSH_BENCH_EMAIL"
git config --global user.name "$PUSH_BENCH_NAME"
git add results
git commit -am "Add new results"
git push
- name: Publish results
run: |
git fetch origin gh-pages:gh-pages
asv gh-pages