Skip to content

Commit f0c6043

Browse files
committed
added comments
1 parent 8c999b7 commit f0c6043

File tree

5 files changed

+23
-12
lines changed

5 files changed

+23
-12
lines changed

buildenv/jenkins/benchmarkMetric.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@ def main():
1414
benchmarkMetricsTemplate = json.loads(benchmarkMetricsTemplate_json)
1515

1616
tests = args.testNames.split(",")
17+
18+
#populate the template file with the corresponding metrics extracted from the console log
1719
for test in tests:
1820
for metric in benchmarkMetricsTemplate[test].values():
1921
regex_parser = re.search(metric.get("regex"), console)

buildenv/jenkins/initBenchmarkMetrics.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,18 @@
11
import argparse, pathlib, json
22

3+
#extract necessary benchmark information from metricConfig based on test, and
4+
#update benchmarkMetrics into a form that is optimal for later processing
35
def initBenchmarkMetrics(metricConfig, test, benchmarkMetrics):
46
test_info = test.split("-")
5-
benchmarkMap = metricConfig[test_info[0]]
7+
benchmarkMap = metricConfig[test_info[0]] #index by general test category
68
metricMap = benchmarkMap["metrics"]
7-
if len(test_info) > 1:
9+
if len(test_info) > 1: #if there is a variant, grab it directly
810
variant = test_info[1]
911
if (metricMap.get(variant) != None):
1012
benchmarkMetrics.update({test : {variant : metricMap[variant]}})
1113
return
1214

15+
#if there is no variant, metricMap is assumed to already contain the unique information needed for the test
1316
benchmarkMetrics.update({test : metricMap})
1417

1518
def main():
@@ -27,15 +30,15 @@ def main():
2730
tests = args.testNames.split(",")
2831
for test in tests: initBenchmarkMetrics(metricConfig, test, benchmarkMetrics)
2932
benchmarkMetrics_json = json.dumps(benchmarkMetrics)
30-
pathlib.Path(f"{args.runBase}").write_text(benchmarkMetrics_json, encoding="utf-8")
33+
pathlib.Path(f"{args.runBase}").write_text(benchmarkMetrics_json, encoding="utf-8") #serves as template populated by a single run
3134

3235
for test in tests:
3336
for metric in benchmarkMetrics[test].values():
3437
metric.update({"test" : {"values" : []}})
3538
metric.update({"baseline" : {"values" : []}})
3639

3740
benchmarkMetrics_json = json.dumps(benchmarkMetrics)
38-
pathlib.Path(f"{args.aggrBase}").write_text(benchmarkMetrics_json, encoding="utf-8")
41+
pathlib.Path(f"{args.aggrBase}").write_text(benchmarkMetrics_json, encoding="utf-8") #serves as aggregate file populated by all runs
3942

4043
if __name__ == "__main__":
4144
main()

buildenv/jenkins/metricConfig2JSON.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import argparse, pathlib, json, re
22

3+
#regex used to convert BenchmarkMetric.js into valid JSON file
34
RE_COMMENT = re.compile(r"""
45
//.*?$ |
56
/\*.*?\*/
@@ -13,6 +14,8 @@
1314

1415
RE_KEYS = re.compile(r"""([,{]\s*)([A-Za-z_]\w*)(\s*:)""")
1516

17+
#parses the BenchmarkMetric.js file by grabbing the BenchmarkMetricRegex element,
18+
#removing comments, and converting to proper JSON syntax
1619
def js_to_json(metrics_js):
1720
benchmark_parser = re.search(r"const\s+BenchmarkMetricRegex\s*=\s*({[\s\S]*?});", metrics_js)
1821
if not benchmark_parser:

buildenv/jenkins/perfPipeline.groovy

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ if (params.SETUP_LABEL) {
1111
SETUP_LABEL = params.SETUP_LABEL
1212
} else {
1313
if (PROCESS_METRICS && EXIT_EARLY) {
14-
SETUP_LABEL = "test-rhibmcloud-rhel9-x64-1"
14+
SETUP_LABEL = "test-rhibmcloud-rhel9-x64-1" //this machine needs Python installed
1515
} else {
1616
SETUP_LABEL = "ci.role.test&&hw.arch.x86&&sw.os.linux"
1717
}
@@ -62,7 +62,7 @@ node (SETUP_LABEL) {
6262
}
6363
}
6464

65-
if (PROCESS_METRICS) {
65+
if (PROCESS_METRICS) { //convert BenchmarkMetric.js to a JSON file optimized for metric processing
6666
def owner = params.ADOPTOPENJDK_REPO.tokenize('/')[2]
6767
getPythonDependencies(owner, params.ADOPTOPENJDK_BRANCH)
6868
sh "curl -Os https://raw.githubusercontent.com/adoptium/aqa-test-tools/refs/heads/master/TestResultSummaryService/parsers/BenchmarkMetric.js"
@@ -71,7 +71,8 @@ node (SETUP_LABEL) {
7171
testList = params.TARGET.split("=")[1].tokenize(",")
7272
metrics = readJSON file: aggrBase
7373
}
74-
else {
74+
75+
if (!EXIT_EARLY) {
7576
testParams << string(name: "TARGET", value: params.TARGET)
7677
baselineParams << string(name: "TARGET", value: params.TARGET)
7778
}
@@ -82,8 +83,8 @@ node (SETUP_LABEL) {
8283
//clone to avoid mutation
8384
def thisTestParams = testParams.collect()
8485
def thisBaselineParams = baselineParams.collect()
85-
if (PROCESS_METRICS) {
86-
//set the target, testlist should change if some metrics regress while others do not
86+
if (EXIT_EARLY) {
87+
//update TARGET, testlist should hold metrics that were not exited early
8788
testNames = testList.join(",")
8889
def TARGET = params.TARGET.replaceFirst(/(?<=TESTLIST=)[^ ]+/, testNames)
8990
thisTestParams << string(name: "TARGET", value: TARGET)
@@ -108,15 +109,17 @@ node (SETUP_LABEL) {
108109
aggregateLogs(baseRun, testNames, testList, runBase, metrics, "baseline")
109110
writeJSON file: "metrics.json", json: metrics, pretty: 4
110111
archiveArtifacts artifacts: "metrics.json"
112+
113+
//if we are on the final iteration, or we have executed enough iterations to decide likelihood of regression and have permission to exit early
111114
if (i == PERF_ITERATIONS-1 || (EXIT_EARLY && i >= PERF_ITERATIONS * 0.8)) {
112115
if (i == PERF_ITERATIONS-1) {
113116
echo "All iterations completed"
114117
} else {
115118
echo "Attempting early exit"
116119
}
117120
echo "checking for regressions"
118-
checkRegressions(metrics, testList)
119-
if (testList.size() == 0) break
121+
checkRegressions(metrics, testList) //compute relevant performance stats, check for regression
122+
if (testList.size() == 0) break //if all tests have been exited early we can end testing
120123
}
121124
}
122125
}

buildenv/jenkins/perfPipeline_root.groovy

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ node("worker || (ci.role.test&&hw.arch.x86&&sw.os.linux)") {
5959
baseParams << string(name: "BENCHMARK", value: item.BENCHMARK)
6060
baseParams << string(name: "TARGET", value: item.TARGET)
6161
baseParams << string(name: "BUILD_LIST", value: item.BUILD_LIST)
62-
baseParams << string(name: "PERF_ITERATIONS", value: item.PERF_ITERATIONS ? item.PERF_ITERATIONS.toString() : "4")
62+
baseParams << string(name: "PERF_ITERATIONS", value: item.PERF_ITERATIONS ? item.PERF_ITERATIONS.toString() : "4") //by default, test 4 pairs of test,baseline runs
6363

6464
item.PLAT_MACHINE_MAP.each { kv ->
6565
kv.each {p, m ->

0 commit comments

Comments
 (0)