77
88
99def _norm_rev (text ):
10- """revision 문자열에서 SHA 추출 + '@...' 제거"""
10+ # Normalize revision string to SHA
1111 if not text :
1212 return None
1313 text = text .split ("@" , 1 )[0 ]
1414 m = SHA_RE .search (text )
1515 return m .group (0 ) if m else text
1616
1717
18- # 공용: dict(k->v)을 "k=v-k=v-..." 형태의 정규화된 ID로 변환
18+ # Common: convert dict(k->v) to normalized ID in "k=v-k=v-..." format
1919def _normalize_kv_id (kv : dict , drop_keys = None ) -> str :
2020 drop = set (drop_keys or [])
2121 pairs = []
2222 for k , v in kv .items ():
23- if k in drop : # 메트릭/불필요 키 제거
23+ if k in drop :
2424 continue
2525 if v is None :
2626 continue
@@ -29,33 +29,33 @@ def _normalize_kv_id(kv: dict, drop_keys=None) -> str:
2929 if not k or not v :
3030 continue
3131 pairs .append ((k , v ))
32- pairs .sort (key = lambda x : x [0 ]) # 키 기준 정렬
32+ pairs .sort (key = lambda x : x [0 ])
3333 return "-" .join (f"{ k } ={ v } " for k , v in pairs )
3434
3535
# 1) W&B benchmark_id (e.g. "rigid_body-batch_size=...-env=...-...") -> normalized ID
def wandb_normalize_benchmark_id(bid: str) -> str:
    """Turn a W&B benchmark_id into the normalized key=value ID.

    An input such as
    'rigid_body-batch_size=30000-constraint_solver=Newton-env=batched_franka-...'
    is split on '-'; tokens without '=' (the leading prefix, e.g. 'rigid_body')
    are discarded, and the remaining key=value pairs are handed to
    _normalize_kv_id, which sorts them by key and joins them with '-'.

    NOTE(review): a value that itself contains '-' would be truncated at the
    first hyphen — presumably the ID format never embeds one; verify upstream.
    """
    pairs = {}
    for raw in bid.split("-"):
        token = raw.strip()
        if not token or "=" not in token:
            # blank fragment or the bare prefix token: skip it
            continue
        key, _, value = token.partition("=")
        pairs[key.strip()] = value.strip()
    # W&B IDs carry no metric keys, so nothing needs dropping here;
    # drop_keys is left at its default for rule parity with the txt parser.
    return _normalize_kv_id(pairs)
5151
5252
53- # 2) speed_test*.txt 라인들을 파싱해 {id: {runtime_fps, compile_time}} 맵으로 변환
54- def parse_speed_txt_lines (lines ):
53+ # 2) speed_test*.txt lines to {id: {runtime_fps, compile_time}} map
54+ def artifacts_parse_speed_txt_lines (lines ):
5555 """
56- speed_test.txt 내용을 받아
57- { normalized_id: {"runtime_fps": float|None, "compile_time": float|None} } 반환 .
58- normalized_id 규칙은 normalize_benchmark_id와 동일 .
56+ speed_test.txt content to
57+ { normalized_id: {"runtime_fps": float|None, "compile_time": float|None} } return .
58+ normalized_id rule is the same as wandb_normalize_benchmark_id .
5959 """
6060 METRIC_KEYS = {"compile_time" , "runtime_fps" , "realtime_factor" }
6161
@@ -64,17 +64,17 @@ def parse_speed_txt_lines(lines):
6464 if not line .strip ():
6565 continue
6666
67- # "env=... | batch_size=... | ..." 형태 파싱
67+ # "env=... | batch_size=... | ..." format parsing
6868 parts = [p .strip () for p in line .strip ().split ("|" ) if "=" in p ]
6969 kv = {}
7070 for p in parts :
7171 k , v = p .split ("=" , 1 )
7272 kv [k .strip ()] = v .strip ()
7373
74- # 정규화된 테스트 ID: metrics 키는 제외하고 W&B와 동일 규칙 적용
74+ # Normalized test ID: metrics key is excluded, and the same rule as W&B
7575 test_id = _normalize_kv_id (kv , drop_keys = METRIC_KEYS )
7676
77- # 수치 파싱
77+ # Numerical parsing
7878 rt = kv .get ("runtime_fps" )
7979 ct = kv .get ("compile_time" )
8080 try :
@@ -114,7 +114,7 @@ def parse_speed_txt_lines(lines):
114114
115115with open (current_txt_path , "r" , encoding = "utf-8" ) as f :
116116 current_txt_lines = f .readlines ()
117- current_benchmark = parse_speed_txt_lines (current_txt_lines )
117+ current_benchmark = artifacts_parse_speed_txt_lines (current_txt_lines )
118118
119119# ---------- Read event / find PR ----------
120120API = os .environ .get ("GITHUB_API_URL" , "https://api.github.com" )
@@ -136,7 +136,7 @@ def parse_speed_txt_lines(lines):
136136prs = wr .get ("pull_requests" ) or []
137137pr = prs [0 ] if prs else None
138138if not pr :
139- # 커밋에서 PR 역조회
139+ # Find PR from commit
140140 url = f"{ API } /repos/{ owner } /{ name } /commits/{ head_sha } /pulls"
141141 r = s .get (url , headers = {"Accept" : "application/vnd.github.groot-preview+json" })
142142 if r .ok and r .json ():
@@ -167,7 +167,7 @@ def parse_speed_txt_lines(lines):
167167runs_iter = api .runs (f"{ ENTITY } /{ PROJECT } " , order = "-created_at" )
168168
169169by_rev = {} # revision -> {benchmark_id: {runtime_fps, compile_time}}
170- rev_order = [] # 최신 -> 과거
170+ rev_order = [] # latest -> oldest
171171selected_revs = None
172172no_change_streak = 0
173173
@@ -220,7 +220,7 @@ def parse_speed_txt_lines(lines):
220220 if rowcnt >= 10 :
221221 break
222222
223- nbid = normalize_benchmark_id (bid )
223+ nbid = wandb_normalize_benchmark_id (bid )
224224
225225 by_rev [rev ][nbid ] = {
226226 "runtime_fps" : runtime_fps ,
@@ -294,6 +294,8 @@ def trunc(s, n=120):
294294lines = []
295295lines .append (":warning: **Benchmark regression detected (vs W&B history)**" )
296296lines .append (f"- Revisions considered: **{ len (rev_order )} **" )
297+ for i in range (len (rev_order )):
298+ lines .append (f"\t - Revision { i + 1 } : { rev_order [i ]} " )
297299lines .append (f"- Runtime tolerance: **-{ tol_rt :.1f} %**; Compile tolerance: **+{ tol_ct :.1f} %**" )
298300lines .append ("" )
299301
@@ -335,5 +337,15 @@ def trunc(s, n=120):
335337
336338print (body )
337339
338- resp = s .post (f"{ target_repo_api } /issues/{ pr_num } /comments" , json = {"body" : body })
339- print ("Comment status:" , resp .status_code , resp .text [:200 ])
340+ # ---------- Save to a local file; DO NOT POST here ----------
341+ comment_path = os .environ .get ("PR_COMMENT_PATH" , "pr_comment.md" )
342+
343+ # if runtime_regs or compile_regs:
344+ with open (comment_path , "w" , encoding = "utf-8" ) as f :
345+ f .write (body + "\n " )
346+ print (f"[INFO] Wrote PR comment to: { os .path .abspath (comment_path )} " )
347+ # else:
348+ # print("[INFO] No regressions; not writing pr_comment.md")
349+
350+ # resp = s.post(f"{target_repo_api}/issues/{pr_num}/comments", json={"body": body})
351+ # print("Comment status:", resp.status_code, resp.text[:200])
0 commit comments