Skip to content

Commit 76d9caa

Browse files
liyuying0000 authored and copybara-github committed
Compute runtime && iteration standard deviation.
PiperOrigin-RevId: 731888477 Change-Id: I04c4f2f6cf63097bed88f91ff86e84eb08e15147
1 parent ad6b189 commit 76d9caa

File tree

2 files changed

+24
-4
lines changed

2 files changed

+24
-4
lines changed

fleetbench/parallel/reporter.py

Lines changed: 17 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -92,9 +92,7 @@ def GenerateBenchmarkReport(
9292

9393
# Remove "fleetbench (" prefix and ")" suffix
9494
df["Benchmark"] = (
95-
df["Benchmark"]
96-
.astype(str)
97-
.str.replace(r"fleetbench \((.*)\)", r"\1", regex=True)
95+
df["Benchmark"].astype(str).str.replace(r".* \((.*)\)", r"\1", regex=True)
9896
)
9997

10098
grouped_results = (
@@ -104,21 +102,33 @@ def GenerateBenchmarkReport(
104102
Mean_Wall_Time=("WallTimes", "mean"),
105103
Mean_CPU_Time=("CPUTimes", "mean"),
106104
Mean_Iterations=("Iterations", "mean"),
105+
Wall_Time_std=("WallTimes", "std"),
106+
CPU_Time_std=("CPUTimes", "std"),
107+
Iterations_std=("Iterations", "std"),
107108
)
108109
.round(3)
109110
)
110111
grouped_results["Mean_Iterations"] = grouped_results[
111112
"Mean_Iterations"
112113
].astype(int)
113114

115+
# We only show the following columns in the console report.
116+
# More metrics can be founded in the dumped JSON file.
117+
selected_columns = [
118+
"Count",
119+
"Mean_Wall_Time",
120+
"Mean_CPU_Time",
121+
]
122+
114123
# Combine perf_counter_df and benchmark run results on the same
115124
# "benchmark" entry.
116125
if perf_counter_df is not None:
117126
grouped_results = pd.merge(
118127
grouped_results, perf_counter_df, on="Benchmark", how="left"
119128
)
129+
selected_columns.extend(perf_counter_df.columns.tolist())
120130

121-
print(grouped_results.to_string())
131+
print(grouped_results[selected_columns].to_string())
122132
return grouped_results
123133

124134

@@ -139,6 +149,9 @@ def SaveBenchmarkResults(output_dir, df: pd.DataFrame) -> None:
139149
"Mean_Wall_Time": "real_time",
140150
"Mean_CPU_Time": "cpu_time",
141151
"Mean_Iterations": "iterations",
152+
"Wall_Time_std": "real_time_std",
153+
"CPU_Time_std": "cpu_time_std",
154+
"Iterations_std": "iterations_std",
142155
}
143156
)
144157
data = df.reset_index().to_dict(orient="records")

fleetbench/parallel/reporter_test.py

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -13,6 +13,7 @@
1313
# limitations under the License.
1414

1515
import json
16+
import math
1617
import os
1718
import shutil
1819

@@ -150,6 +151,9 @@ def test_generate_benchmark_report(self):
150151
"Mean_Wall_Time": 4.5,
151152
"Mean_CPU_Time": 5.0,
152153
"Mean_Iterations": 30,
154+
"Wall_Time_std": 2.121,
155+
"CPU_Time_std": 2.828,
156+
"Iterations_std": 28.284,
153157
"instructions": 130.0,
154158
"cycles": 3.0,
155159
},
@@ -159,6 +163,9 @@ def test_generate_benchmark_report(self):
159163
"Mean_Wall_Time": 4,
160164
"Mean_CPU_Time": 5.0,
161165
"Mean_Iterations": 20,
166+
"Wall_Time_std": math.nan,
167+
"CPU_Time_std": math.nan,
168+
"Iterations_std": math.nan,
162169
"instructions": 200.0,
163170
"cycles": 2.0,
164171
},

0 commit comments

Comments (0)