@@ -92,9 +92,7 @@ def GenerateBenchmarkReport(
9292
9393 # Remove "fleetbench (" prefix and ")" suffix
9494 df ["Benchmark" ] = (
95- df ["Benchmark" ]
96- .astype (str )
97- .str .replace (r"fleetbench \((.*)\)" , r"\1" , regex = True )
95+ df ["Benchmark" ].astype (str ).str .replace (r".* \((.*)\)" , r"\1" , regex = True )
9896 )
9997
10098 grouped_results = (
@@ -104,21 +102,33 @@ def GenerateBenchmarkReport(
104102 Mean_Wall_Time = ("WallTimes" , "mean" ),
105103 Mean_CPU_Time = ("CPUTimes" , "mean" ),
106104 Mean_Iterations = ("Iterations" , "mean" ),
105+ Wall_Time_std = ("WallTimes" , "std" ),
106+ CPU_Time_std = ("CPUTimes" , "std" ),
107+ Iterations_std = ("Iterations" , "std" ),
107108 )
108109 .round (3 )
109110 )
110111 grouped_results ["Mean_Iterations" ] = grouped_results [
111112 "Mean_Iterations"
112113 ].astype (int )
113114
115+ # We only show the following columns in the console report.
116+ # More metrics can be found in the dumped JSON file.
117+ selected_columns = [
118+ "Count" ,
119+ "Mean_Wall_Time" ,
120+ "Mean_CPU_Time" ,
121+ ]
122+
114123 # Combine perf_counter_df and benchmark run results on the same
115124 # "benchmark" entry.
116125 if perf_counter_df is not None :
117126 grouped_results = pd .merge (
118127 grouped_results , perf_counter_df , on = "Benchmark" , how = "left"
119128 )
129+ selected_columns .extend (perf_counter_df .columns .tolist ())
120130
121- print (grouped_results .to_string ())
131+ print (grouped_results [ selected_columns ] .to_string ())
122132 return grouped_results
123133
124134
@@ -139,6 +149,9 @@ def SaveBenchmarkResults(output_dir, df: pd.DataFrame) -> None:
139149 "Mean_Wall_Time" : "real_time" ,
140150 "Mean_CPU_Time" : "cpu_time" ,
141151 "Mean_Iterations" : "iterations" ,
152+ "Wall_Time_std" : "real_time_std" ,
153+ "CPU_Time_std" : "cpu_time_std" ,
154+ "Iterations_std" : "iterations_std" ,
142155 }
143156 )
144157 data = df .reset_index ().to_dict (orient = "records" )
0 commit comments