We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent f596d6d commit cf03c27 — Copy full SHA for cf03c27
openevolve/evaluator.py
@@ -192,7 +192,7 @@ async def evaluate_program(
192
193
# Combine metrics
194
llm_scores = []
195
- for name, value in llm_result.metrics.items():
+ for name, value in llm_eval_result.metrics.items():
196
weighted_value = value * self.config.llm_feedback_weight
197
eval_result.metrics[f"llm_{name}"] = weighted_value
198
llm_scores.append(value) # Use unweighted value for average
0 commit comments