Commit

Fix run_evaluation argument
pgmpablo157321 committed Dec 4, 2024
1 parent 7ccc9ab commit 421e14e
Showing 2 changed files with 1 addition and 2 deletions.
language/llama3-405b/SUT_VLLM.py (1 change: 0 additions & 1 deletion)

@@ -41,7 +41,6 @@ def __init__(
     ):

         self.model_path = model_path or f"Meta-Llama-3.1-405B-Instruct{'-FP8' if dtype == 'float8' else ''}"
-        os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"

         if not batch_size:
             batch_size = 1
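A side note on the removed line: VLLM_WORKER_MULTIPROC_METHOD is the vLLM environment variable that selects the multiprocessing start method for its worker processes. With the hard-coded assignment gone from SUT_VLLM.py, the setting can still be supplied by the caller when a particular deployment needs it. A minimal sketch, assuming one only wants to set "spawn" when the user has not already chosen a value:

    import os

    # Sketch only: opt into the "spawn" start method for vLLM workers without
    # overriding a value the user may already have exported in their shell.
    os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")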
language/llama3-405b/evaluate-accuracy.py (2 changes: 1 addition & 1 deletion)

@@ -182,7 +182,7 @@ def main():

     preds, targets = postprocess_text(preds_decoded_text, target_required)

-    result = run_evaluation(preds, targets, metrics)
+    result = run_evaluation(preds, targets, metrics_required)
     result = dict(result)
     prediction_lens = [len(pred) for pred in preds]
     gen_num = len(preds)
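The second change passes run_evaluation the metric list that main() actually builds (metrics_required, presumably defined alongside target_required earlier in the script) rather than metrics, the argument the commit title flags as incorrect. A minimal, hypothetical sketch of the calling pattern implied by the surrounding lines; the exact_match scorer and the function body below are illustrative assumptions, not the repository's run_evaluation:

    def run_evaluation(preds, targets, metrics_required):
        # Hypothetical stand-in: score predictions against references for each
        # requested metric and yield (name, score) pairs, so the caller can
        # collect them with dict(result) as evaluate-accuracy.py does.
        scorers = {
            "exact_match": lambda p, t: sum(x == y for x, y in zip(p, t)) / max(len(p), 1),
        }
        for name in metrics_required:
            if name in scorers:
                yield name, scorers[name](preds, targets)

    # Usage mirroring the fixed line in main():
    # result = dict(run_evaluation(preds, targets, metrics_required))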
