Skip to content

Commit e4a68b0

Browse files
fix tagged_lm_evaluate (#2979)
Fixes #2976
1 parent d8d9820 commit e4a68b0

File tree

1 file changed

+10
-2
lines changed

1 file changed

+10
-2
lines changed

lib/levanter/src/levanter/eval.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -279,14 +279,22 @@ def eval_callback(step: StepInfo):
279279
metrics_file = os.path.join(checkpoint_path, "eval_metrics.jsonl")
280280
fs, _, _ = fsspec.get_fs_token_paths(metrics_file)
281281
fs.makedirs(checkpoint_path, exist_ok=True)
282-
with fs.open(metrics_file, "a") as f:
282+
283+
if fs.exists(metrics_file):
284+
with fs.open(metrics_file, "r") as f:
285+
content = f.read()
286+
else:
287+
content = ""
288+
289+
with fs.open(metrics_file, "w") as f:
283290
# Convert numpy/jax floats to Python floats for JSON serialization
284291
serializable_metrics = {
285292
k: float(v) if isinstance(v, (np.floating, jnp.floating)) else v
286293
for k, v in metrics_to_write.items()
287294
}
288295
record = {"step": int(step_count), **serializable_metrics}
289-
f.write(json.dumps(record, sort_keys=True) + "\n")
296+
content += json.dumps(record, sort_keys=True) + "\n"
297+
f.write(content)
290298

291299
return
292300

0 commit comments

Comments (0)