
Commit fa3cbf4

fix warmup in blla training
1 parent 44ca285 commit fa3cbf4

File tree

1 file changed (+1 line, -1 line)

kraken/train/blla.py

Lines changed: 1 addition & 1 deletion
@@ -533,7 +533,7 @@ def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure):
         # linear warmup between 0 and the initial learning rate `lrate` in `warmup`
         # steps.
         if self.hparams.config.warmup and self.trainer.global_step < self.hparams.config.warmup:
-            lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams)
+            lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.config.warmup)
             for pg in optimizer.param_groups:
                 pg["lr"] = lr_scale * self.hparams.config.lrate
 
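The faulty line divided the step count by `self.hparams` (the whole hyperparameter container) instead of by the warmup step count `self.hparams.config.warmup`. Below is a minimal sketch of the linear warmup schedule the corrected hunk implements, written against a plain PyTorch optimizer rather than kraken's Lightning module; the `warmup` and `lrate` values are illustrative placeholders, not kraken's defaults.

# Minimal sketch of linear learning-rate warmup, standing in for the
# optimizer_step hook in kraken/train/blla.py. `warmup` and `lrate` are
# placeholders for hparams.config.warmup and hparams.config.lrate.
import torch

warmup = 100   # number of warmup steps (stand-in for hparams.config.warmup)
lrate = 1e-3   # target learning rate (stand-in for hparams.config.lrate)

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=lrate)

for global_step in range(200):
    # Linear warmup between 0 and `lrate` over the first `warmup` steps.
    # Dividing by the warmup step count (not the whole hparams object, as
    # before the fix) gives a scale ramping from 1/warmup up to 1.0.
    if warmup and global_step < warmup:
        lr_scale = min(1.0, float(global_step + 1) / warmup)
        for pg in optimizer.param_groups:
            pg["lr"] = lr_scale * lrate

    # forward/backward/optimizer.step() would run here in real training

After `warmup` steps the condition is false and the learning rate stays at `lrate` (or whatever a downstream scheduler sets).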
