stt_duplex_train.py
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import multiprocessing as mp
import os

import torch
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import ModelCheckpoint
from omegaconf import OmegaConf, open_dict

from nemo.collections.speechlm2 import DataModule, DuplexSTTDataset, DuplexSTTModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
from nemo.utils.trainer_utils import resolve_trainer_cfg

# Set the multiprocessing start method to 'spawn' for CUDA compatibility with DataLoader workers.
# This prevents "Cannot re-initialize CUDA in forked subprocess" errors.
try:
    mp.set_start_method('spawn', force=True)
except RuntimeError:
    pass  # Start method already set.
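
# Bind this process to its GPU before any CUDA work happens; LOCAL_RANK is
# expected in the environment (e.g., set by torchrun).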
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))


@hydra_runner(config_path="conf", config_name="s2s_duplex_stt")
def train(cfg):
    OmegaConf.resolve(cfg)
    torch.distributed.init_process_group(backend="nccl")
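
    # Allow reduced-precision float32 matmuls and TF32 cuDNN kernels for speed.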
    torch.set_float32_matmul_precision("medium")
    torch.backends.cudnn.allow_tf32 = True

    trainer = Trainer(**resolve_trainer_cfg(cfg.trainer))
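
    # exp_manager sets up logging and checkpointing and returns the run's log
    # directory; snapshot the fully resolved config there for reproducibility.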
    log_dir = exp_manager(trainer, cfg.get("exp_manager", None))
    OmegaConf.save(cfg, log_dir / "exp_config.yaml")

    # Avoid using `=` in the checkpoint name.
    for callback in trainer.callbacks:
        if isinstance(callback, ModelCheckpoint):
            callback.CHECKPOINT_EQUALS_CHAR = "-"
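
    # init_module() lets the trainer create the weights with its configured
    # precision (and, where supported, directly on the target device).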
    with trainer.init_module():
        model = DuplexSTTModel(OmegaConf.to_container(cfg.model, resolve=True))

    common_kwargs = dict(
        tokenizer=model.tokenizer,
        frame_length=cfg.data.frame_length,
        source_sample_rate=cfg.data.source_sample_rate,
        input_roles=cfg.data.input_roles,
        output_roles=cfg.data.output_roles,
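        # Optional flag; defaults to False when absent from the data config.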
        aug_by_swap_role=cfg.data.get("aug_by_swap_role", False),
        cfg=cfg.data,
        model_cfg=cfg.model,
    )
    train_dataset = DuplexSTTDataset(**common_kwargs, is_training=True)
    val_dataset = DuplexSTTDataset(**common_kwargs, is_training=False)
    datamodule = DataModule(cfg.data, tokenizer=model.tokenizer, train_dataset=train_dataset, val_dataset=val_dataset)
    trainer.fit(model, datamodule)


if __name__ == "__main__":
    train()
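
# Example launch (a sketch only; valid override keys depend on conf/s2s_duplex_stt.yaml,
# and exp_manager.exp_dir below is an assumed key):
#
#   torchrun --nproc_per_node=8 stt_duplex_train.py \
#       exp_manager.exp_dir=/path/to/experiments
#
# torchrun sets LOCAL_RANK plus the torch.distributed variables (RANK, WORLD_SIZE,
# MASTER_ADDR, MASTER_PORT) that this script's NCCL process group relies on.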