# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import datasets
import torch
import torch.distributed
import transformers
from accelerate.logging import get_logger
from transformers import AutoTokenizer
from trl import SFTTrainer

import modelopt.torch.opt as mto
from modelopt.torch.distill.plugins.huggingface import KDTrainer, LMLogitsLoss

logger = get_logger(__name__, log_level="INFO")


@dataclass
class ModelArguments:
    teacher_name_or_path: str | None = None
    student_name_or_path: str | None = None


@dataclass
class TrainingArguments(transformers.TrainingArguments):
    do_train: bool = True
    do_eval: bool = True
    save_strategy: str = "no"
    max_length: int = 1024
    optim: str = "adamw_torch"
    learning_rate: float = 1e-5
    lr_scheduler_type: str = "cosine"
    dataloader_drop_last: bool = True
    dataset_num_proc: int = 8
    bf16: bool = True
    tf32: bool = True
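
# Note: both argument dataclasses above are parsed by HfArgumentParser in train(), so
# every field is also exposed as a command-line flag (e.g. --student_name_or_path,
# --learning_rate).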


def _format_smoltalk_chat_template(sample, tokenizer):
    # smol-smoltalk-Interaction-SFT dataset has "query" and "answer" fields
    # Convert them to messages format and use tokenizer's apply_chat_template
    messages = [
        {"role": "user", "content": sample["query"]},
        {"role": "assistant", "content": sample["answer"]},
    ]
    return tokenizer.apply_chat_template(messages, tokenize=False)
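

# Combine ModelOpt's KDTrainer with TRL's SFTTrainer. Listing KDTrainer first places its
# distillation-aware overrides ahead of SFTTrainer's in the method resolution order.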
class KDSFTTrainer(KDTrainer, SFTTrainer):
    pass


def train():
    parser = transformers.HfArgumentParser((ModelArguments, TrainingArguments))
    model_args, training_args = parser.parse_args_into_dataclasses()

    # Enable automatic save/load of modelopt state via HuggingFace checkpointing.
    # The modelopt state will be saved automatically to "modelopt_state.pth".
    mto.enable_huggingface_checkpointing()

    # Set total batch size across all ranks to equal 64
    total_batch_size = 64
    num_accum_steps = total_batch_size / (
        training_args.per_device_train_batch_size * torch.distributed.get_world_size()
    )
    if not num_accum_steps.is_integer():
        raise ValueError(
            f"`per_device_train_batch_size` * `world_size` must be a factor of {total_batch_size}"
        )
    training_args.gradient_accumulation_steps = int(num_accum_steps)
    logger.info(
        f"Using {int(num_accum_steps)} grad accumulation steps for effective batch size of {total_batch_size}."
    )
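    # Example: with 8 GPUs and per_device_train_batch_size=2, this yields
    # 64 / (2 * 8) = 4 gradient accumulation steps.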

    # Dataset
    logger.info("Loading dataset...")
    dset = datasets.load_dataset("ReactiveAI/smol-smoltalk-Interaction-SFT", split="train")
    dset_splits = dset.train_test_split(train_size=12800, test_size=1280, seed=420)
    dset_train, dset_eval = dset_splits["train"], dset_splits["test"]
    logger.info("Dataset loaded.")

    # Tokenizer
    logger.info("Loading tokenizer...")
    model_path = model_args.teacher_name_or_path or model_args.student_name_or_path
    tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "right"
    logger.info("Tokenizer loaded.")

    # Model(s)
    logger.info("Loading student model...")
    model = transformers.AutoModelForCausalLM.from_pretrained(
        model_args.student_name_or_path, dtype=torch.bfloat16 if training_args.bf16 else None
    )
    logger.info("Student loaded.")

    logger.info("Loading teacher model...")
    teacher_model = transformers.AutoModelForCausalLM.from_pretrained(
        model_args.teacher_name_or_path, dtype=torch.bfloat16 if training_args.bf16 else None
    )

    # Distillation configuration
    kd_config = {
        "teacher_model": teacher_model,
        "criterion": LMLogitsLoss(),
    }

    # Fix problematic generation-config settings that cause excessive warnings
    model.generation_config.temperature = None
    model.generation_config.top_p = None

    # Trainer
    trainer = KDSFTTrainer(
        model,
        training_args,
        distill_config=kd_config,
        train_dataset=dset_train,
        eval_dataset=dset_eval,
        formatting_func=lambda sample: _format_smoltalk_chat_template(sample, tokenizer),
        processing_class=tokenizer,
    )

    # Do training
    if training_args.do_train:
        logger.info("Beginning training...")
        trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
        logger.info("Training done.")

    # Do evaluation
    if training_args.do_eval:
        logger.info("Evaluating...")
        eval_results = trainer.evaluate()
        logger.info(eval_results)
        logger.info("Evaluation complete.")

    # Save checkpoint
    logger.info("Saving checkpoint...")
    trainer.save_state()
    trainer.save_model(trainer.args.output_dir)
    logger.info("Checkpoint saved.")


if __name__ == "__main__":
    train()
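
# Example launch (a sketch, not the project's documented command): the script reads its
# arguments via HfArgumentParser and calls torch.distributed.get_world_size(), so it is
# meant to be run under a distributed launcher. Model paths below are placeholders.
#
#   accelerate launch main.py \
#       --teacher_name_or_path <path/to/teacher_model> \
#       --student_name_or_path <path/to/student_model> \
#       --output_dir ./distill_output \
#       --per_device_train_batch_size 8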