# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
"""Sample Generate GPT."""
import functools
import os
import sys
import warnings
import datasets
import logging
import torch.distributed as dist
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../")))
import torch
from diskcache import Cache
from megatron.post_training.arguments import add_modelopt_args
from megatron.post_training.checkpointing import load_modelopt_checkpoint
from megatron.post_training.generate import simple_generate
from megatron.post_training.model_builder import modelopt_gpt_mamba_builder
from megatron.post_training.utils import report_current_memory_info
from megatron.training import get_args, get_model, get_tokenizer, initialize_megatron
from megatron.training.utils import print_rank_0, unwrap_model
import modelopt.torch.quantization as mtq
from model_provider import model_provider
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # set to debug if you need more logging
warnings.filterwarnings('ignore')


def add_mmlu_args(parser):
    """Add additional arguments for the ModelOpt MMLU evaluation."""
    group = parser.add_argument_group(title='ModelOpt MMLU evaluation')
    group.add_argument("--disable-tqdm", action="store_true", help="Disable tqdm.")
    group.add_argument("--fraction", type=float, default=1.0, help="Fraction of dataset to use.")
    group.add_argument("--lower-bound", type=float, default=None, help="If set, assert that the average accuracy exceeds this value.")
    group.add_argument("--no-subject-prompt", action="store_true", help="Use empty prompt instead of subject-based prompt.")
    group.add_argument("--mmlu-dataset", type=str, default="cais/mmlu", help="The default dataset to use is cais/mmlu from the HF hub.")
    group.add_argument("--cache-dir", type=str, default=None, help="diskcache directory used to cache per-prompt predictions.")
    add_modelopt_args(parser)
    return parser
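

# A hypothetical launch sketch, not a verbatim command: the exact Megatron arguments
# depend on your checkpoint, tokenizer, and parallelism setup, and the paths and sizes
# below are placeholders. Only --fraction, --lower-bound, --no-subject-prompt,
# --mmlu-dataset, --cache-dir, and --disable-tqdm are defined by this script.
#
#   torchrun --nproc_per_node 8 mmlu.py \
#       --load /path/to/modelopt_checkpoint \
#       --tensor-model-parallel-size 8 \
#       --tokenizer-model <hf-tokenizer-name> \
#       --fraction 0.25 \
#       --lower-bound 0.6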


def get_all_subjects():
    """Return all MMLU subjects."""
    return [
        'abstract_algebra',
        'anatomy',
        'astronomy',
        'business_ethics',
        'clinical_knowledge',
        'college_biology',
        'college_chemistry',
        'college_computer_science',
        'college_mathematics',
        'college_medicine',
        'college_physics',
        'computer_security',
        'conceptual_physics',
        'econometrics',
        'electrical_engineering',
        'elementary_mathematics',
        'formal_logic',
        'global_facts',
        'high_school_biology',
        'high_school_chemistry',
        'high_school_computer_science',
        'high_school_european_history',
        'high_school_geography',
        'high_school_government_and_politics',
        'high_school_macroeconomics',
        'high_school_mathematics',
        'high_school_microeconomics',
        'high_school_physics',
        'high_school_psychology',
        'high_school_statistics',
        'high_school_us_history',
        'high_school_world_history',
        'human_aging',
        'human_sexuality',
        'international_law',
        'jurisprudence',
        'logical_fallacies',
        'machine_learning',
        'management',
        'marketing',
        'medical_genetics',
        'miscellaneous',
        'moral_disputes',
        'moral_scenarios',
        'nutrition',
        'philosophy',
        'prehistory',
        'professional_accounting',
        'professional_law',
        'professional_medicine',
        'professional_psychology',
        'public_relations',
        'security_studies',
        'sociology',
        'us_foreign_policy',
        'virology',
        'world_religions',
    ]


def format_example(example, include_answer: bool = True):
    """Format an example into a multiple-choice problem."""
    prompt = example["question"]
    for choice, answer in zip(["A", "B", "C", "D"], example["choices"]):
        prompt += "\n{}. {}".format(choice, answer)
    if include_answer:
        prompt += "\nAnswer: {}\n\n".format(["A", "B", "C", "D"][example["answer"]])
    else:
        prompt += "\nAnswer:"
    return prompt
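

# For illustration only, format_example produces text shaped like the following
# (the question and choices here are made up, not taken from MMLU):
#
#   What is 2 + 2?
#   A. 3
#   B. 4
#   C. 5
#   D. 6
#   Answer: B
#
# With include_answer=False the text ends at "Answer:" so the model is asked to
# complete it with a single letter.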


def generate_prompt(test_example, dev_examples, few_shots=0, no_subject_prompt=False):
    """Generate a (few-shot) prompt for a test example."""
    if no_subject_prompt:
        prompt = ""
    else:
        prompt = "The following are multiple choice questions (with answers) about {}.\n\n".format(
            " ".join(test_example["subject"].split("_"))
        )
    for i in range(few_shots):
        prompt += format_example(dev_examples[i])
    prompt += format_example(test_example, include_answer=False)
    return prompt
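

# Unless --no-subject-prompt is given, the prompt starts with a subject header derived
# from the dataset split name, e.g. for "high_school_physics":
#
#   The following are multiple choice questions (with answers) about high school physics.
#
# followed by few_shots formatted dev examples (zero in the loop below) and the
# unanswered test example.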


if __name__ == "__main__":
    initialize_megatron(
        extra_args_provider=add_mmlu_args,
        args_defaults={
            'tokenizer_type': 'HuggingFaceTokenizer',
            'no_load_rng': True,
            'no_load_optim': True,
        },
    )
    args = get_args()
    cache = Cache(args.cache_dir)

    # Meta device initialization for ParallelLinear only works if using cpu initialization.
    # Meta device initialization is used such that models can be materialized in low-precision
    # directly when ModelOpt real quant is used. Otherwise, the model is first initialized
    # as BF16 in memory which may result in OOM and defeat the purpose of real quant.
    if args.init_model_with_meta_device:
        args.use_cpu_initialization = True
    else:
        warnings.warn(
            "--init-model-with-meta-device is not set. If you would like to resume the "
            "model in low-bit directly (low-memory initialization and skipping 16-bit), "
            "--init-model-with-meta-device must be set.",
            UserWarning,
        )

    model = get_model(functools.partial(model_provider, modelopt_gpt_mamba_builder), wrap_with_ddp=False)
    report_current_memory_info()

    # Materialize the model from meta device to gpu before loading the checkpoint.
    unwrapped_model = unwrap_model(model)[0]
    unwrapped_model.eval()
    unwrapped_model.to_empty(device="cuda")
    report_current_memory_info()

    disable_tqdm = args.disable_tqdm or torch.distributed.get_rank() > 0
    tokenizer = get_tokenizer()._tokenizer

    if args.load is not None:
        load_modelopt_checkpoint(model, strict=not args.untie_embeddings_and_output_weights)
        print_rank_0("Done loading checkpoint")

    # Fold the quantization scalars into the weights for speedup.
    #
    # [TODO]: fold_weight currently assumes every weight_quantizer has a weight
    # allocated; this does not hold when share_embeddings_and_output_weights is True
    # (the output layer then reuses the embedding weight), so folding is only applied
    # when the weights are untied.
    if not getattr(unwrapped_model, "share_embeddings_and_output_weights", False):
        mtq.fold_weight(unwrapped_model)

    all_subjects = get_all_subjects()
    all_correct = {}

    for subject in all_subjects:
        test_data = datasets.load_dataset(args.mmlu_dataset, subject, split="test")
        dev_data = datasets.load_dataset(args.mmlu_dataset, subject, split="dev")
        correct = []
        for idx, test_example in enumerate(test_data):
            if idx > args.fraction * len(test_data):
                break
            label = ["A", "B", "C", "D"][test_example["answer"]]
            prompt = generate_prompt(test_example, dev_data, few_shots=0, no_subject_prompt=args.no_subject_prompt)
            cache_key = f"{args.load}_{subject}_{prompt}"  # model name, subject, prompt
            if cache_key in cache:
                predict = cache[cache_key]
                if dist.get_rank() == 0:
                    logger.debug(f"Cache hit for {args.load}_{subject}")
            else:
                tokens = tokenizer(prompt, return_tensors="pt")
                with torch.no_grad():
                    generated_ids = simple_generate(
                        unwrapped_model, tokens.input_ids.cuda(), osl=2, disable_tqdm=disable_tqdm
                    )
                predict = tokenizer.batch_decode(generated_ids)[0].strip()
                if torch.distributed.get_rank() == 0:
                    cache.add(cache_key, predict)
            correct += [True] if predict.startswith(label) else [False]
        all_correct[subject] = correct
        if torch.distributed.get_rank() == 0:
            print(
                "{:48}| {:.3f} | {:5}/{:5}".format(
                    subject, sum(correct) / len(correct), sum(correct), len(correct)
                ),
                flush=True,
            )

    avg_correct = []
    for subject, correct in all_correct.items():
        avg_correct += correct

    if torch.distributed.get_rank() == 0:
        print(
            "{:48}| {:.3f} | {:5}/{:5}".format(
                "average", sum(avg_correct) / len(avg_correct), sum(avg_correct), len(avg_correct)
            ),
            flush=True,
        )

    if args.lower_bound is not None:
        assert sum(avg_correct) / len(avg_correct) > args.lower_bound