# rlvr_lora_zero3.yaml (from alibaba/ROLL)
defaults:
  - ../config/deepspeed_zero@_here_
  - ../config/deepspeed_zero2@_here_
  - ../config/deepspeed_zero3@_here_
  - ../config/deepspeed_zero3_cpuoffload@_here_
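
# The Hydra defaults above merge the DeepSpeed preset files into this config;
# they are what make interpolations like ${deepspeed_zero3} (used by
# actor_train.strategy_args below) resolve.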

hydra:
  run:
    dir: .
  output_subdir: null

exp_name: "qwen2.5-7B-rlvr-config"
seed: 42
logging_dir: ./output/logs
output_dir: ./output
system_envs:
  USE_MODELSCOPE: '1'

checkpoint_config:
  type: file_system
  output_dir: /data/cpfs_0/rl_examples/models/${exp_name}

# track_with: wandb
# tracker_kwargs:
#   api_key:
#   project: roll_examples
#   notes: roll_examples
#   tags:
#     - rlvr
#     - baseline
track_with: tensorboard
tracker_kwargs:
  log_dir: /data/oss_bucket_0/rl_examples/llm/tensorboard/roll_exp/rlvr

num_gpus_per_node: 8
max_steps: 500
save_steps: 100
logging_steps: 1
eval_steps: 10
resume_from_checkpoint: false
rollout_batch_size: 64  # number of prompts sampled per rollout step
prompt_length: 2048
response_length: 4096
num_return_sequences_in_group: 8
ppo_epochs: 1
adv_estimator: "reinforce"
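# Effective rollout size: rollout_batch_size (64 prompts) x
# num_return_sequences_in_group (8) = 512 sampled responses per step.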
# clip
value_clip: 0.5
reward_clip: 10
advantage_clip: 2.0
dual_clip_loss: true
# normalize
norm_mean_type: ~
norm_std_type: ~
# data mask
max_len_mask: true
difficulty_mask: true
difficulty_low_threshold: 0.1
difficulty_high_threshold: 0.95
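# Presumably, prompts whose group success rate falls outside
# [difficulty_low_threshold, difficulty_high_threshold] are masked out:
# all-fail and near-all-pass groups carry little learning signal.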
error_max_len_clip: false
# data weight
difficulty_loss_weight: false
length_loss_weight: false
# reward
add_token_level_kl: false
# advantage
whiten_advantages: true
# lora
lora_target: o_proj,q_proj,k_proj,v_proj
lora_rank: 32
lora_alpha: 32
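# With lora_alpha == lora_rank, the LoRA scaling factor (alpha/rank) is 1.0.
# Only the attention projections (q/k/v/o) are adapted; MLP layers keep their
# base weights.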
# dynamic sampling scheduler
# use_additional_prompts: true
# max_running_requests: 256
# is_num_return_sequences_expand: false
pretrain: /data/cpfs_0/common/models/Qwen2.5-7B
reward_pretrain: /data/cpfs_0/common/models/Qwen2.5-7B

validation:
  data_args:
    template: qwen2_5
    file_name:
      - data/math_benchmarks.jsonl
  generating_args:
    max_new_tokens: ${response_length}
    top_p: 0.6
    top_k: 50
    num_beams: 1
    temperature: 0.6
    num_return_sequences: 1
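  # A single sample per prompt at temperature 0.6 (vs 0.99 for the training
  # rollouts below) should keep validation scores relatively stable.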

actor_train:
  model_args:
    attn_implementation: fa2
    # Recomputed tensor size does not match for LoRA with ZeRO-3 when
    # activation checkpointing is enabled; see
    # https://github.com/huggingface/transformers/issues/34928 for details.
    disable_gradient_checkpointing: true
    dtype: bf16
    lora_target: ${lora_target}
    lora_rank: ${lora_rank}
    lora_alpha: ${lora_alpha}
    model_type: ~
  training_args:
    learning_rate: 1.0e-5
    weight_decay: 0
    per_device_train_batch_size: 1
    gradient_accumulation_steps: 32
    warmup_steps: 20
    num_train_epochs: 50
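  # Global batch per optimizer step: 1 (per device) x 32 (accumulation) x
  # 16 training ranks (device_mapping below) = 512 sequences, matching the
  # 64 x 8 = 512 responses produced per rollout step.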
  data_args:
    template: qwen2_5
    file_name:
      - data/code_KodCode_data.jsonl
      - data/llm_judge_Multi-subject-RLVR_deal_new.jsonl
      - data/math_deepmath_deal.jsonl
      - data/general_ifeval_train_deal.jsonl
      - data/general_CrossThink-QA_deal.jsonl
    domain_interleave_probs:
      math_rule: 0.4
      code_sandbox: 0.3
      llm_judge: 0.1
      crossthinkqa: 0.1
      ifeval: 0.1
    dataset_dir: data
    messages: messages
    interleave_probs: "1.0"
    preprocessing_num_workers: 16
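  # The five domain weights sum to 1.0, and their keys mirror the worker names
  # under rewards: below; together with each worker's tag_included list this
  # appears to be how samples are routed to the right verifier.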
  strategy_args:
    strategy_name: deepspeed_train
    strategy_config: ${deepspeed_zero3}
  device_mapping: list(range(0,16))
  infer_batch_size: 4
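# With num_gpus_per_node: 8, range(0,16) spans two nodes; ZeRO-3 shards
# parameters, gradients, and optimizer state across all 16 training ranks.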

actor_infer:
  model_args:
    attn_implementation: fa2
    disable_gradient_checkpointing: true
    dtype: bf16
    lora_target: ${lora_target}
    lora_rank: ${lora_rank}
    lora_alpha: ${lora_alpha}
  generating_args:
    max_new_tokens: ${response_length}
    top_p: 0.99
    top_k: 100
    num_beams: 1
    temperature: 0.99
    num_return_sequences: ${num_return_sequences_in_group}
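  # Sampling close to the raw model distribution (temperature and top_p at
  # 0.99) keeps the 8 responses per group diverse; group-based advantage
  # estimates degenerate when all samples in a group agree.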
  data_args:
    template: qwen2_5
  strategy_args:
    strategy_name: vllm
    strategy_config:
      gpu_memory_utilization: 0.6
      # https://github.com/vllm-project/vllm/issues/9452
      enforce_eager: false
      block_size: 16
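      # 8000 leaves headroom over prompt_length + response_length = 6144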
      max_model_len: 8000
  device_mapping: list(range(0,12))
  infer_batch_size: 1

reference:
  model_args:
    attn_implementation: fa2
    disable_gradient_checkpointing: true
    dtype: bf16
    model_type: ~
  data_args:
    template: qwen2_5
  strategy_args:
    strategy_name: hf_infer
    strategy_config: ~
  device_mapping: list(range(0,16))
  infer_batch_size: 8
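# The reference model serves as the frozen baseline policy for log-prob
# comparisons; note add_token_level_kl is false above, so a token-level KL
# reward term is disabled in this particular config.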

rewards:
  crossthinkqa:
    worker_cls: roll.pipeline.rlvr.rewards.crossthinkqa_rule_reward_worker.CrossThinkQARuleRewardWorker
    reward_type: soft
    response_length_penalty_coef: 0.0
    model_args:
      model_name_or_path: ${reward_pretrain}
    data_args:
      template: qwen2_5
    tag_included: [crossthinkqa]
    world_size: 8
    infer_batch_size: 4
  ifeval:
    worker_cls: roll.pipeline.rlvr.rewards.ifeval_rule_reward_worker.GeneralRuleRewardWorker
    reward_type: soft
    model_args:
      model_name_or_path: ${reward_pretrain}
    data_args:
      template: qwen2_5
    tag_included: [ifeval]
    world_size: 8
    infer_batch_size: 4
  math_rule:
    worker_cls: roll.pipeline.rlvr.rewards.math_rule_reward_worker.MathRuleRewardWorker
    model_args:
      model_name_or_path: ${reward_pretrain}
    data_args:
      template: qwen2_5
    tag_included: [deepmath_103k, aime]
    world_size: 8
    infer_batch_size: 1
    # dynamic filter config
    # query_filter_config:
    #   type: mean_filter
    #   filter_args:
    #     threshold_up: 0.9
    #     threshold_down: 0.1
  code_sandbox:
    use_local: true
    worker_cls: roll.pipeline.rlvr.rewards.code_sandbox_reward_worker.CodeSandboxRewardWorker
    tag_included: [KodCode]
    model_args:
      model_name_or_path: ${reward_pretrain}
    data_args:
      template: qwen2_5
    world_size: 8
    infer_batch_size: 1
    # query_filter_config:
    #   type: std_filter
    #   filter_args:
    #     std_threshold: 0
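    # use_local: true appears to execute the generated code in a local sandbox
    # process instead of a remote execution service.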
  llm_judge:
    # NOTE: LLM-as-judge also needs GPUs and cannot share GPUs with actor_infer
    worker_cls: roll.pipeline.rlvr.rewards.llm_judge_reward_worker.LLMJudgeRewardWorker
    judge_prompt: Qwen2.5-7B-Instruct-RLVR-prompt
    judge_model_type: inference
    tag_included: [RLVR]
    model_args:
      model_name_or_path: /data/cpfs_0/common/models/Qwen2.5-7B-Instruct-RLVR/
      attn_implementation: fa2
      disable_gradient_checkpointing: true
      dtype: bf16
      model_type: trl
    generating_args:
      max_new_tokens: 100
      top_p: 0.8
      top_k: 50
      num_beams: 1
      temperature: 0.8
      num_return_sequences: 1
    data_args:
      template: qwen2_5
    strategy_args:
      strategy_name: hf_infer
      strategy_config: null
    device_mapping: list(range(12,16))
    infer_batch_size: 4
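
# GPU layout summary: GPUs 0-15 run ZeRO-3 training and the reference model,
# GPUs 0-11 serve vLLM rollouts, and GPUs 12-15 host the LLM judge, keeping
# the judge off the actor_infer devices as the NOTE above requires.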