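# grpo-deepseek-v3-32n8g.yaml
# GRPO recipe for DeepSeek-V3 on 32 nodes x 8 GPUs (256 GPUs total),
# inheriting base settings from grpo_math_1B.yaml and overriding them below.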
defaults: ../../../grpo_math_1B.yaml
grpo:
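  # 32 prompts x 16 generations = 512 rollouts per training step.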
num_prompts_per_step: 32
num_generations_per_prompt: 16
max_num_steps: 500
val_batch_size: 5
max_val_samples: 16
loss_fn:
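    # Reweight the loss by the ratio of trainer log-probs to the generation
    # backend's log-probs, correcting for numerical drift between vLLM
    # (rollouts) and Megatron (training).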
use_importance_sampling_correction: true
checkpointing:
checkpoint_dir: results/grpo-deepseek-v3-32n8g
policy:
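  # model_name is deliberately a pointer, not a path: prepare the DeepSeek-V3
  # checkpoint per the linked guide, then set this to the resulting model path.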
model_name: "Follow instructions in https://github.com/NVIDIA-NeMo/RL/blob/main/docs/guides/deepseek.md"
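  # Micro-batch size 1 for both training and log-prob computation keeps
  # per-GPU activation memory in check at this model scale.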
train_micro_batch_size: 1
logprob_batch_size: 1
max_total_sequence_length: 1536
make_sequence_length_divisible_by: 1
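  # Train with the Megatron-Core backend; the DTensor path is switched off.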
dtensor_cfg:
enabled: false
megatron_cfg:
enabled: true
empty_unused_memory_level: 1
converter_type: LlamaForCausalLM
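    # 16-way pipeline parallelism with 16-way expert parallelism for the MoE
    # layers; note 16 x 16 matches the 256-GPU cluster, with tensor
    # parallelism left at the base config's default.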
pipeline_model_parallel_size: 16
expert_model_parallel_size: 16
activation_checkpointing: true
moe_grouped_gemm: true
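    # Uneven pipeline split: 3 layers in the first stage and 2 in the last
    # leave headroom for the embedding and output/loss layers. Assuming
    # DeepSeek-V3's 61 decoder layers, the 14 middle stages hold 4 layers
    # each (3 + 14*4 + 2 = 61).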
num_layers_in_first_pipeline_stage: 3
num_layers_in_last_pipeline_stage: 2
apply_rope_fusion: false
defer_fp32_logits: true
      # Multi-token prediction (MTP) disabled
mtp_num_layers: 0
optimizer:
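      # Small learning rate (5e-7 decaying to min_lr 5e-8), warmed up over the
      # first 2 iterations from lr_warmup_init (see scheduler below), as is
      # typical for RL fine-tuning of a large pretrained model.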
lr: 5.0e-07
min_lr: 5.0e-08
weight_decay: 0.0
use_precision_aware_optimizer: true
scheduler:
lr_warmup_iters: 2
lr_warmup_init: 5.0e-08
fp8_cfg:
enabled: false
generation:
vllm_cfg:
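      # tensor_parallel_size 32 spreads each vLLM engine across 4 nodes
      # (8 GPUs each); at 256 GPUs that allows up to 256/32 = 8 concurrent
      # generation engines.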
tensor_parallel_size: 32
async_engine: true
logger:
log_dir: logs/grpo-deepseek-v3-32n8g
wandb_enabled: true
tensorboard_enabled: true
wandb:
project: nemo-rl
name: grpo-deepseek-v3-32n8g
cluster:
gpus_per_node: 8
num_nodes: 32