-
Notifications
You must be signed in to change notification settings - Fork 1.3k
Expand file tree
/
Copy pathpacking.sh
More file actions
52 lines (52 loc) · 1.52 KB
/
packing.sh
File metadata and controls
52 lines (52 loc) · 1.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
# Launch script for a Megatron-backend SFT run via the `megatron sft` CLI
# (presumably ms-swift's Megatron integration — confirm against the repo docs).
#
# Hardware: 8 GPUs with 80 GiB each (NPROC_PER_NODE=8, CUDA_VISIBLE_DEVICES=0-7).
#
# Environment prefixes:
#   PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True — reduces allocator
#     fragmentation for long, variable-length packed sequences.
#   MAX_PIXELS / VIDEO_MAX_PIXELS / FPS_MAX_FRAMES — caps on image/video input
#     resolution and sampled frames; their presence suggests the model is
#     multimodal (consistent with --freeze_vit/--freeze_aligner below) — verify.
#   SWIFT_USE_MCORE_GDN=1 — framework feature flag; semantics not visible here.
#
# Training setup (as the flags below literally state):
#   * Full-parameter fine-tune (--tuner_type full, --freeze_llm false) of
#     Qwen/Qwen3.5-35B-A3B on the AI-ModelScope/LongAlpaca-12k dataset,
#     with the vision tower and aligner frozen.
#   * Sequence packing enabled (--packing true) at --max_length 32768.
#   * Parallelism: tensor-parallel 4, expert-parallel 8; with 8 ranks this
#     implies DP=2 for dense layers (EP=8 divides TP*DP=8).
#   * Memory trade-offs: full uniform activation recomputation (1 layer per
#     chunk), CPU-offloaded precision-aware optimizer with offload fraction
#     0.64, micro batch 1 / global batch 4.
#   * MoE options: grouped GEMM, permute fusion, shared-expert overlap,
#     aux loss coeff 1e-6, expert capacity factor 2 (capacity-limited routing
#     may drop tokens — NOTE(review): confirm this is intended with packing).
#   * --mtp_num_layers 1 enables a multi-token-prediction head — presumably
#     matching the base model's architecture; verify.
#   * Checkpointing: safetensors, every 200 steps, optimizer/RNG state not
#     saved (--no_save_optim/--no_save_rng), so runs are not resumable
#     from these checkpoints.
#
# NOTE: all lines below form ONE command — env-var prefixes followed by the
# `megatron sft` invocation, joined by backslash continuations. Do not insert
# comments or blank lines between them.
# 8 * 80GiB
PYTORCH_CUDA_ALLOC_CONF='expandable_segments:True' \
NPROC_PER_NODE=8 \
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
MAX_PIXELS=1003520 \
VIDEO_MAX_PIXELS=50176 \
FPS_MAX_FRAMES=12 \
SWIFT_USE_MCORE_GDN=1 \
megatron sft \
    --model Qwen/Qwen3.5-35B-A3B \
    --save_safetensors true \
    --dataset 'AI-ModelScope/LongAlpaca-12k' \
    --load_from_cache_file true \
    --add_non_thinking_prefix true \
    --split_dataset_ratio 0.01 \
    --tuner_type full \
    --tensor_model_parallel_size 4 \
    --expert_model_parallel_size 8 \
    --moe_permute_fusion true \
    --moe_grouped_gemm true \
    --moe_shared_expert_overlap true \
    --moe_aux_loss_coeff 1e-6 \
    --micro_batch_size 1 \
    --global_batch_size 4 \
    --recompute_granularity full \
    --recompute_method uniform \
    --recompute_num_layers 1 \
    --num_train_epochs 1 \
    --packing true \
    --finetune true \
    --freeze_llm false \
    --freeze_vit true \
    --freeze_aligner true \
    --cross_entropy_loss_fusion true \
    --lr 1e-5 \
    --lr_warmup_fraction 0.05 \
    --min_lr 1e-6 \
    --output_dir megatron_output/Qwen3.5-35B-A3B \
    --eval_steps 200 \
    --save_steps 200 \
    --max_length 32768 \
    --dataloader_num_workers 8 \
    --dataset_num_proc 8 \
    --no_save_optim true \
    --no_save_rng true \
    --sequence_parallel true \
    --moe_expert_capacity_factor 2 \
    --mtp_num_layers 1 \
    --optimizer_cpu_offload true \
    --use_precision_aware_optimizer true \
    --optimizer_offload_fraction 0.64 \
    --attention_backend flash