# qwen3.5_moe_400b.yaml
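# Usage sketch (an assumption, not taken from this repo): this config is
# typically passed to trtllm-serve as extra LLM API options with the
# AutoDeploy backend. Flag names may differ across TensorRT-LLM versions,
# and the model path below is a placeholder:
#   trtllm-serve <path-to-qwen3.5-moe-400b-checkpoint> \
#     --backend _autodeploy \
#     --extra_llm_api_options qwen3.5_moe_400b.yaml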
runtime: trtllm
compile_backend: torch-cudagraph
attn_backend: trtllm
max_seq_len: 262144
max_num_tokens: 16000
max_batch_size: 256
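# Capture a dedicated CUDA graph per decode batch size: dense coverage of
# 1-32 for small-batch latency, plus sparse points (64/128/256) for larger
# batches (intermediate sizes are typically padded up to the nearest
# captured size).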
cuda_graph_config:
batch_sizes: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 64, 128, 256]
world_size: 8
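# Chunked prefill splits long prompts (up to the 262144-token max_seq_len)
# into chunks bounded by max_num_tokens, letting prefill interleave with
# ongoing decode steps.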
enable_chunked_prefill: true
# For text-only mode, use AutoModelForCausalLM until issue #12699 is resolved.
# Once it is resolved, consider unifying the factory to
# Qwen3_5MoeForConditionalGeneration for both VLM and text-only modes.
# model_factory: Qwen3_5MoeForConditionalGeneration
model_factory: AutoModelForCausalLM
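# Prefix-block reuse is disabled (an assumption: the hybrid GDN layers keep
# recurrent state that paged-KV block reuse cannot capture); 80% of free GPU
# memory is reserved for the cache, paged in 32-token blocks.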
kv_cache_config:
enable_block_reuse: false
free_gpu_memory_fraction: 0.8
tokens_per_block: 32
model_kwargs:
torch_dtype: bfloat16
transforms:
  # Disabled for the text-only use case (AutoModelForCausalLM does not
  # consume mrope_delta_cache).
initialize_mrope_delta_cache:
enabled: false
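  # An assumption: exporting only 2 experts keeps the torch.export graph
  # small and fast to trace; the full expert set is presumably restored
  # after export.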
export_to_gm:
num_moe_experts_for_export: 2
fuse_gemms_mixed_children:
enabled: true
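  # Use the trtllm_gen kernel backend for the fused NVFP4 MoE op.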
fuse_nvfp4_moe:
backend: trtllm_gen
detect_sharding:
    # For long inputs, tp8ep1 gives better performance:
    # dist_mapping: {moe_tp: 8, moe_ep: 1}
allreduce_strategy: SYMM_MEM
shard_all_unprocessed: true
simple_shard_filter: "lm_head"
    sharding_dims: ['tp', 'ep', 'bmm']
    # Use only the manual config for TP sharding.
    sharding_source: ['manual']
manual_config:
tp_plan:
# GDN layer
"in_proj_qkv": "delta"
# attention layer
"q_proj": "colwise"
"k_proj": "colwise"
"v_proj": "colwise"
"o_proj": "rowwise"
        # lm_head: "gather" = column split + all_gather (not "colwise", which
        # requires a LayerSubgraph and crashes for standalone unprocessed nodes)
"lm_head": "gather"
        # Shared experts are replicated; keep these entries commented out.
        # "shared_expert_gate_proj": "colwise"
        # "shared_expert_up_proj": "colwise"
        # "shared_expert_down_proj": "rowwise"
        # The gating layer should be replicated as well.
        # "gate": "gather"
multi_stream_moe:
stage: compile
enabled: true
multi_stream_gemm:
stage: compile
enabled: true
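  # An assumption: gathers only the token positions that need logits before
  # the lm_head matmul, avoiding a full-sequence projection.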
gather_logits_before_lm_head:
enabled: true
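  # An assumption: piecewise compilation captures CUDA graphs for the graph
  # segments between attention calls, leaving the dynamic attention kernels
  # outside the graphs.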
compile_model:
piecewise_enabled: true