# Adapted from https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/configs/kimi_linear.py
# (which itself is adapted from vLLM's kimi_linear config).
from transformers.configuration_utils import PretrainedConfig


class KimiLinearConfig(PretrainedConfig):
    model_type = "kimi_linear"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        model_type="kimi_linear",
        vocab_size=163840,
        hidden_size=4096,
        head_dim=None,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        rope_theta=10000.0,
        rope_scaling=None,
        tie_word_embeddings=False,
        moe_intermediate_size: int | None = None,
        moe_renormalize: bool = True,
        moe_router_activation_func: str = "sigmoid",
        num_experts: int | None = None,
        num_experts_per_token: int | None = None,
        num_shared_experts: int = 0,
        routed_scaling_factor: float = 1.0,
        first_k_dense_replace: int = 0,
        moe_layer_freq: int = 1,
        use_grouped_topk: bool = True,
        num_expert_group: int = 1,
        topk_group: int = 1,
        q_lora_rank: int | None = None,
        kv_lora_rank: int | None = None,
        qk_nope_head_dim: int | None = None,
        qk_rope_head_dim: int | None = None,
        v_head_dim: int | None = None,
        mla_use_nope: bool | None = False,
        num_nextn_predict_layers: int = 0,
        linear_attn_config: dict | None = None,
        **kwargs,
    ):
        self.model_type = model_type
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.head_dim = head_dim if head_dim is not None else hidden_size // num_attention_heads
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling

        # MLA config (setting any of these marks the model as MLA; see `is_mla`)
        self.q_lora_rank = q_lora_rank
        self.kv_lora_rank = kv_lora_rank
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.mla_use_nope = mla_use_nope

        # MoE config
        self.n_routed_experts = self.num_experts = num_experts
        self.num_experts_per_token = num_experts_per_token
        self.moe_renormalize = moe_renormalize
        self.num_shared_experts = num_shared_experts
        self.routed_scaling_factor = routed_scaling_factor
        self.moe_router_activation_func = moe_router_activation_func
        assert self.moe_router_activation_func in ("softmax", "sigmoid")
        self.moe_intermediate_size = moe_intermediate_size
        self.first_k_dense_replace = first_k_dense_replace
        self.moe_layer_freq = moe_layer_freq
        self.use_grouped_topk = use_grouped_topk
        self.num_expert_group = num_expert_group
        self.topk_group = topk_group
        self.num_nextn_predict_layers = num_nextn_predict_layers

        # Linear-attention (KDA) config: both layer lists must be present if provided.
        if linear_attn_config is not None:
            assert linear_attn_config["kda_layers"] is not None
            assert linear_attn_config["full_attn_layers"] is not None
        self.linear_attn_config = linear_attn_config

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    @property
    def is_mla(self):
        # The model uses MLA attention if any MLA-specific field is configured.
        return (
            self.q_lora_rank is not None
            or self.kv_lora_rank is not None
            or self.qk_nope_head_dim is not None
            or self.qk_rope_head_dim is not None
            or self.v_head_dim is not None
            or self.mla_use_nope is True
        )

    @property
    def is_moe(self):
        return self.num_experts is not None

    @property
    def is_linear_attn(self) -> bool:
        # Linear attention is enabled unless the config is missing or its
        # "kda_layers" list is explicitly empty.
        return not (
            self.linear_attn_config is None
            or (
                isinstance(self.linear_attn_config, dict)
                and self.linear_attn_config["kda_layers"] is not None
                and len(self.linear_attn_config["kda_layers"]) == 0
            )
        )

    def is_kda_layer(self, layer_idx: int):
        # "kda_layers" holds 1-indexed layer numbers, while layer_idx is 0-indexed.
        return (
            self.linear_attn_config is not None
            and (layer_idx + 1) in self.linear_attn_config["kda_layers"]
        )

    @property
    def linear_layer_ids(self):
        return [i for i in range(self.num_hidden_layers) if self.is_kda_layer(i)]

    @property
    def full_attention_layer_ids(self):
        return [i for i in range(self.num_hidden_layers) if not self.is_kda_layer(i)]
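

# Illustrative usage sketch: the values below are made-up toy settings, not the
# released Kimi Linear checkpoint's configuration. It shows how the 1-indexed
# "kda_layers" / "full_attn_layers" lists in `linear_attn_config` drive the
# layer-type helpers above.
if __name__ == "__main__":
    cfg = KimiLinearConfig(
        num_hidden_layers=4,
        num_experts=8,
        num_experts_per_token=2,
        linear_attn_config={
            "kda_layers": [1, 2, 3],     # 1-indexed: layers 1-3 use KDA (linear attention)
            "full_attn_layers": [4],     # layer 4 keeps full attention
        },
    )
    print(cfg.is_linear_attn)            # True
    print(cfg.is_moe)                    # True
    print(cfg.is_mla)                    # False (no MLA fields were set)
    print(cfg.linear_layer_ids)          # [0, 1, 2]  (0-indexed layer ids)
    print(cfg.full_attention_layer_ids)  # [3]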