Skip to content

Commit 5ea1f4f

Browse files
[Bugfix] Fix attn_metadata being None
Signed-off-by: chenmenglong <[email protected]>
1 parent a5cb8e4 commit 5ea1f4f

File tree

6 files changed

+8
-10
lines changed

6 files changed

+8
-10
lines changed

vllm_ascend/spec_decode/eagle_proposer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ def load_model(self, model: nn.Module) -> None:
117117
def dummy_run(self,
118118
num_tokens: int,
119119
with_prefill: bool = False,
120-
skip_attn: bool = False,
120+
in_graph_capturing: bool = False,
121121
num_reqs: int = 0,
122122
num_tokens_across_dp: Optional[torch.Tensor] = None,
123123
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,

vllm_ascend/spec_decode/interface.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ def load_model(self, model):
3232
def dummy_run(self,
3333
num_tokens: int,
3434
with_prefill: bool = False,
35-
skip_attn: bool = False,
35+
in_graph_capturing: bool = False,
3636
num_reqs: int = 0,
3737
num_tokens_across_dp: Optional[torch.Tensor] = None,
3838
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,

vllm_ascend/spec_decode/mtp_proposer.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -227,7 +227,7 @@ def load_model(self, model) -> None:
227227
def dummy_run(self,
228228
num_tokens: int,
229229
with_prefill: bool = False,
230-
skip_attn: bool = False,
230+
in_graph_capturing: bool = False,
231231
num_reqs: int = 0,
232232
num_tokens_across_dp=None,
233233
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
@@ -251,9 +251,7 @@ def dummy_run(self,
251251
moe_comm_type = (MoECommType.ALLTOALL if moe_comm_type
252252
== MoECommType.FUSED_ALLTOALL else moe_comm_type)
253253

254-
if skip_attn:
255-
attn_metadata = None
256-
elif aclgraph_runtime_mode == CUDAGraphMode.FULL:
254+
if aclgraph_runtime_mode == CUDAGraphMode.FULL:
257255
if len(self.runner.attn_groups) > 0:
258256
num_computed_tokens_cpu = (
259257
self.runner.input_batch.
@@ -298,7 +296,7 @@ def dummy_run(self,
298296
positions = self.positions[:num_tokens]
299297
previous_hidden_states = self.hidden_states[:num_tokens]
300298
for i in range(self.num_speculative_tokens):
301-
if i > 0 and not skip_attn and aclgraph_runtime_mode == CUDAGraphMode.FULL:
299+
if i > 0 and not in_graph_capturing and aclgraph_runtime_mode == CUDAGraphMode.FULL:
302300
aclgraph_runtime_mode = CUDAGraphMode.NONE
303301
with set_ascend_forward_context(
304302
attn_metadata,

vllm_ascend/spec_decode/ngram_proposer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ def load_model(self, *args, **kwargs):
2222
def dummy_run(self,
2323
num_tokens,
2424
with_prefill=None,
25-
skip_attn=None,
25+
in_graph_capturing=None,
2626
num_reqs=None,
2727
num_tokens_across_dp=None,
2828
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,

vllm_ascend/spec_decode/suffix_proposer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ def load_model(self, *args, **kwargs):
2222
def dummy_run(self,
2323
num_tokens,
2424
with_prefill=None,
25-
skip_attn=None,
25+
in_graph_capturing=None,
2626
num_reqs=None,
2727
num_tokens_across_dp=None,
2828
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,

vllm_ascend/worker/model_runner_v1.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2296,7 +2296,7 @@ def dummy_drafter_compute_logits(hidden_states):
22962296
aclgraph_runtime_mode=aclgraph_runtime_mode,
22972297
batch_descriptor=batch_descriptor,
22982298
dummy_compute_logits=dummy_drafter_compute_logits,
2299-
skip_attn=not force_attention)
2299+
in_graph_capturing=not force_attention)
23002300
if self.in_profile_run and self.dynamic_eplb:
23012301
self.model.clear_all_moe_loads()
23022302
if not self.in_profile_run and self.dynamic_eplb:

0 commit comments

Comments
 (0)