
Commit 925f89a

Fix(test): Skip MLA backend test on pre-Hopper (< sm_90) GPUs
1 parent 41c10e6 commit 925f89a

File tree

1 file changed (+5, -1 lines)


python/sglang/test/attention/test_flashattn_mla_backend.py

Lines changed: 5 additions & 1 deletion
@@ -1,5 +1,6 @@
 import unittest
 
+import pytest
 import torch
 
 from sglang.srt.configs.model_config import AttentionArch
@@ -67,7 +68,10 @@ def __init__(self, batch_size, seq_len, device):
     )
 
 
-@unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA")
+@pytest.mark.skipif(
+    not torch.cuda.is_available() or torch.cuda.get_device_capability()[0] < 9,
+    reason="Test requires Hopper (sm_90) or newer GPU.",
+)
 class TestFlashAttentionMLABackend(CustomTestCase):
     def setUp(self):
         # Test parameters
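For context, a brief sketch of the capability check this commit relies on: torch.cuda.get_device_capability() returns a (major, minor) compute-capability tuple, and Hopper (sm_90) reports a major version of 9, so "major < 9" identifies pre-Hopper devices. The short-circuiting "or" in the skip condition also prevents get_device_capability() from being called on CUDA-less machines. The helper below is illustrative only, not part of the commit:

import torch

def is_hopper_or_newer() -> bool:
    # True only when a CUDA device with compute capability >= 9.0
    # (Hopper, sm_90) is present; False on CPU-only machines.
    if not torch.cuda.is_available():
        return False
    major, _minor = torch.cuda.get_device_capability()
    return major >= 9

# e.g. an A100 (sm_80) reports (8, 0), so the test is skipped;
# an H100 (sm_90) reports (9, 0), so the test runs.

One caveat about the switch from unittest.skipIf to pytest.mark.skipif: pytest marks are honored when the suite runs under pytest, but the plain unittest runner ignores them, so the skip only takes effect in pytest-driven runs.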
