
Commit 069ad68

[None][fix] AutoDeploy: skip mxfp4_moe test unless on Hopper (NVIDIA#10729)
Signed-off-by: Fridah-nv <201670829+Fridah-nv@users.noreply.github.com>
1 parent: 0b748d5

1 file changed: 5 additions & 0 deletions

tests/unittest/_torch/auto_deploy/unit/multigpu/custom_ops/test_mxfp4_moe_ep.py
@@ -4,6 +4,7 @@
 import torch
 import torch.distributed as dist
 from _dist_test_utils import get_device_counts
+from utils.util import getSMVersion

 from tensorrt_llm._torch.auto_deploy.custom_ops.fused_moe.mxfp4_moe import (
     IS_TRITON_KERNELS_AVAILABLE,
@@ -109,6 +110,10 @@ def _rand_scales(shape):
     torch.testing.assert_close(part_out, ref_out, rtol=5e-2, atol=5e-2, equal_nan=True)


+@pytest.mark.skipif(
+    getSMVersion() != 90,
+    reason="triton_mxfp4_moe is only supported in Hopper architecture",
+)
 @pytest.mark.skipif(
     not IS_TRITON_KERNELS_AVAILABLE,
     reason="triton_kernels unavailable",
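
For context, here is a minimal, self-contained sketch of how the stacked skipif markers behave: pytest evaluates every skipif condition at collection time and skips the test if any one of them is truthy. The getSMVersion helper defined below is a hypothetical stand-in for the one imported from utils.util; it is assumed to return the GPU compute capability as a two-digit integer (90 for Hopper, 80 for Ampere).

# Minimal sketch, not the repository's actual helper or test.
import pytest
import torch


def getSMVersion() -> int:
    # Assumed behavior of utils.util.getSMVersion: return the compute
    # capability as major * 10 + minor, e.g. 90 on Hopper.
    if not torch.cuda.is_available():
        return 0
    major, minor = torch.cuda.get_device_capability()
    return major * 10 + minor


@pytest.mark.skipif(
    getSMVersion() != 90,
    reason="triton_mxfp4_moe is only supported in Hopper architecture",
)
@pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="CUDA unavailable",
)
def test_hopper_only():
    # Reached only when both conditions above are false,
    # i.e. on a CUDA machine with an SM 90 (Hopper) GPU.
    assert getSMVersion() == 90

Because the markers are independent, the order in which they are stacked does not matter; the first condition that evaluates truthy determines the skip reason reported by pytest.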
