Skip to content

Commit 47513ae

Browse files
committed
Update on "[nvfp4] Make per_tensor_scale optional for triton kernel path"
Summary: MSLK now supports optional global scale in its triton quantize kernel (MSLK#233, commit c01f06c). This change relaxes the corresponding constraint in torchao so the triton kernel path can be used without a per_tensor_scale (single-level block-wise scaling only). Changes: - Remove `assert per_tensor_scale is not None` from `to_nvfp4` triton branch - Update `mslk_quantize_nvfp4` and its custom op to accept `Optional[torch.Tensor]`, passing `None` through to MSLK (which treats it as global_scale=1.0) - Relax `_addmm_nvfp4_dispatch` to allow mixed per_tensor_scale states between operands (treat None as 1.0) instead of asserting both-or-neither Test Plan: Requires SM100+ GPU with MSLK nightly installed. ``` python -m pytest test/prototype/mx_formats/test_nvfp4_tensor.py::test_triton_nvfp4_quantize_equivalence -v python -m pytest test/prototype/mx_formats/test_nvfp4_tensor.py::test_nvfp4_matmul_optional_per_tensor_scale -v python -m pytest test/prototype/mx_formats/test_inference_workflow.py::test_inference_workflow_nvfp4 -k "test_inference_workflow_nvfp4" -v ``` Performance: with global scale: ``` python benchmarks/float8/float8_inference_roofline.py --recipe_name nvfp4 --enable_fusion_modeling True --skip_printing_detailed_metrics True Parameter Value ---------------------- ------------------------ GPU NVIDIA GB200 torch version 2.12.0.dev20260316+cu128 torchao version 0.17.0+git95281b63b recipe_name nvfp4 do_benchmarks True shape_gen_name pow2 enable_fusion_modeling True op_name linear MKN None None None DHW None None None kernel_size stride 1 padding 0 bf16_gemm_time_sympy Max(2.0e-6, 1.13960113960114e-15*K*M*N, 2.71739130434783e-13*K*M + 2.71739130434783e-13*K*N + 2.71739130434783e-13*M*N) bf16_ovhd_time_sympy Max(2.0e-6, 5.43478260869565e-13*K*M) fp8_gemm_time_sympy Max(2.0e-6, 2.84900284900285e-16*K*M*N, 6.79347826086956e-14*K*M + 6.79347826086956e-14*K*N + 2.71739130434783e-13*M*N + 6.79347826086956e-14*floor(K*M/16 + K*N/16)) fp8_ovhd_time_sympy 
Max(2.0e-6, 6.11413043478261e-13*K*M + 1.35869565217391e-13*M*floor(K/16)) fwd_M fwd_K fwd_N r_fp8_gemm_and_ovhd_spdp b_fp8_e2e_spdp 0 1024 1024 1024 1.00 0.45 1 2048 2048 2048 2.39 0.66 2 4096 4096 4096 2.92 1.29 3 8192 8192 8192 3.34 1.74 4 16384 16384 16384 3.63 2.84 ``` without global scale: ``` python benchmarks/float8/float8_inference_roofline.py --recipe_name nvfp4_no_global_scale --enable_fusion_modeling True --skip_printing_detailed_metrics True Parameter Value ---------------------- ------------------------ GPU NVIDIA GB200 torch version 2.12.0.dev20260316+cu128 torchao version 0.17.0+gitabb103d3b recipe_name nvfp4_no_global_scale do_benchmarks True shape_gen_name pow2 enable_fusion_modeling True op_name linear MKN None None None DHW None None None kernel_size stride 1 padding 0 bf16_gemm_time_sympy Max(2.0e-6, 1.13960113960114e-15*K*M*N, 2.71739130434783e-13*K*M + 2.71739130434783e-13*K*N + 2.71739130434783e-13*M*N) bf16_ovhd_time_sympy Max(2.0e-6, 5.43478260869565e-13*K*M) fp8_gemm_time_sympy Max(2.0e-6, 2.84900284900285e-16*K*M*N, 6.79347826086956e-14*K*M + 6.79347826086956e-14*K*N + 2.71739130434783e-13*M*N + 6.79347826086956e-14*floor(K*M/16 + K*N/16)) fp8_ovhd_time_sympy Max(2.0e-6, 3.39673913043478e-13*K*M + 1.35869565217391e-13*M*floor(K/16)) fwd_M fwd_K fwd_N r_fp8_gemm_and_ovhd_spdp b_fp8_e2e_spdp 0 1024 1024 1024 1.00 0.73 1 2048 2048 2048 2.71 1.09 2 4096 4096 4096 3.44 2.22 3 8192 8192 8192 3.68 2.82 4 16384 16384 16384 3.83 3.65 ``` [ghstack-poisoned]
2 parents e4b902c + 1ae9ae9 · commit 47513ae

File tree

25 files changed

+746
-87
lines changed

25 files changed

+746
-87
lines changed

.github/workflows/regression_test.yml

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -25,12 +25,12 @@ jobs:
2525
include:
2626
- name: CUDA Nightly
2727
runs-on: linux.g5.12xlarge.nvidia.gpu
28-
torch-spec: '--pre torch --index-url https://download.pytorch.org/whl/nightly/cu126'
28+
torch-spec: '--pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cu126'
2929
gpu-arch-type: "cuda"
3030
gpu-arch-version: "12.6"
3131
- name: CPU Nightly
3232
runs-on: linux.4xlarge
33-
torch-spec: '--pre torch --index-url https://download.pytorch.org/whl/nightly/cpu'
33+
torch-spec: '--pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cpu'
3434
gpu-arch-type: "cpu"
3535
gpu-arch-version: ""
3636

@@ -61,38 +61,38 @@ jobs:
6161
include:
6262
- name: CUDA 2.8
6363
runs-on: linux.g5.12xlarge.nvidia.gpu
64-
torch-spec: 'torch==2.8.0'
64+
torch-spec: 'torch==2.8.0 torchvision==0.23.0'
6565
gpu-arch-type: "cuda"
6666
gpu-arch-version: "12.6"
6767
dev-requirements-overrides: ""
6868
- name: CUDA 2.9
6969
runs-on: linux.g5.12xlarge.nvidia.gpu
70-
torch-spec: 'torch==2.9.1'
70+
torch-spec: 'torch==2.9.1 torchvision==0.24.1'
7171
gpu-arch-type: "cuda"
7272
gpu-arch-version: "12.6"
7373
dev-requirements-overrides: ""
7474
- name: CUDA 2.10
7575
runs-on: linux.g5.12xlarge.nvidia.gpu
76-
torch-spec: 'torch==2.10.0'
76+
torch-spec: 'torch==2.10.0 torchvision==0.25.0'
7777
gpu-arch-type: "cuda"
7878
gpu-arch-version: "12.6"
7979
dev-requirements-overrides: ""
8080

8181
- name: CPU 2.8
8282
runs-on: linux.4xlarge
83-
torch-spec: 'torch==2.8.0 --index-url https://download.pytorch.org/whl/cpu'
83+
torch-spec: 'torch==2.8.0 torchvision==0.23.0 --index-url https://download.pytorch.org/whl/cpu'
8484
gpu-arch-type: "cpu"
8585
gpu-arch-version: ""
8686
dev-requirements-overrides: ""
8787
- name: CPU 2.9
8888
runs-on: linux.4xlarge
89-
torch-spec: 'torch==2.9.1 --index-url https://download.pytorch.org/whl/cpu'
89+
torch-spec: 'torch==2.9.1 torchvision==0.24.1 --index-url https://download.pytorch.org/whl/cpu'
9090
gpu-arch-type: "cpu"
9191
gpu-arch-version: ""
9292
dev-requirements-overrides: ""
9393
- name: CPU 2.10
9494
runs-on: linux.4xlarge
95-
torch-spec: 'torch==2.10.0 --index-url https://download.pytorch.org/whl/cpu'
95+
torch-spec: 'torch==2.10.0 torchvision==0.25.0 --index-url https://download.pytorch.org/whl/cpu'
9696
gpu-arch-type: "cpu"
9797
gpu-arch-version: ""
9898
dev-requirements-overrides: ""

.github/workflows/xpu_test.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,10 @@ on:
88
push:
99
tags:
1010
- ciflow/xpu/*
11+
workflow_dispatch:
12+
schedule:
13+
# Every Saturday at 4 PM UTC
14+
- cron: '0 16 * * 6'
1115

1216
permissions:
1317
id-token: write

CLAUDE.md

Lines changed: 50 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,51 @@
1-
# TorchAO Claude Instructions
1+
# TorchAO
22

3-
Fill me in
3+
PyTorch-native library for quantization, sparsity, and low-precision training.
4+
5+
## Config Classes
6+
7+
All configs inherit from `AOBaseConfig`. Defined in `torchao/quantization/quant_api.py`. Use `FqnToConfig` to apply different configs to different layers by module name.
8+
9+
## Stable vs Prototype
10+
11+
- **Stable** (`torchao/quantization/`, `torchao/float8/`, `torchao/sparsity/`, `torchao/optim/`): API stability guaranteed.
12+
- **Prototype** (`torchao/prototype/`): Experimental, API may change without notice.
13+
14+
See [docs/source/workflows/index.md](docs/source/workflows/index.md) for the full dtype x hardware status matrix.
15+
16+
## Architecture and Contributing
17+
18+
- [Quantization Overview](docs/source/contributing/quantization_overview.rst) - full stack walkthrough, tensor subclasses, quantization flows
19+
- [Contributor Guide](docs/source/contributing/contributor_guide.rst) - how to add tensors, kernels, configs
20+
- [Inference Workflows](docs/source/workflows/inference.md) - which config to use for which hardware
21+
- [PT2E Quantization](docs/source/pt2e_quantization/index.rst) - PyTorch 2 Export quantization for deployment backends (X86, XPU, ExecuTorch)
22+
23+
These render at https://docs.pytorch.org/ao/main/
24+
25+
## Deprecated APIs
26+
27+
Do not use or recommend these:
28+
- `AffineQuantizedTensor` (AQT) in `torchao/dtypes/` - old v1 system, being removed
29+
- `autoquant()` - deleted
30+
- Layout registration system (`PlainLayout`, `Float8Layout`, `TensorCoreTiledLayout`, etc.) - deleted
31+
- `TorchAODType` - deprecated
32+
- `change_linear_weights_to_int4_woqtensors` - deleted, use `quantize_(model, Int4WeightOnlyConfig())`
33+
34+
New tensor types should inherit from `TorchAOBaseTensor` in `torchao/utils.py`, not AQT.
35+
36+
## Development
37+
38+
```bash
39+
# Setup
40+
USE_CPP=0 pip install -e . --no-build-isolation # CPU-only
41+
USE_CUDA=1 pip install -e . --no-build-isolation # With CUDA
42+
43+
# Test (mirrors source structure)
44+
pytest test/quantization/test_quant_api.py
45+
pytest test/float8/
46+
pytest test/prototype/mx_formats/
47+
```
48+
49+
## Commit Messages
50+
51+
- Do not commit without explicit request from the user

README.md

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -309,14 +309,14 @@ TorchAO is integrated into some of the leading open-source libraries including:
309309

310310
If you find the torchao library useful, please cite it in your work as below.
311311

312-
<!-- TODO: update to cite CodeML paper after Jul 2025 -->
313312
```bibtex
314-
@software{torchao,
313+
@misc{or2025torchao,
315314
title={TorchAO: PyTorch-Native Training-to-Serving Model Optimization},
316-
author={torchao},
317-
url={https://github.com/pytorch/ao},
318-
license={BSD-3-Clause},
319-
month={oct},
320-
year={2024}
315+
author={Andrew Or and Apurva Jain and Daniel Vega-Myhre and Jesse Cai and Charles David Hernandez and Zhenrui Zheng and Driss Guessous and Vasiliy Kuznetsov and Christian Puhrsch and Mark Saroufim and Supriya Rao and Thien Tran and Aleksandar Samardžić},
316+
year={2025},
317+
eprint={2507.16099},
318+
archivePrefix={arXiv},
319+
primaryClass={cs.LG},
320+
url={https://arxiv.org/abs/2507.16099},
321321
}
322322
```

benchmarks/float8/float8_inference_roofline.py

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -112,12 +112,7 @@ def get_gemm_times(
112112

113113
bf16_time_s = get_gpu_kernel_gemm_time_s(torch.mm, x_bf16, w_bf16)
114114

115-
if recipe_name in (
116-
"mxfp4_cutlass",
117-
"nvfp4",
118-
"nvfp4_static",
119-
"nvfp4_no_global_scale",
120-
):
115+
if recipe_name in ("mxfp4_cutlass", "nvfp4", "nvfp4_static"):
121116
d1, d2, d3 = torch.float4_e2m1fn_x2, torch.float4_e2m1fn_x2, torch.bfloat16
122117
A = torch.randint(0, 255, (M, K // 2), device=device, dtype=torch.uint8).view(
123118
d1
@@ -156,7 +151,7 @@ def get_gemm_times(
156151
scale_b = torch.ones(N, K // 32, device=device, dtype=torch.float8_e8m0fnu)
157152
scale_a = to_blocked(scale_a)
158153
scale_b = to_blocked(scale_b)
159-
elif recipe_name in ("nvfp4", "nvfp4_static", "nvfp4_no_global_scale"):
154+
elif recipe_name in ("nvfp4", "nvfp4_static"):
160155
scale_a = torch.ones(M, K // 16, device=device, dtype=torch.float8_e4m3fn)
161156
scale_b = torch.ones(N, K // 16, device=device, dtype=torch.float8_e4m3fn)
162157
scale_a = to_blocked(scale_a)
@@ -182,7 +177,7 @@ def do_matmul(A, B):
182177
swizzle_b=SwizzleType.SWIZZLE_32_4_4,
183178
output_dtype=d3,
184179
)
185-
if recipe_name in ("nvfp4", "nvfp4_static", "nvfp4_no_global_scale"):
180+
if recipe_name in ("nvfp4", "nvfp4_static"):
186181
return torch._scaled_mm(
187182
A, B, scale_a, scale_b, out_dtype=d3, use_fast_accum=False
188183
)
@@ -802,10 +797,6 @@ def run(
802797
config = NVFP4DynamicActivationNVFP4WeightConfig(
803798
use_dynamic_per_tensor_scale=True,
804799
)
805-
elif recipe_name == "nvfp4_no_global_scale":
806-
config = NVFP4DynamicActivationNVFP4WeightConfig(
807-
use_dynamic_per_tensor_scale=False,
808-
)
809800
elif recipe_name == "nvfp4_static":
810801
config_calib = NVFP4DynamicActivationNVFP4WeightConfig(
811802
step="prepare",

docs/source/conf.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -243,6 +243,9 @@
243243
# so a file named "default.css" will overwrite the builtin "default.css".
244244
html_static_path = ["_static"]
245245

246+
# Files to copy to the docs root (served at docs.pytorch.org/ao/llms.txt)
247+
html_extra_path = ["llms.txt"]
248+
246249
# -- Options for HTMLHelp output ------------------------------------------
247250

248251
# Output file base name for HTML help builder.

docs/source/llms.txt

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
# TorchAO
2+
3+
> PyTorch-native library for quantization, sparsity, and low-precision training. Provides the quantize_() API with Config classes for int4/int8/float8/MX weight and activation quantization, composable with torch.compile.
4+
5+
## Docs
6+
7+
- [Quick Start](https://docs.pytorch.org/ao/stable/quick_start.html)
8+
- [Workflows Matrix](https://docs.pytorch.org/ao/main/workflows.html): Status of every dtype x hardware combination
9+
- [API Reference](https://docs.pytorch.org/ao/stable/api_reference/index.html)
10+
- [Inference Quantization](https://docs.pytorch.org/ao/main/workflows/inference.html)
11+
- [Float8 Training](https://docs.pytorch.org/ao/main/workflows/training.html)
12+
- [QAT](https://docs.pytorch.org/ao/main/workflows/qat.html)
13+
- [Quantization Overview](https://docs.pytorch.org/ao/main/contributing/quantization_overview.html): Architecture and internals
14+
- [Contributor Guide](https://docs.pytorch.org/ao/main/contributing/contributor_guide.html): How to add tensors, kernels, configs
15+
- [PT2E Quantization](https://docs.pytorch.org/ao/main/pt2e_quantization/index.html): PyTorch 2 Export quantization for deployment backends (X86, XPU, ExecuTorch)
16+
17+
## Code
18+
19+
- [quantize_() and Config classes](https://github.com/pytorch/ao/blob/main/torchao/quantization/quant_api.py): Main entry point
20+
- [Tensor subclasses](https://github.com/pytorch/ao/tree/main/torchao/quantization/quantize_/workflows): Int4Tensor, Int8Tensor, Float8Tensor, etc.
21+
- [Granularity](https://github.com/pytorch/ao/blob/main/torchao/quantization/granularity.py): PerTensor, PerRow, PerGroup, PerBlock, PerToken
22+
- [Float8 training](https://github.com/pytorch/ao/tree/main/torchao/float8): Scaled float8 training recipes
23+
- [Sparsity](https://github.com/pytorch/ao/tree/main/torchao/sparsity): Semi-structured 2:4 sparsity
24+
- [Quantized optimizers](https://github.com/pytorch/ao/tree/main/torchao/optim): AdamW8bit, AdamW4bit, AdamWFp8
25+
- [QAT](https://github.com/pytorch/ao/tree/main/torchao/quantization/qat): Quantization-aware training
26+
- [MX formats](https://github.com/pytorch/ao/tree/main/torchao/prototype/mx_formats): MXFP8, MXFP4, NVFP4 (prototype)
27+
- [MoE training](https://github.com/pytorch/ao/tree/main/torchao/prototype/moe_training): MXFP8 MoE training (prototype)
28+
29+
## Deprecated APIs
30+
31+
Do not use or recommend these:
32+
- `AffineQuantizedTensor` (AQT) in `torchao/dtypes/` - old v1 system, being removed. New tensor types inherit from `TorchAOBaseTensor`
33+
- `autoquant()` - deleted
34+
- Layout registration system (`PlainLayout`, `Float8Layout`, `TensorCoreTiledLayout`, etc.) - deleted
35+
- `TorchAODType` - deprecated
36+
- `change_linear_weights_to_int4_woqtensors` - deleted, use `quantize_(model, Int4WeightOnlyConfig())`
37+
38+
## Optional
39+
40+
- [Tutorials](https://github.com/pytorch/ao/tree/main/tutorials)
41+
- [Benchmarks](https://github.com/pytorch/ao/tree/main/benchmarks)
42+
- [Contributing](https://github.com/pytorch/ao/blob/main/CONTRIBUTING.md)
43+
- [MSLK kernels](https://github.com/pytorch/MSLK): Optional accelerated kernels

test/dtypes/test_nf4.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -771,6 +771,9 @@ def world_size(self) -> int:
771771

772772
@skip_if_lt_x_gpu(2)
773773
@unittest.skipIf(not torch.accelerator.is_available(), "Need GPU available")
774+
@unittest.skip(
775+
"Skipped due to PyTorch autograd metadata issue with DTensor redistribute"
776+
)
774777
def test_comm(self):
775778
self.run_subtests(
776779
{"input_size": [512, 2048]},

test/kernel/test_autotuner.py

Lines changed: 84 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
import torch
1414
from parameterized import parameterized
1515

16-
from torchao.utils import is_sm_at_least_90
16+
from torchao.utils import is_sm_at_least_90, torch_version_at_least
1717

1818
logging.basicConfig(level=logging.INFO)
1919

@@ -96,5 +96,88 @@ def test_int_scaled_mm(self, device, dtype):
9696
torch.testing.assert_allclose(out32_1, out32_2)
9797

9898

99+
class TestIntScaledMatmulCPUPaths(unittest.TestCase):
100+
"""
101+
Tests for the CPU-specific paths inside _int_scaled_matmul_cpu.
102+
Because the u8s8 VNNI branch is gated on runtime CPU feature detection,
103+
CI machines are unlikely to exercise it naturally. We monkeypatch the
104+
two helper functions so each branch can be tested on any machine.
105+
"""
106+
107+
def _make_inputs(self, m=64, k=32, n=16, dtype=torch.bfloat16):
108+
a = torch.randint(-128, 127, (m, k), dtype=torch.int8)
109+
b = torch.randint(-128, 127, (k, n), dtype=torch.int8)
110+
scales = torch.randn(m, 1, dtype=dtype)
111+
return a, b, scales
112+
113+
def _reference(self, a, b, scales):
114+
from torchao.kernel.intmm import safe_int_mm
115+
116+
return safe_int_mm(a, b).to(scales.dtype) * scales
117+
118+
@unittest.skipIf(not torch_version_at_least("2.12.0.dev"), "Need torch 2.12+")
119+
def test_vnni_path_via_monkeypatch(self):
120+
"""Force the u8s8 VNNI branch and verify against the reference result."""
121+
import torchao.kernel.intmm as intmm_mod
122+
123+
a, b, scales = self._make_inputs()
124+
expected = self._reference(a, b, scales)
125+
126+
orig_amx = intmm_mod._cpu_is_amx_tile_supported
127+
orig_vnni = intmm_mod._cpu_is_vnni_supported
128+
try:
129+
# Simulate: no AMX, but VNNI present → u8s8 compensation path
130+
intmm_mod._cpu_is_amx_tile_supported = lambda: False
131+
intmm_mod._cpu_is_vnni_supported = lambda: True
132+
result = intmm_mod._int_scaled_matmul_cpu(a, b, scales)
133+
finally:
134+
intmm_mod._cpu_is_amx_tile_supported = orig_amx
135+
intmm_mod._cpu_is_vnni_supported = orig_vnni
136+
137+
torch.testing.assert_close(result, expected)
138+
139+
@unittest.skipIf(not torch_version_at_least("2.12.0.dev"), "Need torch 2.12+")
140+
def test_amx_path_via_monkeypatch(self):
141+
"""Force the s8s8 AMX/fallback branch and verify against the reference result."""
142+
import torchao.kernel.intmm as intmm_mod
143+
144+
a, b, scales = self._make_inputs()
145+
expected = self._reference(a, b, scales)
146+
147+
orig_amx = intmm_mod._cpu_is_amx_tile_supported
148+
orig_vnni = intmm_mod._cpu_is_vnni_supported
149+
try:
150+
# Simulate: AMX present → s8s8 direct path (no compensation)
151+
intmm_mod._cpu_is_amx_tile_supported = lambda: True
152+
intmm_mod._cpu_is_vnni_supported = lambda: False
153+
result = intmm_mod._int_scaled_matmul_cpu(a, b, scales)
154+
finally:
155+
intmm_mod._cpu_is_amx_tile_supported = orig_amx
156+
intmm_mod._cpu_is_vnni_supported = orig_vnni
157+
158+
torch.testing.assert_close(result, expected)
159+
160+
@unittest.skipIf(not torch_version_at_least("2.12.0.dev"), "Need torch 2.12+")
161+
def test_no_simd_path_via_monkeypatch(self):
162+
"""Force the no-AMX/no-VNNI branch and verify against the reference result."""
163+
import torchao.kernel.intmm as intmm_mod
164+
165+
a, b, scales = self._make_inputs()
166+
expected = self._reference(a, b, scales)
167+
168+
orig_amx = intmm_mod._cpu_is_amx_tile_supported
169+
orig_vnni = intmm_mod._cpu_is_vnni_supported
170+
try:
171+
# Simulate: neither AMX nor VNNI → s8s8 reference path
172+
intmm_mod._cpu_is_amx_tile_supported = lambda: False
173+
intmm_mod._cpu_is_vnni_supported = lambda: False
174+
result = intmm_mod._int_scaled_matmul_cpu(a, b, scales)
175+
finally:
176+
intmm_mod._cpu_is_amx_tile_supported = orig_amx
177+
intmm_mod._cpu_is_vnni_supported = orig_vnni
178+
179+
torch.testing.assert_close(result, expected)
180+
181+
99182
if __name__ == "__main__":
100183
unittest.main()

0 commit comments

Comments (0)