File tree Expand file tree Collapse file tree 1 file changed +4
-4
lines changed
Expand file tree Collapse file tree 1 file changed +4
-4
lines changed Original file line number Diff line number Diff line change 2323
# Optional dependency: FP8Linear from transformers' fine-grained FP8 integration.
# Falls back to None so the rest of the module can feature-detect at runtime.
try:
    from transformers.integrations.finegrained_fp8 import FP8Linear
except Exception:  # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt aren't swallowed
    FP8Linear = None
    # NOTE(review): stdlib Logger.log takes a level as first arg — confirm this logger's API.
    logger.log("Unsloth: FP8 models need importing FP8Linear from `transformers.integrations.finegrained_fp8` but we don't see it.")
2929
# Optional dependency: FbgemmFp8Linear from transformers' fbgemm FP8 integration.
# Falls back to None so the rest of the module can feature-detect at runtime.
try:
    from transformers.integrations.fbgemm_fp8 import FbgemmFp8Linear
except Exception:  # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt aren't swallowed
    FbgemmFp8Linear = None
    # NOTE(review): stdlib Logger.log takes a level as first arg — confirm this logger's API.
    logger.log("Unsloth: FP8 models need importing FbgemmFP8Linear from `transformers.integrations.fbgemm_fp8` but we don't see it.")
3535
# Optional dependency: block-wise FP8 quantization kernel from fbgemm_gpu's
# experimental Triton GEMM package. Falls back to None for runtime feature detection.
try:
    from fbgemm_gpu.experimental.gemm.triton_gemm.fp8_gemm import triton_quantize_fp8_block
except Exception:  # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt aren't swallowed
    triton_quantize_fp8_block = None
    # NOTE(review): stdlib Logger.log takes a level as first arg — confirm this logger's API.
    logger.log("Unsloth: Could not find fbgemm_gpu.experimental.gemm.triton_gemm.fp8_gemm.triton_quantize_fp8_block")
4141
# Optional dependency: block-wise FP8 GEMM from torchao's prototype inference code,
# aliased locally to torchao_blockwise_gemm. Falls back to None for feature detection.
try:
    from torchao.prototype.blockwise_fp8_inference.blockwise_quantization import (
        blockwise_fp8_gemm as torchao_blockwise_gemm,
    )
except Exception:  # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt aren't swallowed
    torchao_blockwise_gemm = None
    # NOTE(review): stdlib Logger.log takes a level as first arg — confirm this logger's API.
    logger.log("Unsloth: Could not find torchao.prototype.blockwise_fp8_inference.blockwise_quantization.blockwise_fp8_gemm")
4949
You can’t perform that action at this time.
0 commit comments