     ["Tensor<[1, 16, 1, 60]> self = ?", "Tensor<[]> other = ?"],
 ]
 aten__log_softmax_default_blocklist = [["Tensor<[19, 256008]> self = ?", "int dim = 1", "bool half_to_float = False"]]
-aten_full_default_blocklist = [
-    [
-        "List[int] size = [19, 19]",
-        "number fill_value = -3.4028234663852886e+38",
-        "Optional[Device] device = cpu",
-        "Optional[bool] pin_memory = False",
-    ],
-    [
-        "List[int] size = [7, 7]",
-        "number fill_value = -3.3895313892515355e+38",
-        "Optional[Device] device = cpu",
-        "Optional[bool] pin_memory = False",
-    ],
-    [
-        "List[int] size = [45, 45]",
-        "number fill_value = -3.3895313892515355e+38",
-        "Optional[Device] device = cpu",
-        "Optional[bool] pin_memory = False",
-    ],
-    [
-        "List[int] size = [59, 59]",
-        "number fill_value = -3.3895313892515355e+38",
-        "Optional[Device] device = cpu",
-        "Optional[bool] pin_memory = False",
-    ],
-    [
-        "List[int] size = [19, 19]",
-        "number fill_value = -3.3895313892515355e+38",
-        "Optional[Device] device = cpu",
-        "Optional[bool] pin_memory = False",
-    ],
-]
-# TODO(#615): Dynamic shape is not supported yet
-aten_full_like_default_blocklist = [
-    [
-        "Tensor<[s0 + 1, s0 + 1]> self = ?",
-        "number fill_value = 31",
-        "Optional[bool] pin_memory = False",
-    ],
-]
 aten__scaled_dot_product_flash_attention_default_blocklist = [
     ["Tensor<[1, 16, 197, 64]> query = ?", "Tensor<[1, 16, 197, 64]> key = ?", "Tensor<[1, 16, 197, 64]> value = ?"],
     ["Tensor<[1, 12, 197, 64]> query = ?", "Tensor<[1, 12, 197, 64]> key = ?", "Tensor<[1, 12, 197, 64]> value = ?"],
@@ -1402,8 +1362,6 @@ def guard_aten(blocklist, node):
     torch.ops.aten.clamp.default: partial(guard_aten, aten_clamp_default_blocklist),
     torch.ops.aten.maximum.default: partial(guard_aten, aten_maximum_default_blocklist),
     torch.ops.aten._log_softmax.default: partial(guard_aten, aten__log_softmax_default_blocklist),
-    torch.ops.aten.full.default: partial(guard_aten, aten_full_default_blocklist),
-    torch.ops.aten.full_like.default: partial(guard_aten, aten_full_like_default_blocklist),
     torch.ops.aten._scaled_dot_product_flash_attention.default: partial(
         guard_aten, aten__scaled_dot_product_flash_attention_default_blocklist
     ),
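Every value in this dispatch table is a `functools.partial` that pre-binds the op's blocklist, so each entry exposes the same single-argument `node -> bool` shape. A hedged usage sketch, reusing the `guard_aten` sketch above; the names `GUARDED_OPS` and `should_fall_back` are illustrative and do not appear in the diff.

import torch
from functools import partial

GUARDED_OPS = {
    # Pre-binding the blocklist turns two-argument guard_aten into a
    # single-argument predicate, one per guarded op overload.
    torch.ops.aten._log_softmax.default: partial(
        guard_aten, aten__log_softmax_default_blocklist
    ),
}

def should_fall_back(node):
    # node.target is the aten OpOverload on a torch.fx call_function node.
    guard = GUARDED_OPS.get(node.target)
    return guard is not None and guard(node)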
@@ -1430,8 +1388,6 @@ def guard_aten(blocklist, node):
     "torch.ops.aten.clamp.default",
     "torch.ops.aten.maximum.default",
     "torch.ops.aten._log_softmax.default",
-    "torch.ops.aten.full.default",
-    "torch.ops.aten.full_like.default",
     "torch.ops.aten.rsub.Scalar",
     "torch.ops.aten._scaled_dot_product_flash_attention.default",
     "torch.ops.aten.transpose.int",