-
Notifications
You must be signed in to change notification settings - Fork 664
Expand file tree
/
Copy path gather.mlir
More file actions
113 lines (103 loc) · 8.64 KB
/
gather.mlir
File metadata and controls
113 lines (103 loc) · 8.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
// RUN: torch-mlir-opt <%s -convert-torch-to-stablehlo -split-input-file -verify-diagnostics | FileCheck %s

// Lowering of torch.aten.index_select (dim 0, dynamic leading dim) to
// stablehlo.dynamic_gather: the runtime slice-size operand is assembled from
// a constant 1 for the indexed dim plus tensor.dim of dim 1, and the indexed
// dim is collapsed (collapsed_slice_dims = [0]) so the result keeps the
// indices' size 2 in front of the offset dim.
// NOTE(review): unlike the later test cases, the first three cases in this
// file are not separated by split markers, so they share one parse unit
// under -split-input-file — confirm this is intentional.
// CHECK-LABEL: func.func @torch.aten.index_select$basic(
// CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,4],f32>, %[[ARG1:.*]]: !torch.vtensor<[2],si64>) -> !torch.vtensor<[2,4],f32> {
// CHECK-DAG: %[[T0:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,4],f32> -> tensor<?x4xf32>
// CHECK-DAG: %[[T1:.*]] = torch_c.to_builtin_tensor %[[ARG1]] : !torch.vtensor<[2],si64> -> tensor<2xi64>
// CHECK: %[[INT0:.*]] = torch.constant.int 0
// CHECK: %[[C1_I64:.*]] = arith.constant 1 : i64
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[T2:.*]] = tensor.dim %[[T0]], %[[C1]] : tensor<?x4xf32>
// CHECK: %[[T3:.*]] = arith.index_cast %[[T2]] : index to i64
// CHECK: %[[T4:.*]] = tensor.from_elements %[[C1_I64]], %[[T3]] : tensor<2xi64>
// CHECK: %[[T5:.*]] = "stablehlo.dynamic_gather"(%[[T0]], %[[T1]], %[[T4]]) <{dimension_numbers = #stablehlo.gather<offset_dims = [1], collapsed_slice_dims = [0], start_index_map = [0], index_vector_dim = 1>, indices_are_sorted = false}> : (tensor<?x4xf32>, tensor<2xi64>, tensor<2xi64>) -> tensor<2x4xf32>
// CHECK: %[[T6:.*]] = stablehlo.convert %[[T5]] : tensor<2x4xf32>
// CHECK: %[[T7:.*]] = torch_c.from_builtin_tensor %[[T6]] : tensor<2x4xf32> -> !torch.vtensor<[2,4],f32>
// CHECK: return %[[T7]] : !torch.vtensor<[2,4],f32>
func.func @torch.aten.index_select$basic(%arg0: !torch.vtensor<[?,4],f32>, %arg1: !torch.vtensor<[2],si64>) -> !torch.vtensor<[2,4],f32> {
%int0 = torch.constant.int 0
%0 = torch.aten.index_select %arg0, %int0, %arg1 : !torch.vtensor<[?,4],f32>, !torch.int, !torch.vtensor<[2],si64> -> !torch.vtensor<[2,4],f32>
return %0 : !torch.vtensor<[2,4],f32>
}
// Lowering of torch.aten.embedding with rank-1 indices and a fully dynamic
// weight: expected to produce the same dynamic_gather shape-computation
// pattern as index_select above (runtime slice sizes via tensor.dim, dim 0
// collapsed). The padding_idx (-1) and the two scale/sparse bool flags are
// materialized as constants but do not appear in the gather itself.
// CHECK-LABEL: func.func @torch.aten.embedding$basic(
// CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?],f32>, %[[ARG1:.*]]: !torch.vtensor<[?],si64>) -> !torch.vtensor<[?,?],f32> {
// CHECK-DAG: %[[T0:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
// CHECK-DAG: %[[T1:.*]] = torch_c.to_builtin_tensor %[[ARG1]] : !torch.vtensor<[?],si64> -> tensor<?xi64>
// CHECK: %[[FALSE:.*]] = torch.constant.bool false
// CHECK: %[[INT:.*]]-1 = torch.constant.int -1
// CHECK: %[[C1_I64:.*]] = arith.constant 1 : i64
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[T2:.*]] = tensor.dim %[[T0]], %[[C1]] : tensor<?x?xf32>
// CHECK: %[[T3:.*]] = arith.index_cast %[[T2]] : index to i64
// CHECK: %[[T4:.*]] = tensor.from_elements %[[C1_I64]], %[[T3]] : tensor<2xi64>
// CHECK: %[[T5:.*]] = "stablehlo.dynamic_gather"(%[[T0]], %[[T1]], %[[T4]]) <{dimension_numbers = #stablehlo.gather<offset_dims = [1], collapsed_slice_dims = [0], start_index_map = [0], index_vector_dim = 1>, indices_are_sorted = false}> : (tensor<?x?xf32>, tensor<?xi64>, tensor<2xi64>) -> tensor<?x?xf32>
// CHECK: %[[T6:.*]] = stablehlo.convert %[[T5]] : tensor<?x?xf32>
// CHECK: %[[T7:.*]] = torch_c.from_builtin_tensor %[[T6]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
// CHECK: return %[[T7]] : !torch.vtensor<[?,?],f32>
func.func @torch.aten.embedding$basic(%weight: !torch.vtensor<[?,?],f32>, %indices: !torch.vtensor<[?], si64>) -> !torch.vtensor<[?,?],f32> {
%false = torch.constant.bool false
%int-1 = torch.constant.int -1
%ret = torch.aten.embedding %weight, %indices, %int-1, %false, %false : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?], si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[?,?],f32>
return %ret: !torch.vtensor<[?,?],f32>
}
// Same embedding lowering but with rank-2 (?x1) indices: the gather dimension
// numbers shift accordingly — offset_dims = [2] and index_vector_dim = 2 —
// so both index dims are kept in the ?x1x? result while weight dim 0 is
// still collapsed.
// CHECK-LABEL: func.func @torch.aten.embedding$rank_two_indices(
// CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,?],f32>, %[[ARG1:.*]]: !torch.vtensor<[?,1],si64>) -> !torch.vtensor<[?,1,?],f32> {
// CHECK-DAG: %[[T0:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
// CHECK-DAG: %[[T1:.*]] = torch_c.to_builtin_tensor %[[ARG1]] : !torch.vtensor<[?,1],si64> -> tensor<?x1xi64>
// CHECK: %[[FALSE:.*]] = torch.constant.bool false
// CHECK: %[[INT:.*]]-1 = torch.constant.int -1
// CHECK: %[[C1_I64:.*]] = arith.constant 1 : i64
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[T2:.*]] = tensor.dim %[[T0]], %[[C1]] : tensor<?x?xf32>
// CHECK: %[[T3:.*]] = arith.index_cast %[[T2]] : index to i64
// CHECK: %[[T4:.*]] = tensor.from_elements %[[C1_I64]], %[[T3]] : tensor<2xi64>
// CHECK: %[[T5:.*]] = "stablehlo.dynamic_gather"(%[[T0]], %[[T1]], %[[T4]]) <{dimension_numbers = #stablehlo.gather<offset_dims = [2], collapsed_slice_dims = [0], start_index_map = [0], index_vector_dim = 2>, indices_are_sorted = false}> : (tensor<?x?xf32>, tensor<?x1xi64>, tensor<2xi64>) -> tensor<?x1x?xf32>
// CHECK: %[[T6:.*]] = stablehlo.convert %[[T5]] : tensor<?x1x?xf32>
// CHECK: %[[T7:.*]] = torch_c.from_builtin_tensor %[[T6]] : tensor<?x1x?xf32> -> !torch.vtensor<[?,1,?],f32>
// CHECK: return %[[T7]] : !torch.vtensor<[?,1,?],f32>
func.func @torch.aten.embedding$rank_two_indices(%weight: !torch.vtensor<[?,?],f32>, %indices: !torch.vtensor<[?,1], si64>) -> !torch.vtensor<[?,1,?],f32> {
%false = torch.constant.bool false
%int-1 = torch.constant.int -1
%ret = torch.aten.embedding %weight, %indices, %int-1, %false, %false : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,1], si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[?,1,?],f32>
return %ret: !torch.vtensor<[?,1,?],f32>
}
// -----
// Edge case: index_select with statically empty (size-0) indices. No gather
// is emitted at all — the lowering folds the whole op to a zero-filled
// 0x8 constant of the result type.
// CHECK-LABEL: func.func @torch.aten.index_select$empty_indices(
// CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[4,8],f32>, %[[ARG1:.*]]: !torch.vtensor<[0],si64>) -> !torch.vtensor<[0,8],f32> {
// CHECK: %[[INT0:.*]] = torch.constant.int 0
// CHECK: %[[CST:.*]] = stablehlo.constant dense<0.000000e+00> : tensor<0x8xf32>
// CHECK: %[[CONVERT:.*]] = stablehlo.convert %[[CST]] : tensor<0x8xf32>
// CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[CONVERT]] : tensor<0x8xf32> -> !torch.vtensor<[0,8],f32>
// CHECK: return %[[RESULT]] : !torch.vtensor<[0,8],f32>
func.func @torch.aten.index_select$empty_indices(%arg0: !torch.vtensor<[4,8],f32>, %arg1: !torch.vtensor<[0],si64>) -> !torch.vtensor<[0,8],f32> {
%int0 = torch.constant.int 0
%0 = torch.aten.index_select %arg0, %int0, %arg1 : !torch.vtensor<[4,8],f32>, !torch.int, !torch.vtensor<[0],si64> -> !torch.vtensor<[0,8],f32>
return %0 : !torch.vtensor<[0,8],f32>
}
// -----
// Edge case: embedding lookup with statically empty (size-0) indices.
// Mirrors the index_select empty-indices case: the op folds to a zero-filled
// 0x8 constant instead of emitting a gather.
// CHECK-LABEL: func.func @torch.aten.embedding$empty_indices(
// CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[10,8],f32>, %[[ARG1:.*]]: !torch.vtensor<[0],si64>) -> !torch.vtensor<[0,8],f32> {
// CHECK: %[[FALSE:.*]] = torch.constant.bool false
// CHECK: %[[INT_NEG1:.*]] = torch.constant.int -1
// CHECK: %[[CST:.*]] = stablehlo.constant dense<0.000000e+00> : tensor<0x8xf32>
// CHECK: %[[CONVERT:.*]] = stablehlo.convert %[[CST]] : tensor<0x8xf32>
// CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[CONVERT]] : tensor<0x8xf32> -> !torch.vtensor<[0,8],f32>
// CHECK: return %[[RESULT]] : !torch.vtensor<[0,8],f32>
func.func @torch.aten.embedding$empty_indices(%weight: !torch.vtensor<[10,8],f32>, %indices: !torch.vtensor<[0], si64>) -> !torch.vtensor<[0,8],f32> {
%false = torch.constant.bool false
%int-1 = torch.constant.int -1
%ret = torch.aten.embedding %weight, %indices, %int-1, %false, %false : !torch.vtensor<[10,8],f32>, !torch.vtensor<[0], si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[0,8],f32>
return %ret: !torch.vtensor<[0,8],f32>
}
// -----
// Edge case: empty indices with a non-zero select dim (dim 1), verifying the
// zero-fold places the 0 extent on the selected dimension — result is 4x0,
// not 0x8.
// CHECK-LABEL: func.func @torch.aten.index_select$empty_indices_dim1(
// CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[4,8],f32>, %[[ARG1:.*]]: !torch.vtensor<[0],si64>) -> !torch.vtensor<[4,0],f32> {
// CHECK: %[[INT1:.*]] = torch.constant.int 1
// CHECK: %[[CST:.*]] = stablehlo.constant dense<0.000000e+00> : tensor<4x0xf32>
// CHECK: %[[CONVERT:.*]] = stablehlo.convert %[[CST]] : tensor<4x0xf32>
// CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[CONVERT]] : tensor<4x0xf32> -> !torch.vtensor<[4,0],f32>
// CHECK: return %[[RESULT]] : !torch.vtensor<[4,0],f32>
func.func @torch.aten.index_select$empty_indices_dim1(%arg0: !torch.vtensor<[4,8],f32>, %arg1: !torch.vtensor<[0],si64>) -> !torch.vtensor<[4,0],f32> {
%int1 = torch.constant.int 1
%0 = torch.aten.index_select %arg0, %int1, %arg1 : !torch.vtensor<[4,8],f32>, !torch.int, !torch.vtensor<[0],si64> -> !torch.vtensor<[4,0],f32>
return %0 : !torch.vtensor<[4,0],f32>
}