@@ -53,12 +53,17 @@ int TensorSlice::forward(const Mat& bottom_blob, Mat& top_blob,
   size_t elemsize = bottom_blob.elemsize;
   const int* start_ptr = starts;
   const int* end_ptr = ends;
-  const float* axes_ptr = axes;
+  const int* axes_ptr = axes;
   const int* step_ptr = steps;
   if (starts.w > dims || ends.w > dims) {
     fprintf(stderr, "start/end attributes shape error!\n");
     return -100;
   }
+  if (axes.w != 1) {
+    fprintf(stderr,
+            "axes.w must be 1 because pytorch2onnx exports a multi-axis "
+            "slice as multiple single-axis onnx Slice stages.\n");
+  }
   if (dims == 1) {
     for (int i = 0; i < axes.w; i++) {
       int positive_axis = axes_ptr[i] < 0 ? dims + axes_ptr[i] : axes_ptr[i];
@@ -106,6 +111,8 @@ int TensorSlice::forward(const Mat& bottom_blob, Mat& top_blob,
       int start = start_ptr[i];
       int end = end_ptr[i];
       int dim_shape = get_shape_by_axes(bottom_blob, positive_axis, dims);
+      int dim_shape_test =
+          get_shape_by_axes(bottom_blob, positive_axis, dims - 1);
       if (dim_shape < 0) {
         return -1;
       }
@@ -127,6 +134,7 @@ int TensorSlice::forward(const Mat& bottom_blob, Mat& top_blob,
         return -100;
       }
       active_indice[positive_axis - 1] = temp_indice;
+      active_indice[positive_axis - 1].resize(temp_indice.size());
     }
     top_blob.create((int)active_indice[1].size(), (int)active_indice[0].size(),
                     elemsize, opt.blob_allocator);
@@ -138,6 +146,7 @@ int TensorSlice::forward(const Mat& bottom_blob, Mat& top_blob,
     }
     return 0;
   }
+
   if (dims == 3) {
     std::vector<std::vector<int> > active_indice;
     std::vector<int> indices;
@@ -177,7 +186,8 @@ int TensorSlice::forward(const Mat& bottom_blob, Mat& top_blob,
         fprintf(stderr, "step should not be 0!\n");
         return -100;
       }
-      active_indice[positive_axis] = temp_indice;
+      active_indice[positive_axis - 1] = temp_indice;
+      active_indice[positive_axis - 1].resize(temp_indice.size());
     }
     top_blob.create((int)active_indice[2].size(), (int)active_indice[1].size(),
                     (int)active_indice[0].size(), elemsize, opt.blob_allocator);
@@ -192,6 +202,7 @@ int TensorSlice::forward(const Mat& bottom_blob, Mat& top_blob,
     }
     return 0;
   }
+
   return 0;
 }
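
For readers skimming the diff, the per-axis index vectors (temp_indice / active_indice) hold the explicit element indices selected along one sliced axis. Below is a minimal, self-contained sketch of how such an index list can be built from start/end/step with ONNX-Slice-like semantics (negative positions counted from the end, zero step rejected). The helper name build_axis_indices and its simplifications (no INT_MAX sentinels, simplified negative-step clamping) are assumptions for illustration only, not code from this PR.

#include <cstdio>
#include <vector>

// Hypothetical helper (not part of this PR): expand one sliced axis of
// length dim_shape into the explicit indices chosen by start/end/step.
// Negative start/end count from the end of the axis; step must not be 0.
// Edge cases follow a simplified rule, not the full ONNX Slice spec.
static std::vector<int> build_axis_indices(int start, int end, int step,
                                           int dim_shape) {
  std::vector<int> indices;
  if (step == 0) {
    fprintf(stderr, "step should not be 0!\n");
    return indices;
  }
  // Normalize negative positions and clamp to the valid range [0, dim_shape].
  if (start < 0) start += dim_shape;
  if (end < 0) end += dim_shape;
  if (start < 0) start = 0;
  if (start > dim_shape) start = dim_shape;
  if (end < 0) end = 0;
  if (end > dim_shape) end = dim_shape;
  if (step > 0) {
    for (int i = start; i < end; i += step) indices.push_back(i);
  } else {
    for (int i = start; i > end; i += step) indices.push_back(i);
  }
  return indices;
}

int main() {
  // Slicing the range [1, 6) with step 2 on an axis of length 8 selects 1 3 5.
  std::vector<int> idx = build_axis_indices(1, 6, 2, 8);
  for (size_t i = 0; i < idx.size(); i++) printf("%d ", idx[i]);
  printf("\n");
  return 0;
}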