@@ -1880,6 +1880,15 @@ void xnn_pack_qu8_weights_and_biases(
1880
1880
packed_weights_ptr, extra_bytes, params);
1881
1881
}
1882
1882
1883
// Transposes a row-major `height` x `width` matrix `in` into the row-major
// `width` x `height` matrix `out`. The buffers are assumed not to overlap
// (NOTE(review): no in-place support — confirm callers never alias).
void transpose_weights(const float* in, float* out, size_t height,
                       size_t width) {
  for (size_t row = 0; row < height; ++row) {
    // Walk one source row and scatter it down the corresponding
    // destination column.
    const float* src_row = &in[row * width];
    for (size_t col = 0; col < width; ++col) {
      out[col * height + row] = src_row[col];
    }
  }
}
1883
1892
#if XNN_ENABLE_KLEIDIAI
1884
1893
size_t xnn_packed_stride_kai_qs4_weights_and_biases_sme (
1885
1894
const struct xnn_gemm_config * gemm_config, size_t k, size_t unused_k_stride,
@@ -2357,15 +2366,6 @@ void xnn_pack_kai_qb4_weights_and_biases(
2357
2366
}
2358
2367
#endif // XNN_ENABLE_KLEIDIAI
2359
2368
2360
- void transpose_weights (const float * in, float * out, size_t height,
2361
- size_t width) {
2362
- for (size_t i = 0 ; i < height; ++i) {
2363
- for (size_t j = 0 ; j < width; ++j) {
2364
- out[j * height + i] = in[i * width + j];
2365
- }
2366
- }
2367
- }
2368
-
2369
2369
void xnn_pack_f32_run_pack_rhs (size_t num_groups, size_t n, size_t k, size_t nr, size_t kr,
2370
2370
size_t sr, size_t rhs_stride, const void *rhs,
2371
2371
const void *bias, const void *scale, void *rhs_packed,
0 commit comments