// Copyright 2025 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include "ynnpack/base/simd/hexagon_hvx.h"

#include <hexagon_protos.h>
#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

#include <cstddef>
#include <cstdint>
#include <type_traits>

#include "ynnpack/base/base.h"
#include "ynnpack/base/bfloat16.h"
#include "ynnpack/base/half.h"
#include "ynnpack/base/simd/vec.h"
#include "ynnpack/kernels/reduce/generic.h"
#include "ynnpack/kernels/reduce/min_max_accumulator.h"
#include "ynnpack/kernels/reduce/sum_accumulator.h"

namespace ynn {

namespace simd {

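// Specializations of `reduce_add` for a horizontal factor of 4: each of the
// 32 int32 accumulator lanes absorbs the sum of the 4 consecutive input
// bytes sharing its word. Q6_Vw_vrmpyacc_VwVubRb multiplies the 4 unsigned
// bytes in each word of `b` by the corresponding bytes of the replicated
// scalar (all 0x01 here) and accumulates the products into the int32 lane,
// i.e. a fused widen-and-horizontal-add.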
static s32x32 reduce_add(
    s32x32 a, u8x128 b, Identity /*map_fn*/,
    std::integral_constant<size_t, 4> /*horizontal_factor*/) {
  a.v = Q6_Vw_vrmpyacc_VwVubRb(a.v, b.v, 0x01010101);
  return a;
}

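// Sum of squares: the same reducing multiply-accumulate, with `b` as both
// operands, so each int32 lane accumulates b[i]^2 for its 4 unsigned bytes.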
static s32x32 reduce_add(
    s32x32 a, u8x128 b, Square /*map_fn*/,
    std::integral_constant<size_t, 4> /*horizontal_factor*/) {
  a.v = Q6_Vuw_vrmpyacc_VuwVubVub(a.v, b.v, b.v);
  return a;
}

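// The scalar-operand vrmpy forms treat the vector operand as unsigned, so
// the signed case splats a vector of ones and uses the vector-by-vector
// signed form Q6_Vw_vrmpyacc_VwVbVb instead.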
static s32x32 reduce_add(
    s32x32 a, s8x128 b, Identity /*map_fn*/,
    std::integral_constant<size_t, 4> /*horizontal_factor*/) {
  const auto ones = Q6_V_vsplat_R(0x01010101);
  a.v = Q6_Vw_vrmpyacc_VwVbVb(a.v, b.v, ones);
  return a;
}

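// Sum of squares for signed bytes: b[i] * b[i] is non-negative, so the
// signed multiply-accumulate produces the correct widened result.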
static s32x32 reduce_add(
    s32x32 a, s8x128 b, Square /*map_fn*/,
    std::integral_constant<size_t, 4> /*horizontal_factor*/) {
  a.v = Q6_Vw_vrmpyacc_VwVbVb(a.v, b.v, b.v);
  return a;
}

}  // namespace simd

using simd::bf16x64;
using simd::f16x64;
using simd::f32x32;
using simd::s16x64;
using simd::s32x32;
using simd::s8x128;
using simd::u8x128;
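// 128-lane accumulators span four native 128-byte HVX vectors of 32 lanes
// each; they back the streaming (k1 == 1) reductions below.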
using s32x128 = simd::vec<int32_t, 128>;
using f32x128 = simd::vec<float, 128>;

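// Pairs the bf16 vector type with an s16 companion, presumably so the
// min/max accumulator can compare bf16 values with 16-bit integer ops where
// native bf16 support is unavailable.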
using bf16x64_rvar = float16_wrapper<bf16x64, s16x64>;

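// MIN_MAX_KERNEL(name, min vec, max vec, scalar type, lanes) instantiates a
// kernel reducing a 4-row tile of `lanes`-wide vectors; passing dummy_t for
// either vector argument disables that half of the min/max reduction.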
MIN_MAX_KERNEL(min_max_fp32_4x32_hvx, f32x32, f32x32, float, 32);
MIN_MAX_KERNEL(min_max_fp16_4x64_hvx, f16x64, f16x64, half, 64);
MIN_MAX_KERNEL(min_max_bf16_4x64_hvx, bf16x64_rvar, bf16x64_rvar, bfloat16, 64);
MIN_MAX_KERNEL(min_max_uint8_4x128_hvx, u8x128, u8x128, uint8_t, 128);
MIN_MAX_KERNEL(min_max_int8_4x128_hvx, s8x128, s8x128, int8_t, 128);

MIN_MAX_KERNEL(min_fp32_4x32_hvx, f32x32, dummy_t, float, 32);
MIN_MAX_KERNEL(min_fp16_4x64_hvx, f16x64, dummy_t, half, 64);
MIN_MAX_KERNEL(min_bf16_4x64_hvx, bf16x64_rvar, dummy_t, bfloat16, 64);
MIN_MAX_KERNEL(min_uint8_4x128_hvx, u8x128, dummy_t, uint8_t, 128);
MIN_MAX_KERNEL(min_int8_4x128_hvx, s8x128, dummy_t, int8_t, 128);

MIN_MAX_KERNEL(max_fp32_4x32_hvx, dummy_t, f32x32, float, 32);
MIN_MAX_KERNEL(max_fp16_4x64_hvx, dummy_t, f16x64, half, 64);
MIN_MAX_KERNEL(max_bf16_4x64_hvx, dummy_t, bf16x64_rvar, bfloat16, 64);
MIN_MAX_KERNEL(max_uint8_4x128_hvx, dummy_t, u8x128, uint8_t, 128);
MIN_MAX_KERNEL(max_int8_4x128_hvx, dummy_t, s8x128, int8_t, 128);

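// Integer sum reductions. When the reduced axis is contiguous bytes
// (k1 == 1 with unit stride along n), a streaming reduction over the wide
// 128-lane accumulators is used; otherwise the tiled path reduces through
// the vrmpy-based `reduce_add` overloads above. The same dispatch applies to
// all four variants below.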
void sum_uint8_int32_hvx(size_t n, size_t k3, size_t k2, size_t k1,
                         size_t a_stride_n, size_t a_stride_k3,
                         size_t a_stride_k2, const void* a, size_t, void* c) {
  if (k1 == 1 && a_stride_n == sizeof(uint8_t)) {
    // TODO(b/482435301): This case is poorly optimized. It naively converts to
    // int32 and does a 32-bit add. We should be using a widening op, and
    // storing the accumulators interleaved until `sum_rows`.
    stream_reduce<sum_accumulator_k1_1<s32x128>, uint8_t, int32_t>(
        n, k3, k2, a_stride_k3, a_stride_k2,
        reinterpret_cast<const uint8_t*>(a),
        /*C_stride_m=*/0, reinterpret_cast<int32_t*>(c));
  } else {
    tiled_reduce<sum_accumulator_x32<s32x32, 128, Identity>, uint8_t, int32_t>(
        n, k3, k2, k1, a_stride_n, a_stride_k3, a_stride_k2,
        reinterpret_cast<const uint8_t*>(a), /*C_stride_m=*/0,
        reinterpret_cast<int32_t*>(c));
  }
}

void sum_squared_uint8_int32_hvx(size_t n, size_t k3, size_t k2, size_t k1,
                                 size_t a_stride_n, size_t a_stride_k3,
                                 size_t a_stride_k2, const void* a, size_t,
                                 void* c) {
  if (k1 == 1 && a_stride_n == sizeof(uint8_t)) {
    // TODO(b/482435301): This case is poorly optimized. It naively converts to
    // int32 and does a 32-bit add. We should be using a widening op, and
    // storing the accumulators interleaved until `sum_rows`.
    stream_reduce<sum_accumulator_k1_1<s32x128, Square>, uint8_t, int32_t>(
        n, k3, k2, a_stride_k3, a_stride_k2,
        reinterpret_cast<const uint8_t*>(a),
        /*C_stride_m=*/0, reinterpret_cast<int32_t*>(c));
  } else {
    tiled_reduce<sum_accumulator_x32<s32x32, 128, Square>, uint8_t, int32_t>(
        n, k3, k2, k1, a_stride_n, a_stride_k3, a_stride_k2,
        reinterpret_cast<const uint8_t*>(a), /*C_stride_m=*/0,
        reinterpret_cast<int32_t*>(c));
  }
}

void sum_int8_int32_hvx(size_t n, size_t k3, size_t k2, size_t k1,
                        size_t a_stride_n, size_t a_stride_k3,
                        size_t a_stride_k2, const void* a, size_t, void* c) {
  if (k1 == 1 && a_stride_n == sizeof(int8_t)) {
    // TODO(b/482435301): This case is poorly optimized. It naively converts to
    // int32 and does a 32-bit add. We should be using a widening op, and
    // storing the accumulators interleaved until `sum_rows`.
    stream_reduce<sum_accumulator_k1_1<s32x128>, int8_t, int32_t>(
        n, k3, k2, a_stride_k3, a_stride_k2, reinterpret_cast<const int8_t*>(a),
        /*C_stride_m=*/0, reinterpret_cast<int32_t*>(c));
  } else {
    tiled_reduce<sum_accumulator_x32<s32x32, 128, Identity>, int8_t, int32_t>(
        n, k3, k2, k1, a_stride_n, a_stride_k3, a_stride_k2,
        reinterpret_cast<const int8_t*>(a), /*C_stride_m=*/0,
        reinterpret_cast<int32_t*>(c));
  }
}

void sum_squared_int8_int32_hvx(size_t n, size_t k3, size_t k2, size_t k1,
                                size_t a_stride_n, size_t a_stride_k3,
                                size_t a_stride_k2, const void* a, size_t,
                                void* c) {
  if (k1 == 1 && a_stride_n == sizeof(int8_t)) {
    // TODO(b/482435301): This case is poorly optimized. It naively converts to
    // int32 and does a 32-bit add. We should be using a widening op, and
    // storing the accumulators interleaved until `sum_rows`.
    stream_reduce<sum_accumulator_k1_1<s32x128, Square>, int8_t, int32_t>(
        n, k3, k2, a_stride_k3, a_stride_k2, reinterpret_cast<const int8_t*>(a),
        /*C_stride_m=*/0, reinterpret_cast<int32_t*>(c));
  } else {
    tiled_reduce<sum_accumulator_x32<s32x32, 128, Square>, int8_t, int32_t>(
        n, k3, k2, k1, a_stride_n, a_stride_k3, a_stride_k2,
        reinterpret_cast<const int8_t*>(a), /*C_stride_m=*/0,
        reinterpret_cast<int32_t*>(c));
  }
}

}  // namespace ynn