@@ -59,7 +59,7 @@ simde_vld2_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
       simde_vget_high_s8(q)
     };
     return u;
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_int8x8_private a_[2];
     vint8m1x2_t dest = __riscv_vlseg2e8_v_i8m1x2(&ptr[0], 8);
     a_[0].sv64 = __riscv_vget_v_i8m1x2_i8m1(dest, 0);
@@ -102,7 +102,7 @@ simde_int16x4x2_t
 simde_vld2_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2_s16(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_int16x4_private a_[2];
     vint16m1x2_t dest = __riscv_vlseg2e16_v_i16m1x2(&ptr[0], 4);
     a_[0].sv64 = __riscv_vget_v_i16m1x2_i16m1(dest, 0);
@@ -152,7 +152,7 @@ simde_int32x2x2_t
 simde_vld2_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2_s32(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_int32x2_private a_[2];
     vint32m1x2_t dest = __riscv_vlseg2e32_v_i32m1x2(&ptr[0], 2);
     a_[0].sv64 = __riscv_vget_v_i32m1x2_i32m1(dest, 0);
@@ -195,7 +195,7 @@ simde_int64x1x2_t
 simde_vld2_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2_s64(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_int64x1_private a_[2];
     vint64m1x2_t dest = __riscv_vlseg2e64_v_i64m1x2(&ptr[0], 1);
     a_[0].sv64 = __riscv_vget_v_i64m1x2_i64m1(dest, 0);
@@ -249,7 +249,7 @@ simde_vld2_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
       simde_vget_high_u8(q)
     };
     return u;
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_uint8x8_private a_[2];
     vuint8m1x2_t dest = __riscv_vlseg2e8_v_u8m1x2(&ptr[0], 8);
     a_[0].sv64 = __riscv_vget_v_u8m1x2_u8m1(dest, 0);
@@ -292,7 +292,7 @@ simde_uint16x4x2_t
 simde_vld2_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2_u16(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_uint16x4_private a_[2];
     vuint16m1x2_t dest = __riscv_vlseg2e16_v_u16m1x2(&ptr[0], 4);
     a_[0].sv64 = __riscv_vget_v_u16m1x2_u16m1(dest, 0);
@@ -342,7 +342,7 @@ simde_uint32x2x2_t
 simde_vld2_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2_u32(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_uint32x2_private a_[2];
     vuint32m1x2_t dest = __riscv_vlseg2e32_v_u32m1x2(&ptr[0], 2);
     a_[0].sv64 = __riscv_vget_v_u32m1x2_u32m1(dest, 0);
@@ -385,7 +385,7 @@ simde_uint64x1x2_t
 simde_vld2_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2_u64(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_uint64x1_private a_[2];
     vuint64m1x2_t dest = __riscv_vlseg2e64_v_u64m1x2(&ptr[0], 1);
     a_[0].sv64 = __riscv_vget_v_u64m1x2_u64m1(dest, 0);
@@ -428,7 +428,8 @@ simde_float16x4x2_t
 simde_vld2_f16(simde_float16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
     return vld2_f16(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE) && SIMDE_ARCH_RISCV_ZVFH && (SIMDE_NATURAL_VECTOR_SIZE >= 128)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG) \
+    && SIMDE_ARCH_RISCV_ZVFH && (SIMDE_NATURAL_VECTOR_SIZE >= 128)
     simde_float16x4_private r_[2];
     vfloat16m1x2_t dest = __riscv_vlseg2e16_v_f16m1x2((_Float16 *)&ptr[0], 4);
     r_[0].sv64 = __riscv_vget_v_f16m1x2_f16m1(dest, 0);
@@ -466,7 +467,7 @@ simde_float32x2x2_t
 simde_vld2_f32(simde_float32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2_f32(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_float32x2_private r_[2];
     vfloat32m1x2_t dest = __riscv_vlseg2e32_v_f32m1x2(&ptr[0], 2);
     r_[0].sv64 = __riscv_vget_v_f32m1x2_f32m1(dest, 0);
@@ -509,7 +510,7 @@ simde_float64x1x2_t
 simde_vld2_f64(simde_float64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) {
   #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
     return vld2_f64(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_float64x1_private r_[2];
     vfloat64m1x2_t dest = __riscv_vlseg2e64_v_f64m1x2(&ptr[0], 1);
     r_[0].sv64 = __riscv_vget_v_f64m1x2_f64m1(dest, 0);
@@ -552,7 +553,7 @@ simde_int8x16x2_t
 simde_vld2q_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2q_s8(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_int8x16_private a_[2];
     vint8m1x2_t dest = __riscv_vlseg2e8_v_i8m1x2(&ptr[0], 16);
     a_[0].sv128 = __riscv_vget_v_i8m1x2_i8m1(dest, 0);
@@ -602,7 +603,7 @@ simde_int32x4x2_t
 simde_vld2q_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2q_s32(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_int32x4_private a_[2];
     vint32m1x2_t dest = __riscv_vlseg2e32_v_i32m1x2(&ptr[0], 4);
     a_[0].sv128 = __riscv_vget_v_i32m1x2_i32m1(dest, 0);
@@ -652,7 +653,7 @@ simde_int16x8x2_t
 simde_vld2q_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2q_s16(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_int16x8_private r_[2];
     vint16m1x2_t dest = __riscv_vlseg2e16_v_i16m1x2(&ptr[0], 8);
     r_[0].sv128 = __riscv_vget_v_i16m1x2_i16m1(dest, 0);
@@ -702,7 +703,7 @@ simde_int64x2x2_t
 simde_vld2q_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
   #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
     return vld2q_s64(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_int64x2_private r_[2];
     vint64m1x2_t dest = __riscv_vlseg2e64_v_i64m1x2(&ptr[0], 2);
     r_[0].sv128 = __riscv_vget_v_i64m1x2_i64m1(dest, 0);
@@ -739,7 +740,7 @@ simde_uint8x16x2_t
 simde_vld2q_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2q_u8(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_uint8x16_private r_[2];
     vuint8m1x2_t dest = __riscv_vlseg2e8_v_u8m1x2(&ptr[0], 16);
     r_[0].sv128 = __riscv_vget_v_u8m1x2_u8m1(dest, 0);
@@ -789,7 +790,7 @@ simde_uint16x8x2_t
 simde_vld2q_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2q_u16(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_uint16x8_private r_[2];
     vuint16m1x2_t dest = __riscv_vlseg2e16_v_u16m1x2(&ptr[0], 8);
     r_[0].sv128 = __riscv_vget_v_u16m1x2_u16m1(dest, 0);
@@ -839,7 +840,7 @@ simde_uint32x4x2_t
 simde_vld2q_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2q_u32(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_uint32x4_private r_[2];
     vuint32m1x2_t dest = __riscv_vlseg2e32_v_u32m1x2(&ptr[0], 4);
     r_[0].sv128 = __riscv_vget_v_u32m1x2_u32m1(dest, 0);
@@ -889,7 +890,7 @@ simde_uint64x2x2_t
 simde_vld2q_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
   #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
     return vld2q_u64(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_uint64x2_private r_[2];
     vuint64m1x2_t dest = __riscv_vlseg2e64_v_u64m1x2(&ptr[0], 2);
     r_[0].sv128 = __riscv_vget_v_u64m1x2_u64m1(dest, 0);
@@ -926,7 +927,8 @@ simde_float16x8x2_t
 simde_vld2q_f16(simde_float16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
     return vld2q_f16(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE) && SIMDE_ARCH_RISCV_ZVFH && (SIMDE_NATURAL_VECTOR_SIZE >= 128)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG) \
+    && SIMDE_ARCH_RISCV_ZVFH && (SIMDE_NATURAL_VECTOR_SIZE >= 128)
     simde_float16x8_private r_[2];
     vfloat16m1x2_t dest = __riscv_vlseg2e16_v_f16m1x2((_Float16 *)&ptr[0], 8);
     r_[0].sv128 = __riscv_vget_v_f16m1x2_f16m1(dest, 0);
@@ -971,7 +973,7 @@ simde_float32x4x2_t
 simde_vld2q_f32(simde_float32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     return vld2q_f32(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_float32x4_private r_[2];
     vfloat32m1x2_t dest = __riscv_vlseg2e32_v_f32m1x2(&ptr[0], 4);
     r_[0].sv128 = __riscv_vget_v_f32m1x2_f32m1(dest, 0);
@@ -1021,7 +1023,7 @@ simde_float64x2x2_t
 simde_vld2q_f64(simde_float64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
   #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
     return vld2q_f64(ptr);
-  #elif defined(SIMDE_RISCV_V_NATIVE)
+  #elif defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
     simde_float64x2_private r_[2];
     vfloat64m1x2_t dest = __riscv_vlseg2e64_v_f64m1x2(&ptr[0], 2);
     r_[0].sv128 = __riscv_vget_v_f64m1x2_f64m1(dest, 0);
@@ -1060,7 +1062,7 @@ simde_vld2_p8(simde_poly8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
     return vld2_p8(ptr);
   #else
     simde_poly8x8_private r_[2];
-    #if defined(SIMDE_RISCV_V_NATIVE)
+    #if defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
      vuint8m1x2_t dest = __riscv_vlseg2e8_v_u8m1x2(&ptr[0], 8);
      r_[0].sv64 = __riscv_vget_v_u8m1x2_u8m1(dest, 0);
      r_[1].sv64 = __riscv_vget_v_u8m1x2_u8m1(dest, 1);
@@ -1095,7 +1097,7 @@ simde_vld2_p16(simde_poly16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
       SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
     #endif
     simde_poly16x4_private r_[2];
-    #if defined(SIMDE_RISCV_V_NATIVE)
+    #if defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
      vuint16m1x2_t dest = __riscv_vlseg2e16_v_u16m1x2(&ptr[0], 4);
      r_[0].sv64 = __riscv_vget_v_u16m1x2_u16m1(dest, 0);
      r_[1].sv64 = __riscv_vget_v_u16m1x2_u16m1(dest, 1);
@@ -1131,7 +1133,7 @@ simde_vld2_p64(simde_poly64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) {
   #else
     simde_poly64x1_private r_[2];

-    #if defined(SIMDE_RISCV_V_NATIVE)
+    #if defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
      vuint64m1x2_t dest = __riscv_vlseg2e64_v_u64m1x2(&ptr[0], 1);
      r_[0].sv64 = __riscv_vget_v_u64m1x2_u64m1(dest, 0);
      r_[1].sv64 = __riscv_vget_v_u64m1x2_u64m1(dest, 1);
@@ -1168,7 +1170,7 @@ simde_vld2q_p8(simde_poly8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) {
     #endif
     simde_poly8x16_private r_[2];

-    #if defined(SIMDE_RISCV_V_NATIVE)
+    #if defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
      vuint8m1x2_t dest = __riscv_vlseg2e8_v_u8m1x2(&ptr[0], 16);
      r_[0].sv128 = __riscv_vget_v_u8m1x2_u8m1(dest, 0);
      r_[1].sv128 = __riscv_vget_v_u8m1x2_u8m1(dest, 1);
@@ -1208,7 +1210,7 @@ simde_vld2q_p16(simde_poly16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
     #endif
     simde_poly16x8_private r_[2];

-    #if defined(SIMDE_RISCV_V_NATIVE)
+    #if defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
      vuint16m1x2_t dest = __riscv_vlseg2e16_v_u16m1x2(&ptr[0], 8);
      r_[0].sv128 = __riscv_vget_v_u16m1x2_u16m1(dest, 0);
      r_[1].sv128 = __riscv_vget_v_u16m1x2_u16m1(dest, 1);
@@ -1244,7 +1246,7 @@ simde_vld2q_p64(simde_poly64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
   #else
     simde_poly64x2_private r_[2];

-    #if defined(SIMDE_RISCV_V_NATIVE)
+    #if defined(SIMDE_RISCV_V_NATIVE) && defined(SIMDE_ARCH_RISCV_ZVLSSEG)
      vuint64m1x2_t dest = __riscv_vlseg2e64_v_u64m1x2(&ptr[0], 2);
      r_[0].sv128 = __riscv_vget_v_u64m1x2_u64m1(dest, 0);
      r_[1].sv128 = __riscv_vget_v_u64m1x2_u64m1(dest, 1);
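Every RVV path touched by this diff uses the vlseg2 segment-load intrinsics, which require the Zvlsseg facility on top of the base vector extension, so the added defined(SIMDE_ARCH_RISCV_ZVLSSEG) term keeps those paths out of builds whose toolchain lacks it. Below is a minimal standalone sketch of the same guard-plus-fallback pattern, not SIMDe code: it assumes a toolchain that defines __riscv_v and __riscv_zvlsseg (the exact test macro varies by toolchain; SIMDe wraps it as SIMDE_ARCH_RISCV_ZVLSSEG), and deinterleave_s8 is a hypothetical helper name.

    #include <stdint.h>

    #if defined(__riscv_v) && defined(__riscv_zvlsseg)
      #include <riscv_vector.h>
    #endif

    /* Hypothetical helper: split 16 interleaved bytes into their even and
     * odd elements, the same de-interleaving that NEON's vld2_s8 performs. */
    static void deinterleave_s8(const int8_t src[16], int8_t even[8], int8_t odd[8]) {
      #if defined(__riscv_v) && defined(__riscv_zvlsseg)
        /* One segment load fills a two-register tuple: register 0 receives
         * elements 0,2,4,... and register 1 receives elements 1,3,5,... */
        vint8m1x2_t seg = __riscv_vlseg2e8_v_i8m1x2(src, 8);
        __riscv_vse8_v_i8m1(even, __riscv_vget_v_i8m1x2_i8m1(seg, 0), 8);
        __riscv_vse8_v_i8m1(odd, __riscv_vget_v_i8m1x2_i8m1(seg, 1), 8);
      #else
        /* Portable scalar fallback for toolchains without segment loads. */
        for (int i = 0; i < 8; i++) {
          even[i] = src[2 * i];
          odd[i] = src[2 * i + 1];
        }
      #endif
    }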