@@ -9,6 +9,7 @@
  */
 
 #include <asm/neon.h>
+#include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
 #include <linux/cpufeature.h>
@@ -21,6 +22,9 @@ MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
 MODULE_LICENSE("GPL v2");
 
+asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
 struct aes_block {
 	u8 b[AES_BLOCK_SIZE];
 };
@@ -45,7 +49,12 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	void *dummy0;
 	int dummy1;
 
-	kernel_neon_begin_partial(4);
+	if (!may_use_simd()) {
+		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
+		return;
+	}
+
+	kernel_neon_begin();
 
 	__asm__("	ld1	{v0.16b}, %[in]		;"
 		"	ld1	{v1.4s}, [%[key]], #16	;"
@@ -90,7 +99,12 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	void *dummy0;
 	int dummy1;
 
-	kernel_neon_begin_partial(4);
+	if (!may_use_simd()) {
+		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
+		return;
+	}
+
+	kernel_neon_begin();
 
 	__asm__("	ld1	{v0.16b}, %[in]		;"
 		"	ld1	{v1.4s}, [%[key]], #16	;"
@@ -170,7 +184,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 	for (i = 0; i < kwords; i++)
 		ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
 
-	kernel_neon_begin_partial(2);
+	kernel_neon_begin();
 	for (i = 0; i < sizeof(rcon); i++) {
 		u32 *rki = ctx->key_enc + (i * kwords);
 		u32 *rko = rki + kwords;
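
The hunks above all apply the same pattern: when the kernel cannot use NEON in the current context, take a scalar fallback path instead of entering a kernel-mode NEON section. Below is a minimal sketch of that pattern, not part of the patch; my_encrypt(), my_scalar_encrypt() and my_neon_encrypt() are hypothetical placeholders, while may_use_simd(), kernel_neon_begin(), kernel_neon_end() and num_rounds() are the helpers the patch itself relies on.

/*
 * Hedged sketch of the may_use_simd() fallback pattern (not from the
 * patch). my_scalar_encrypt() and my_neon_encrypt() stand in for a
 * generic scalar AES routine and a NEON-accelerated one.
 */
static void my_encrypt(struct crypto_aes_ctx *ctx, u8 dst[], u8 const src[])
{
	if (!may_use_simd()) {
		/* NEON unusable here: use the scalar fallback and return
		 * without touching FP/SIMD state. */
		my_scalar_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
		return;
	}

	kernel_neon_begin();
	my_neon_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
	kernel_neon_end();
}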