
Commit aaf08a0

Ard Biesheuvel authored and mosimchah committed
UPSTREAM: crypto: arm64/aes-ce-cipher: add non-SIMD generic fallback
The arm64 kernel will shortly disallow nested kernel mode NEON, so add a fallback to scalar code that can be invoked in that case.

Signed-off-by: Ard Biesheuvel <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Link: https://git.kernel.org/linus/b8fb993a836cd432309410eadf083bbe9c0e9e9c
Signed-off-by: Nathan Chancellor <[email protected]>
1 parent dc8b7db commit aaf08a0
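
The change follows the usual non-SIMD fallback pattern for kernel-mode crypto: probe may_use_simd() first, dispatch to scalar code when the NEON unit is off limits, and otherwise bracket the accelerated path with kernel_neon_begin()/kernel_neon_end(). A minimal sketch of that shape (do_encrypt() and scalar_encrypt() are hypothetical names, not part of the patch; the patch's actual fallback is __aes_arm64_encrypt()):

#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/types.h>

/*
 * scalar_encrypt() is a hypothetical stand-in for the generic scalar
 * implementation; in this patch that role is played by
 * __aes_arm64_encrypt() from the CRYPTO_AES_ARM64 module.
 */
static void scalar_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

static void do_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds)
{
        if (!may_use_simd()) {
                /* NEON unusable (e.g. it would nest): take the scalar path */
                scalar_encrypt(rk, out, in, rounds);
                return;
        }

        kernel_neon_begin();
        /* ... NEON / Crypto Extensions code, as in the hunks below ... */
        kernel_neon_end();
}

The early return keeps the scalar path entirely outside the begin/end bracket, so no NEON state is touched when SIMD is unavailable.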

2 files changed: +18 −3


arch/arm64/crypto/Kconfig (+1)

@@ -31,6 +31,7 @@ config CRYPTO_AES_ARM64_CE
         tristate "AES core cipher using ARMv8 Crypto Extensions"
         depends on ARM64 && KERNEL_MODE_NEON
         select CRYPTO_ALGAPI
+        select CRYPTO_AES_ARM64
 
 config CRYPTO_AES_ARM64_CE_CCM
         tristate "AES in CCM mode using ARMv8 Crypto Extensions"

arch/arm64/crypto/aes-ce-cipher.c (+17 −3)

@@ -9,6 +9,7 @@
  */
 
 #include <asm/neon.h>
+#include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
 #include <linux/cpufeature.h>
@@ -21,6 +22,9 @@ MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
 MODULE_LICENSE("GPL v2");
 
+asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
 struct aes_block {
         u8 b[AES_BLOCK_SIZE];
 };
@@ -45,7 +49,12 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
         void *dummy0;
         int dummy1;
 
-        kernel_neon_begin_partial(4);
+        if (!may_use_simd()) {
+                __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
+                return;
+        }
+
+        kernel_neon_begin();
 
         __asm__(" ld1 {v0.16b}, %[in] ;"
                 " ld1 {v1.4s}, [%[key]], #16 ;"
@@ -90,7 +99,12 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
         void *dummy0;
         int dummy1;
 
-        kernel_neon_begin_partial(4);
+        if (!may_use_simd()) {
+                __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
+                return;
+        }
+
+        kernel_neon_begin();
 
         __asm__(" ld1 {v0.16b}, %[in] ;"
                 " ld1 {v1.4s}, [%[key]], #16 ;"
@@ -170,7 +184,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
         for (i = 0; i < kwords; i++)
                 ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
 
-        kernel_neon_begin_partial(2);
+        kernel_neon_begin();
         for (i = 0; i < sizeof(rcon); i++) {
                 u32 *rki = ctx->key_enc + (i * kwords);
                 u32 *rko = rki + kwords;

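Note that ce_aes_expandkey() gains no scalar fallback; it only trades kernel_neon_begin_partial(2) for kernel_neon_begin(), since the partial-save variant is being removed as part of the same NEON rework the commit message refers to.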