The patch below does not apply to the 4.19-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
To reproduce the conflict and resubmit, you may use the following commands:
git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ linux-4.19.y
git checkout FETCH_HEAD
git cherry-pick -x 3b2f2d22fb424e9bebda4dbf6676cbfc7f9f62cd
# <resolve conflicts, build, test, etc.>
git commit -s
git send-email --to '<stable@vger.kernel.org>' --in-reply-to '2024120239-stung-preteen-1a68@gregkh' --subject-prefix 'PATCH 4.19.y' HEAD^..
Possible dependencies:
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 3b2f2d22fb424e9bebda4dbf6676cbfc7f9f62cd Mon Sep 17 00:00:00 2001
From: Eric Biggers <ebiggers@google.com>
Date: Wed, 16 Oct 2024 17:00:42 -0700
Subject: [PATCH] crypto: x86/aegis128 - access 32-bit arguments as 32-bit
Fix the AEGIS assembly code to access 'unsigned int' arguments as 32-bit values instead of 64-bit, since the upper bits of the corresponding 64-bit registers are not guaranteed to be zero.
Note: there haven't been any reports of this bug actually causing incorrect behavior. Neither gcc nor clang guarantees zero-extension to 64 bits, but zero-extension is likely to happen in practice because most instructions that operate on 32-bit registers zero-extend to 64 bits.
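For illustration only (this snippet is not part of the original commit): under the x86-64 SysV calling convention, a 32-bit argument such as LEN arrives in %esi, and bits 63:32 of %rsi are unspecified. Because writing a 32-bit register zero-extends into the full 64-bit register, switching the asm to the 32-bit register names is enough to fix the access:

	mov	%rsi, %r8	/* buggy: may copy garbage in bits 63:32 */
	mov	%esi, %r8d	/* fixed: zero-extends cleanly into %r8  */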
Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: stable@vger.kernel.org
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index ad7f4c891625..2de859173940 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -21,7 +21,7 @@
 #define T1	%xmm7
 
 #define STATEP	%rdi
-#define LEN	%rsi
+#define LEN	%esi
 #define SRC	%rdx
 #define DST	%rcx
 
@@ -76,32 +76,32 @@ SYM_FUNC_START_LOCAL(__load_partial)
 	xor %r9d, %r9d
 	pxor MSG, MSG
 
-	mov LEN, %r8
+	mov LEN, %r8d
 	and $0x1, %r8
 	jz .Lld_partial_1
 
-	mov LEN, %r8
+	mov LEN, %r8d
 	and $0x1E, %r8
 	add SRC, %r8
 	mov (%r8), %r9b
 
 .Lld_partial_1:
-	mov LEN, %r8
+	mov LEN, %r8d
 	and $0x2, %r8
 	jz .Lld_partial_2
 
-	mov LEN, %r8
+	mov LEN, %r8d
 	and $0x1C, %r8
 	add SRC, %r8
 	shl $0x10, %r9
 	mov (%r8), %r9w
 
 .Lld_partial_2:
-	mov LEN, %r8
+	mov LEN, %r8d
 	and $0x4, %r8
 	jz .Lld_partial_4
 
-	mov LEN, %r8
+	mov LEN, %r8d
 	and $0x18, %r8
 	add SRC, %r8
 	shl $32, %r9
@@ -111,11 +111,11 @@ SYM_FUNC_START_LOCAL(__load_partial)
 .Lld_partial_4:
 	movq %r9, MSG
 
-	mov LEN, %r8
+	mov LEN, %r8d
 	and $0x8, %r8
 	jz .Lld_partial_8
 
-	mov LEN, %r8
+	mov LEN, %r8d
 	and $0x10, %r8
 	add SRC, %r8
 	pslldq $8, MSG
@@ -139,7 +139,7 @@ SYM_FUNC_END(__load_partial)
  *   %r10
  */
 SYM_FUNC_START_LOCAL(__store_partial)
-	mov LEN, %r8
+	mov LEN, %r8d
 	mov DST, %r9
 
 	movq T0, %r10
@@ -677,7 +677,7 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
 	call __store_partial
 
 	/* mask with byte count: */
-	movq LEN, T0
+	movd LEN, T0
 	punpcklbw T0, T0
 	punpcklbw T0, T0
 	punpcklbw T0, T0
@@ -702,7 +702,8 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
 
 /*
  * void crypto_aegis128_aesni_final(void *state, void *tag_xor,
- *                                  u64 assoclen, u64 cryptlen);
+ *                                  unsigned int assoclen,
+ *                                  unsigned int cryptlen);
  */
 SYM_FUNC_START(crypto_aegis128_aesni_final)
 	FRAME_BEGIN
@@ -715,8 +716,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
 	movdqu 0x40(STATEP), STATE4
 
 	/* prepare length block: */
-	movq %rdx, MSG
-	movq %rcx, T0
+	movd %edx, MSG
+	movd %ecx, T0
 	pslldq $8, T0
 	pxor T0, MSG
 	psllq $3, MSG /* multiply by 8 (to get bit count) */
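As a worked example of the final hunk above (illustrative values, not part of the patch): with assoclen = 24 and cryptlen = 100, the fixed code builds the 16-byte length block of 64-bit little-endian bit counts used in AEGIS-128 finalization, shown here as the two quadwords of each xmm register:

	movd	%edx, MSG	/* MSG = { 24, 0 }          */
	movd	%ecx, T0	/* T0  = { 100, 0 }         */
	pslldq	$8, T0		/* T0  = { 0, 100 }         */
	pxor	T0, MSG		/* MSG = { 24, 100 }        */
	psllq	$3, MSG		/* MSG = { 192, 800 } bits  */

With the old movq forms, any garbage in the upper halves of %rdx and %rcx would have been folded into this block and corrupted the authentication tag.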
linux-stable-mirror@lists.linaro.org