author    Rohan McLure <rmclure@linux.ibm.com>  2024-01-04 10:25:50 +0100
committer Tomas Mraz <tomas@openssl.org>        2024-01-09 15:46:22 +0100
commit    5b139f95c9a47a55a0c54100f3837b1eee942b04
tree      af3d3377628fc343023290aee74961e1f11767ba
parent    4001226834c502859186e2c1d0c5c91f63f542c0
poly1305-ppc.pl: Fix vector register clobbering
Fixes CVE-2023-6129

The POLY1305 MAC (message authentication code) implementation in OpenSSL for PowerPC CPUs saves the contents of vector registers in a different order than they are restored. Thus the contents of some of these vector registers are corrupted when returning to the caller. The vulnerable code is used only on newer PowerPC processors supporting the PowerISA 2.07 instructions.

Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Richard Levitte <levitte@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/23200)

(cherry picked from commit 8d847a3ffd4f0b17ee33962cf69c36224925b34f)
-rwxr-xr-x  crypto/poly1305/asm/poly1305-ppc.pl | 42
1 file changed, 21 insertions, 21 deletions
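For context: the offload code in __poly1305_blocks_vsx spills v20-v31 through two stack pointers, r10 and r11, each advanced by 32 bytes per use, so the registers alternate strictly between the two pointers. The restore in Ldone_vsx has to walk exactly the same interleaving, otherwise a register is reloaded from a neighbour's save slot. The following minimal Perl sketch models that failure mode (a toy illustration only, not OpenSSL code; the "buggy" permutation is simplified, the real pre-fix ordering was more involved):

#!/usr/bin/env perl
# Toy model of the failure mode fixed below: twelve "vector registers"
# (v20..v31) are spilled to stack slots and later reloaded.  The spill
# walks two pointers (r10/r11) in one interleaving; if the reload walks
# them in a different interleaving, registers come back holding a
# neighbour's value.
use strict;
use warnings;

# slot -> register maps; the identity map stands in for the fixed,
# strictly alternating r10/r11 order.  The "buggy" map swaps two lanes
# purely for illustration.
my @good  = (0 .. 11);
my @buggy = (0, 1, 2, 4, 3, 5 .. 11);

my @regs  = map { 100 + $_ } 0 .. 11;   # caller-owned values that must survive
my @slots;

# spill using the mismatched order, reload using the correct one
$slots[$_]       = $regs[$buggy[$_]] for 0 .. 11;
@regs            = (0) x 12;
$regs[$good[$_]] = $slots[$_]        for 0 .. 11;

for my $i (0 .. 11) {
    printf "v%d corrupted: got %d, expected %d\n", 20 + $i, $regs[$i], 100 + $i
        if $regs[$i] != 100 + $i;
}

Running the sketch reports two clobbered registers; the actual patch below makes the save and restore sequences use the identical r10/r11 alternation so no such mismatch remains.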
diff --git a/crypto/poly1305/asm/poly1305-ppc.pl b/crypto/poly1305/asm/poly1305-ppc.pl
index 9f86134d92..2e601bb9c2 100755
--- a/crypto/poly1305/asm/poly1305-ppc.pl
+++ b/crypto/poly1305/asm/poly1305-ppc.pl
@@ -744,7 +744,7 @@ ___
my $LOCALS= 6*$SIZE_T;
my $VSXFRAME = $LOCALS + 6*$SIZE_T;
$VSXFRAME += 128; # local variables
- $VSXFRAME += 13*16; # v20-v31 offload
+ $VSXFRAME += 12*16; # v20-v31 offload
my $BIG_ENDIAN = ($flavour !~ /le/) ? 4 : 0;
@@ -919,12 +919,12 @@ __poly1305_blocks_vsx:
addi r11,r11,32
stvx v22,r10,$sp
addi r10,r10,32
- stvx v23,r10,$sp
- addi r10,r10,32
- stvx v24,r11,$sp
+ stvx v23,r11,$sp
addi r11,r11,32
- stvx v25,r10,$sp
+ stvx v24,r10,$sp
addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
stvx v26,r10,$sp
addi r10,r10,32
stvx v27,r11,$sp
@@ -1153,12 +1153,12 @@ __poly1305_blocks_vsx:
addi r11,r11,32
stvx v22,r10,$sp
addi r10,r10,32
- stvx v23,r10,$sp
- addi r10,r10,32
- stvx v24,r11,$sp
+ stvx v23,r11,$sp
addi r11,r11,32
- stvx v25,r10,$sp
+ stvx v24,r10,$sp
addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
stvx v26,r10,$sp
addi r10,r10,32
stvx v27,r11,$sp
@@ -1899,26 +1899,26 @@ Ldone_vsx:
mtspr 256,r12 # restore vrsave
lvx v20,r10,$sp
addi r10,r10,32
- lvx v21,r10,$sp
- addi r10,r10,32
- lvx v22,r11,$sp
+ lvx v21,r11,$sp
addi r11,r11,32
- lvx v23,r10,$sp
+ lvx v22,r10,$sp
addi r10,r10,32
- lvx v24,r11,$sp
+ lvx v23,r11,$sp
addi r11,r11,32
- lvx v25,r10,$sp
+ lvx v24,r10,$sp
addi r10,r10,32
- lvx v26,r11,$sp
+ lvx v25,r11,$sp
addi r11,r11,32
- lvx v27,r10,$sp
+ lvx v26,r10,$sp
addi r10,r10,32
- lvx v28,r11,$sp
+ lvx v27,r11,$sp
addi r11,r11,32
- lvx v29,r10,$sp
+ lvx v28,r10,$sp
addi r10,r10,32
- lvx v30,r11,$sp
- lvx v31,r10,$sp
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
$POP r27,`$VSXFRAME-$SIZE_T*5`($sp)
$POP r28,`$VSXFRAME-$SIZE_T*4`($sp)
$POP r29,`$VSXFRAME-$SIZE_T*3`($sp)