author     Yangyu Chen <cyy@cyyself.name>    2024-04-22 10:40:25 +0800
committer  Tomas Mraz <tomas@openssl.org>    2024-05-08 11:10:45 +0200
commit     f6ce48f5b8ad4d8d748ea87d2490cbed08db9936 (patch)
tree       10ef611112f24f0ba69ec660f1d7b6c31a07919a
parent     c857205407a0a074baf0db7fa7237a469f297c83 (diff)
chacha-riscv64-v-zbb.pl: better format
This patch merges the `add` and `xor` parts of chacha_sub_round, which are identical in the RISC-V Vector-only and Zvkb implementations. There is no change to the generated ASM code except for the indentation.

Signed-off-by: Yangyu Chen <cyy@cyyself.name>
Reviewed-by: Paul Dale <ppzgs1@gmail.com>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/24069)
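The refactor follows a common perlasm pattern: emit the shared add/xor sequence into one heredoc up front, then append only the branch-specific rotation tail. Below is a minimal standalone sketch of that structure in plain Perl, with hypothetical operand names and literal mnemonics; the real script instead interpolates helper subs such as vadd_vv via @{[...]}.

sub chacha_sub_round_sketch {
    my ($use_zvkb) = @_;

    # Shared half: the add/xor sequence is the same in both paths,
    # so it is emitted only once.
    my $code = <<___;
    vadd.vv v0, v0, v4
    vxor.vv v8, v8, v0
___

    # Branch-specific tail: Zvkb has a native rotate instruction,
    # while the plain-V fallback builds the rotate from two shifts
    # and an OR.
    if ($use_zvkb) {
        $code .= <<___;
    vror.vi v8, v8, 16
___
    } else {
        $code .= <<___;
    vsll.vi v16, v8, 16
    vsrl.vi v8, v8, 16
    vor.vv v8, v8, v16
___
    }

    return $code;
}

This mirrors the shape of the patched chacha_sub_round: one $code heredoc for the common part, and a per-branch $ror_part appended before the single return at the end.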
-rwxr-xr-x  crypto/chacha/asm/chacha-riscv64-v-zbb.pl  65
1 file changed, 28 insertions(+), 37 deletions(-)
diff --git a/crypto/chacha/asm/chacha-riscv64-v-zbb.pl b/crypto/chacha/asm/chacha-riscv64-v-zbb.pl
index 8bc7fbc4ad..6518e3fc24 100755
--- a/crypto/chacha/asm/chacha-riscv64-v-zbb.pl
+++ b/crypto/chacha/asm/chacha-riscv64-v-zbb.pl
@@ -111,26 +111,29 @@ sub chacha_sub_round {
$V_T0, $V_T1, $V_T2, $V_T3,
) = @_;
- # a += b; c ^= a; c <<<= $ROL_SHIFT;
+ # a += b; c ^= a;
+ my $code = <<___;
+ @{[vadd_vv $A0, $A0, $B0]}
+ add $S_A0, $S_A0, $S_B0
+ @{[vadd_vv $A1, $A1, $B1]}
+ add $S_A1, $S_A1, $S_B1
+ @{[vadd_vv $A2, $A2, $B2]}
+ add $S_A2, $S_A2, $S_B2
+ @{[vadd_vv $A3, $A3, $B3]}
+ add $S_A3, $S_A3, $S_B3
+ @{[vxor_vv $C0, $C0, $A0]}
+ xor $S_C0, $S_C0, $S_A0
+ @{[vxor_vv $C1, $C1, $A1]}
+ xor $S_C1, $S_C1, $S_A1
+ @{[vxor_vv $C2, $C2, $A2]}
+ xor $S_C2, $S_C2, $S_A2
+ @{[vxor_vv $C3, $C3, $A3]}
+ xor $S_C3, $S_C3, $S_A3
+___
+ # c <<<= $ROL_SHIFT;
if ($use_zvkb) {
- my $code = <<___;
- @{[vadd_vv $A0, $A0, $B0]}
- add $S_A0, $S_A0, $S_B0
- @{[vadd_vv $A1, $A1, $B1]}
- add $S_A1, $S_A1, $S_B1
- @{[vadd_vv $A2, $A2, $B2]}
- add $S_A2, $S_A2, $S_B2
- @{[vadd_vv $A3, $A3, $B3]}
- add $S_A3, $S_A3, $S_B3
- @{[vxor_vv $C0, $C0, $A0]}
- xor $S_C0, $S_C0, $S_A0
- @{[vxor_vv $C1, $C1, $A1]}
- xor $S_C1, $S_C1, $S_A1
- @{[vxor_vv $C2, $C2, $A2]}
- xor $S_C2, $S_C2, $S_A2
- @{[vxor_vv $C3, $C3, $A3]}
- xor $S_C3, $S_C3, $S_A3
+ my $ror_part = <<___;
@{[vror_vi $C0, $C0, 32 - $ROL_SHIFT]}
@{[roriw $S_C0, $S_C0, 32 - $ROL_SHIFT]}
@{[vror_vi $C1, $C1, 32 - $ROL_SHIFT]}
@@ -140,25 +143,10 @@ sub chacha_sub_round {
@{[vror_vi $C3, $C3, 32 - $ROL_SHIFT]}
@{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
___
- return $code;
+
+ $code .= $ror_part;
} else {
- my $code = <<___;
- @{[vadd_vv $A0, $A0, $B0]}
- add $S_A0, $S_A0, $S_B0
- @{[vadd_vv $A1, $A1, $B1]}
- add $S_A1, $S_A1, $S_B1
- @{[vadd_vv $A2, $A2, $B2]}
- add $S_A2, $S_A2, $S_B2
- @{[vadd_vv $A3, $A3, $B3]}
- add $S_A3, $S_A3, $S_B3
- @{[vxor_vv $C0, $C0, $A0]}
- xor $S_C0, $S_C0, $S_A0
- @{[vxor_vv $C1, $C1, $A1]}
- xor $S_C1, $S_C1, $S_A1
- @{[vxor_vv $C2, $C2, $A2]}
- xor $S_C2, $S_C2, $S_A2
- @{[vxor_vv $C3, $C3, $A3]}
- xor $S_C3, $S_C3, $S_A3
+ my $ror_part = <<___;
@{[vsll_vi $V_T0, $C0, $ROL_SHIFT]}
@{[vsll_vi $V_T1, $C1, $ROL_SHIFT]}
@{[vsll_vi $V_T2, $C2, $ROL_SHIFT]}
@@ -176,8 +164,11 @@ ___
@{[vor_vv $C3, $C3, $V_T3]}
@{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
___
- return $code;
+
+ $code .= $ror_part;
}
+
+ return $code;
}
sub chacha_quad_round_group {
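One note on the rotation constants above: vror.vi and roriw rotate right, while the ChaCha quarter round needs a left rotation (c <<<= $ROL_SHIFT), so the script rotates right by 32 - $ROL_SHIFT instead. A quick plain-Perl check of that identity; rol32/ror32 are hypothetical helpers written only for this check (valid for shifts of 1 to 31), not part of the script:

use strict;
use warnings;

# Hypothetical 32-bit rotate helpers, just to demonstrate the identity.
sub rol32 { my ($x, $s) = @_; return (($x << $s) | ($x >> (32 - $s))) & 0xffffffff; }
sub ror32 { my ($x, $s) = @_; return (($x >> $s) | ($x << (32 - $s))) & 0xffffffff; }

my ($x, $s) = (0xdeadbeef, 12);
printf "rol32: %08x\n", rol32($x, $s);       # dbeefdea
printf "ror32: %08x\n", ror32($x, 32 - $s);  # dbeefdea, same result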