author    Jerry Shih <bignose1007@gmail.com>        2023-09-28 13:51:17 +0800
committer Hugo Landau <hlandau@openssl.org>         2023-10-26 15:55:50 +0100
commit    3645eb0be22a4cea4300ab5afbf248d195d0f45b (patch)
tree      39cf602aa0109e42c0048e9f1deb0f67befbaef6 /crypto
parent    d26d01e5ec29ce0c94ae18c9cdedd8f1c036fcc3 (diff)
Update for Zvkb extension.

https://github.com/riscv/riscv-crypto/blob/c8ddeb7e64a3444dda0438316af1238aeed72041/doc/vector/riscv-crypto-vector-zvkb.adoc

Create `RISCV_HAS_ZVKB()` macro.
Use Zvkb for SM4 instead of Zvbb.
Use Zvkb for GHASH instead of Zvbb.
We can simply use Zvbb's subset Zvkb for greater flexibility.

Signed-off-by: Jerry Shih <jerry.shih@sifive.com>
Signed-off-by: Phoebe Chen <phoebe.chen@sifive.com>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
Reviewed-by: Hugo Landau <hlandau@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/21923)
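
The core idea of the change, per the spec linked above, is that Zvkb is a proper subset of Zvbb, so gating the GHASH and SM4 code on Zvkb accepts any hardware that implements either extension. Below is a minimal sketch of the capability-macro pattern this commit introduces; the capability word and bit layout are hypothetical stand-ins, not OpenSSL's actual RISC-V capability plumbing.

/*
 * Illustrative sketch only. OpenSSL's real RISCV_HAS_ZVKB() lives in its
 * RISC-V capability header; the capability bits here and the way they are
 * populated are assumptions made for the example.
 */
#include <stdbool.h>

#define CAP_ZVKB (1u << 0)          /* hypothetical capability bits */
#define CAP_ZVBB (1u << 1)

static unsigned int riscv_caps;     /* assume filled in once by a runtime probe */

/* Zvbb is a superset of Zvkb, so either bit satisfies a Zvkb requirement. */
#define RISCV_HAS_ZVKB() ((riscv_caps & (CAP_ZVKB | CAP_ZVBB)) != 0)
#define RISCV_HAS_ZVBB() ((riscv_caps & CAP_ZVBB) != 0)
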
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/modes/asm/ghash-riscv64-zvkb-zvbc.pl (renamed from crypto/modes/asm/ghash-riscv64-zvbb-zvbc.pl)  40
-rw-r--r--  crypto/modes/asm/ghash-riscv64-zvkg.pl  19
-rw-r--r--  crypto/modes/build.info                  4
-rw-r--r--  crypto/modes/gcm128.c                   22
-rw-r--r--  crypto/perlasm/riscv.pm                 13
-rw-r--r--  crypto/sm4/asm/sm4-riscv64-zvksed.pl     6
6 files changed, 59 insertions, 45 deletions
diff --git a/crypto/modes/asm/ghash-riscv64-zvbb-zvbc.pl b/crypto/modes/asm/ghash-riscv64-zvkb-zvbc.pl
index c64211c3ab..5eb748bdc2 100644
--- a/crypto/modes/asm/ghash-riscv64-zvbb-zvbc.pl
+++ b/crypto/modes/asm/ghash-riscv64-zvkb-zvbc.pl
@@ -35,9 +35,9 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - Vector Bit-manipulation used in Cryptography ('Zvbb')
-# - Vector Carryless Multiplication ('Zvbc')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+# - RISC-V Vector Carryless Multiplication extension ('Zvbc')
use strict;
use warnings;
@@ -59,20 +59,20 @@ my $code=<<___;
___
################################################################################
-# void gcm_init_rv64i_zvbb_zvbc(u128 Htable[16], const u64 H[2]);
+# void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 H[2]);
#
# input: H: 128-bit H - secret parameter E(K, 0^128)
-# output: Htable: Preprocessed key data for gcm_gmult_rv64i_zvbb_zvbc and
-# gcm_ghash_rv64i_zvbb_zvbc
+# output: Htable: Preprocessed key data for gcm_gmult_rv64i_zvkb_zvbc and
+# gcm_ghash_rv64i_zvkb_zvbc
{
my ($Htable,$H,$TMP0,$TMP1,$TMP2) = ("a0","a1","t0","t1","t2");
my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");
$code .= <<___;
.p2align 3
-.globl gcm_init_rv64i_zvbb_zvbc
-.type gcm_init_rv64i_zvbb_zvbc,\@function
-gcm_init_rv64i_zvbb_zvbc:
+.globl gcm_init_rv64i_zvkb_zvbc
+.type gcm_init_rv64i_zvkb_zvbc,\@function
+gcm_init_rv64i_zvkb_zvbc:
# Load/store data in reverse order.
# This is needed as a part of endianness swap.
add $H, $H, 8
@@ -110,12 +110,12 @@ gcm_init_rv64i_zvbb_zvbc:
@{[vse64_v $V1, $Htable]} # vse64.v v1, (a0)
ret
-.size gcm_init_rv64i_zvbb_zvbc,.-gcm_init_rv64i_zvbb_zvbc
+.size gcm_init_rv64i_zvkb_zvbc,.-gcm_init_rv64i_zvkb_zvbc
___
}
################################################################################
-# void gcm_gmult_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16]);
+# void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
#
# input: Xi: current hash value
# Htable: preprocessed H
@@ -127,9 +127,9 @@ my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");
$code .= <<___;
.text
.p2align 3
-.globl gcm_gmult_rv64i_zvbb_zvbc
-.type gcm_gmult_rv64i_zvbb_zvbc,\@function
-gcm_gmult_rv64i_zvbb_zvbc:
+.globl gcm_gmult_rv64i_zvkb_zvbc
+.type gcm_gmult_rv64i_zvkb_zvbc,\@function
+gcm_gmult_rv64i_zvkb_zvbc:
ld $TMP0, ($Htable)
ld $TMP1, 8($Htable)
li $TMP2, 63
@@ -228,12 +228,12 @@ gcm_gmult_rv64i_zvbb_zvbc:
@{[vrev8_v $V2, $V2]} # vrev8.v v2, v2
@{[vsse64_v $V2, $Xi, $TMP4]} # vsse64.v v2, (a0), t4
ret
-.size gcm_gmult_rv64i_zvbb_zvbc,.-gcm_gmult_rv64i_zvbb_zvbc
+.size gcm_gmult_rv64i_zvkb_zvbc,.-gcm_gmult_rv64i_zvkb_zvbc
___
}
################################################################################
-# void gcm_ghash_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16],
+# void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
# const u8 *inp, size_t len);
#
# input: Xi: current hash value
@@ -247,9 +247,9 @@ my ($V0,$V1,$V2,$V3,$V4,$V5,$V6,$Vinp) = ("v0","v1","v2","v3","v4","v5","v6","v7
$code .= <<___;
.p2align 3
-.globl gcm_ghash_rv64i_zvbb_zvbc
-.type gcm_ghash_rv64i_zvbb_zvbc,\@function
-gcm_ghash_rv64i_zvbb_zvbc:
+.globl gcm_ghash_rv64i_zvkb_zvbc
+.type gcm_ghash_rv64i_zvkb_zvbc,\@function
+gcm_ghash_rv64i_zvkb_zvbc:
ld $TMP0, ($Htable)
ld $TMP1, 8($Htable)
li $TMP2, 63
@@ -361,7 +361,7 @@ Lstep:
@{[vsse64_v $V5, $Xi, $M8]} # vsse64.v v2, (a0), t4
ret
-.size gcm_ghash_rv64i_zvbb_zvbc,.-gcm_ghash_rv64i_zvbb_zvbc
+.size gcm_ghash_rv64i_zvkb_zvbc,.-gcm_ghash_rv64i_zvkb_zvbc
___
}
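
The doc comments above give the full contract of the three renamed entry points: gcm_init_rv64i_zvkb_zvbc preprocesses H into Htable, gcm_gmult_rv64i_zvkb_zvbc multiplies the current hash value Xi by H, and gcm_ghash_rv64i_zvkb_zvbc folds whole input blocks into Xi. A hedged C caller sketch follows; the u64/u128 typedefs are local stand-ins for OpenSSL's internal types and the wrapper function is hypothetical.

/*
 * Hypothetical caller of the zvkb_zvbc GHASH entry points; the typedefs
 * below are assumptions standing in for OpenSSL's u64/u128.
 */
#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;
typedef struct { u64 hi, lo; } u128;   /* assumed layout, illustration only */

void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 H[2]);
void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
                               const uint8_t *inp, size_t len);

/* Hash `len` bytes of full 16-byte blocks into Xi, starting from H. */
static void ghash_blocks(u64 Xi[2], const u64 H[2],
                         const uint8_t *inp, size_t len)
{
    u128 Htable[16];

    gcm_init_rv64i_zvkb_zvbc(Htable, H);              /* preprocess H once */
    gcm_ghash_rv64i_zvkb_zvbc(Xi, Htable, inp, len);  /* fold in the data  */
    /* gcm_gmult_rv64i_zvkb_zvbc(Xi, Htable) would multiply Xi by H alone. */
}
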
diff --git a/crypto/modes/asm/ghash-riscv64-zvkg.pl b/crypto/modes/asm/ghash-riscv64-zvkg.pl
index c3217598e4..8423ae9cf8 100644
--- a/crypto/modes/asm/ghash-riscv64-zvkg.pl
+++ b/crypto/modes/asm/ghash-riscv64-zvkg.pl
@@ -35,8 +35,11 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - RISC-V vector crypto GHASH extension ('Zvkg')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector GCM/GMAC extension ('Zvkg')
+#
+# Optional:
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
use strict;
use warnings;
@@ -59,7 +62,7 @@ ___
################################################################################
# void gcm_init_rv64i_zvkg(u128 Htable[16], const u64 H[2]);
-# void gcm_init_rv64i_zvkg_zvbb(u128 Htable[16], const u64 H[2]);
+# void gcm_init_rv64i_zvkg_zvkb(u128 Htable[16], const u64 H[2]);
#
# input: H: 128-bit H - secret parameter E(K, 0^128)
# output: Htable: Copy of secret parameter (in normalized byte order)
@@ -88,15 +91,15 @@ my ($Htable,$H,$V0) = ("a0","a1","v0");
$code .= <<___;
.p2align 3
-.globl gcm_init_rv64i_zvkg_zvbb
-.type gcm_init_rv64i_zvkg_zvbb,\@function
-gcm_init_rv64i_zvkg_zvbb:
- @{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, tu, mu
+.globl gcm_init_rv64i_zvkg_zvkb
+.type gcm_init_rv64i_zvkg_zvkb,\@function
+gcm_init_rv64i_zvkg_zvkb:
+ @{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, tu, mu
@{[vle64_v $V0, $H]} # vle64.v v0, (a1)
@{[vrev8_v $V0, $V0]} # vrev8.v v0, v0
@{[vse64_v $V0, $Htable]} # vse64.v v0, (a0)
ret
-.size gcm_init_rv64i_zvkg_zvbb,.-gcm_init_rv64i_zvkg_zvbb
+.size gcm_init_rv64i_zvkg_zvkb,.-gcm_init_rv64i_zvkg_zvkb
___
}
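
The Zvkg init path added above is just a copy of H with each 64-bit half byte-reversed into "normalized byte order" (vle64.v, vrev8.v, vse64.v). A portable-C equivalent is sketched below; the function name and two-element Htable view are illustrative, not OpenSSL symbols.

/*
 * Portable sketch of what gcm_init_rv64i_zvkg_zvkb does: byte-swap each
 * 64-bit half of H while copying it out.
 */
#include <stdint.h>

static uint64_t bswap64(uint64_t x)
{
    x = (x >> 32) | (x << 32);
    x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
    x = ((x & 0xff00ff00ff00ff00ULL) >> 8)  | ((x & 0x00ff00ff00ff00ffULL) << 8);
    return x;
}

static void init_zvkg_style(uint64_t Htable[2], const uint64_t H[2])
{
    Htable[0] = bswap64(H[0]);     /* vrev8.v with e64: reverse the bytes */
    Htable[1] = bswap64(H[1]);     /* within each 64-bit element          */
}
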
diff --git a/crypto/modes/build.info b/crypto/modes/build.info
index 7b188fba81..9ebb5cc7a8 100644
--- a/crypto/modes/build.info
+++ b/crypto/modes/build.info
@@ -43,7 +43,7 @@ IF[{- !$disabled{asm} -}]
$MODESASM_c64xplus=ghash-c64xplus.s
$MODESDEF_c64xplus=GHASH_ASM
- $MODESASM_riscv64=ghash-riscv64.s ghash-riscv64-zvbb-zvbc.s ghash-riscv64-zvkg.s
+ $MODESASM_riscv64=ghash-riscv64.s ghash-riscv64-zvkb-zvbc.s ghash-riscv64-zvkg.s
$MODESDEF_riscv64=GHASH_ASM
# Now that we have defined all the arch specific variables, use the
@@ -91,5 +91,5 @@ GENERATE[ghash-s390x.S]=asm/ghash-s390x.pl
INCLUDE[ghash-s390x.o]=..
GENERATE[ghash-c64xplus.S]=asm/ghash-c64xplus.pl
GENERATE[ghash-riscv64.s]=asm/ghash-riscv64.pl
-GENERATE[ghash-riscv64-zvbb-zvbc.s]=asm/ghash-riscv64-zvbb-zvbc.pl
+GENERATE[ghash-riscv64-zvkb-zvbc.s]=asm/ghash-riscv64-zvkb-zvbc.pl
GENERATE[ghash-riscv64-zvkg.s]=asm/ghash-riscv64-zvkg.pl
diff --git a/crypto/modes/gcm128.c b/crypto/modes/gcm128.c
index 4b49d202a4..6f293ef794 100644
--- a/crypto/modes/gcm128.c
+++ b/crypto/modes/gcm128.c
@@ -413,14 +413,14 @@ void gcm_ghash_rv64i_zbc(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
void gcm_ghash_rv64i_zbc__zbkb(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
-/* Zvbb/Zvbc (vector crypto with vclmul) based routines. */
-void gcm_init_rv64i_zvbb_zvbc(u128 Htable[16], const u64 Xi[2]);
-void gcm_gmult_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16]);
-void gcm_ghash_rv64i_zvbb_zvbc(u64 Xi[2], const u128 Htable[16],
+/* Zvkb/Zvbc (vector crypto with vclmul) based routines. */
+void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 Xi[2]);
+void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
/* Zvkg (vector crypto with vgmul.vv and vghsh.vv). */
void gcm_init_rv64i_zvkg(u128 Htable[16], const u64 Xi[2]);
-void gcm_init_rv64i_zvkg_zvbb(u128 Htable[16], const u64 Xi[2]);
+void gcm_init_rv64i_zvkg_zvkb(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_rv64i_zvkg(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_rv64i_zvkg(u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len);
@@ -524,16 +524,16 @@ static void gcm_get_funcs(struct gcm_funcs_st *ctx)
ctx->ghash = gcm_ghash_4bit;
if (RISCV_HAS_ZVKG() && riscv_vlen() >= 128) {
- if (RISCV_HAS_ZVBB())
- ctx->ginit = gcm_init_rv64i_zvkg_zvbb;
+ if (RISCV_HAS_ZVKB())
+ ctx->ginit = gcm_init_rv64i_zvkg_zvkb;
else
ctx->ginit = gcm_init_rv64i_zvkg;
ctx->gmult = gcm_gmult_rv64i_zvkg;
ctx->ghash = gcm_ghash_rv64i_zvkg;
- } else if (RISCV_HAS_ZVBB() && RISCV_HAS_ZVBC() && riscv_vlen() >= 128) {
- ctx->ginit = gcm_init_rv64i_zvbb_zvbc;
- ctx->gmult = gcm_gmult_rv64i_zvbb_zvbc;
- ctx->ghash = gcm_ghash_rv64i_zvbb_zvbc;
+ } else if (RISCV_HAS_ZVKB() && RISCV_HAS_ZVBC() && riscv_vlen() >= 128) {
+ ctx->ginit = gcm_init_rv64i_zvkb_zvbc;
+ ctx->gmult = gcm_gmult_rv64i_zvkb_zvbc;
+ ctx->ghash = gcm_ghash_rv64i_zvkb_zvbc;
} else if (RISCV_HAS_ZBC()) {
if (RISCV_HAS_ZBKB()) {
ctx->ginit = gcm_init_rv64i_zbc__zbkb;
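
The gcm_get_funcs() hunk above is the dispatch policy this commit adjusts: the Zvkg path is preferred when available, the vclmul-based path now requires Zvkb plus Zvbc, and the scalar Zbc path remains the fallback. The sketch below distills just that ordering; the probe and setup helpers are hypothetical stubs, not OpenSSL symbols, and the Zbkb sub-case of the Zbc branch is omitted.

/*
 * Priority order taken from the hunk above; everything else here is a
 * hypothetical stub for illustration.
 */
#include <stdbool.h>

static bool has_zvkg(void)    { return false; }   /* stand-in probes */
static bool has_zvkb(void)    { return false; }
static bool has_zvbc(void)    { return false; }
static bool has_zbc(void)     { return false; }
static bool vlen_ge_128(void) { return false; }

static void use_zvkg(void)      { /* vghsh.vv / vgmul.vv backend */ }
static void use_zvkb_zvbc(void) { /* vclmul-based backend        */ }
static void use_zbc(void)       { /* scalar clmul backend        */ }
static void use_generic(void)   { /* portable 4-bit table code   */ }

static void pick_ghash_backend(void)
{
    if (has_zvkg() && vlen_ge_128())
        use_zvkg();
    else if (has_zvkb() && has_zvbc() && vlen_ge_128())
        use_zvkb_zvbc();
    else if (has_zbc())
        use_zbc();
    else
        use_generic();
}
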
diff --git a/crypto/perlasm/riscv.pm b/crypto/perlasm/riscv.pm
index 14434e2848..8d602d8493 100644
--- a/crypto/perlasm/riscv.pm
+++ b/crypto/perlasm/riscv.pm
@@ -746,7 +746,18 @@ sub vxor_vv {
# Vector crypto instructions
-## Zvbb instructions
+## Zvbb and Zvkb instructions
+##
+## vandn (also in zvkb)
+## vbrev
+## vbrev8 (also in zvkb)
+## vrev8 (also in zvkb)
+## vclz
+## vctz
+## vcpop
+## vrol (also in zvkb)
+## vror (also in zvkb)
+## vwsll
sub vrev8_v {
# vrev8.v vd, vs2, vm
diff --git a/crypto/sm4/asm/sm4-riscv64-zvksed.pl b/crypto/sm4/asm/sm4-riscv64-zvksed.pl
index ba600d53d7..0734e5fa4c 100644
--- a/crypto/sm4/asm/sm4-riscv64-zvksed.pl
+++ b/crypto/sm4/asm/sm4-riscv64-zvksed.pl
@@ -36,9 +36,9 @@
# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
-# - RISC-V vector ('V') with VLEN >= 128
-# - Vector Bit-manipulation used in Cryptography ('Zvbb')
-# - Vector ShangMi Suite: SM4 Block Cipher ('Zvksed')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+# - RISC-V Vector SM4 Block Cipher extension ('Zvksed')
use strict;
use warnings;