author    Andy Polyakov <appro@openssl.org>    2016-04-23 19:21:18 +0200
committer Andy Polyakov <appro@openssl.org>    2016-07-16 23:37:18 +0200
commit    d41de45a335a7275c66a21287ae13a84e45921b5 (patch)
tree      13223da8e5405af5f88426b21d1274bf4d060fef /crypto/aes/asm/aesfx-sparcv9.pl
parent    e10aeee104383b711a6a58a13ed172fdb8642340 (diff)
aes/asm/aesfx-sparcv9.pl: add "teaser" CBC and CTR subroutines.
[Also optimize alignaddr usage in single-block subroutines.]

Reviewed-by: Rich Salz <rsalz@openssl.org>
Diffstat (limited to 'crypto/aes/asm/aesfx-sparcv9.pl')
-rwxr-xr-x  crypto/aes/asm/aesfx-sparcv9.pl | 775
1 file changed, 747 insertions, 28 deletions
diff --git a/crypto/aes/asm/aesfx-sparcv9.pl b/crypto/aes/asm/aesfx-sparcv9.pl
index c72f865552..e52a579bfa 100755
--- a/crypto/aes/asm/aesfx-sparcv9.pl
+++ b/crypto/aes/asm/aesfx-sparcv9.pl
@@ -18,6 +18,14 @@
#
# Initial support for Fujitsu SPARC64 X/X+ comprises minimally
# required key setup and single-block procedures.
+#
+# April 2016
+#
+# Add "teaser" CBC and CTR mode-specific subroutines. "Teaser" means
+# that the parallelizable nature of CBC decrypt and CTR is not
+# utilized yet. CBC encrypt, on the other hand, is as good as it can
+# possibly get, processing one byte in 4.1 cycles with a 128-bit key
+# on SPARC64 X. This is ~6x faster than a pure software implementation...
$output = pop;
open STDOUT,">$output";
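The comment above distinguishes the serial and parallel modes; the dependency structure is easy to sketch in Perl with a toy stand-in for the block cipher (illustrative only, not real AES):

sub E { my ($k, $b) = @_; return $b ^ $k }       # toy stand-in cipher

# CBC encrypt: block i needs ciphertext i-1, hence inherently serial.
sub cbc_encrypt {
    my ($k, $iv, @p) = @_;
    my (@c, $prev);
    $prev = $iv;
    for my $b (@p) { $prev = E($k, $b ^ $prev); push @c, $prev }
    return @c;
}

# CTR: each keystream block depends only on the counter value, so all
# blocks could be computed in parallel.
sub ctr_encrypt {
    my ($k, $ctr, @p) = @_;
    return map { $p[$_] ^ E($k, $ctr + $_) } 0 .. $#p;
}

print join(",", cbc_encrypt(3, 7, 1, 2)), "\n";  # 5,4
print join(",", ctr_encrypt(3, 0, 1, 2)), "\n";  # 2,0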
@@ -26,15 +34,19 @@ open STDOUT,">$output";
my ($inp,$out,$key,$rounds,$tmp,$mask) = map("%o$_",(0..5));
$code.=<<___;
+#include "sparc_arch.h"
+
+#define LOCALS (STACK_BIAS+STACK_FRAME)
+
.text
.globl aes_fx_encrypt
.align 32
aes_fx_encrypt:
and $inp, 7, $tmp ! is input aligned?
- alignaddr $inp, %g0, $inp
+ andn $inp, 7, $inp
ld [$key + 240], $rounds
- ldd [$key + 0], %f6
+ ldd [$key + 0], %f6 ! round[0]
ldd [$key + 8], %f8
ldd [$inp + 0], %f0 ! load input
@@ -42,18 +54,19 @@ aes_fx_encrypt:
ldd [$inp + 8], %f2
ldd [$inp + 16], %f4
+ alignaddr $inp, $tmp, %g0
faligndata %f0, %f2, %f0
faligndata %f2, %f4, %f2
.Lenc_inp_aligned:
- ldd [$key + 16], %f10
+ ldd [$key + 16], %f10 ! round[1]
ldd [$key + 24], %f12
- add $key, 32, $key
fxor %f0, %f6, %f0 ! ^=round[0]
fxor %f2, %f8, %f2
- ldd [$key + 0], %f6
- ldd [$key + 8], %f8
+ ldd [$key + 32], %f6 ! round[2]
+ ldd [$key + 40], %f8
+ add $key, 32, $key
sub $rounds, 4, $rounds
.Loop_enc:
@@ -75,8 +88,6 @@ aes_fx_encrypt:
andcc $out, 7, $tmp ! is output aligned?
mov 0xff, $mask
- alignaddrl $out, %g0, $out
- srl $mask, $tmp, $mask
fmovd %f0, %f4
faesencx %f2, %f10, %f0
@@ -85,14 +96,16 @@ aes_fx_encrypt:
faesenclx %f2, %f6, %f0
faesenclx %f4, %f8, %f2
- bnz,pn %icc, .Lenc_out_unaligned
- nop
+ bnz,a,pn %icc, .Lenc_out_unaligned
+ srl $mask, $tmp, $mask
std %f0, [$out + 0]
retl
std %f2, [$out + 8]
+.align 16
.Lenc_out_unaligned:
+ alignaddrl $out, %g0, $out
faligndata %f0, %f0, %f4
faligndata %f0, %f2, %f6
faligndata %f2, %f2, %f8
@@ -101,8 +114,9 @@ aes_fx_encrypt:
std %f6, [$out + 8]
add $out, 16, $out
orn %g0, $mask, $mask
- retl
stda %f8, [$out + $mask]0xc0 ! partial store
+ retl
+ nop
.type aes_fx_encrypt,#function
.size aes_fx_encrypt,.-aes_fx_encrypt
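The unaligned-output epilogue above leans on the stda partial store; a small Perl sketch of the mask arithmetic, on my reading of the partial-store ASI 0xc0 (mask bit 7 selecting the lowest-addressed byte of the 8-byte store):

# srl trims the top $tmp bits, so the head store skips the first $tmp
# bytes; orn complements the mask so the tail store writes only them.
my $misalign = 3;                       # $out & 7, i.e. $tmp
my $mask = 0xff >> $misalign;           # srl $mask, $tmp, $mask
my $tail = ~$mask & 0xff;               # orn %g0, $mask, $mask
printf "head keeps bytes %08b, tail keeps bytes %08b\n", $mask, $tail;
# head keeps bytes 00011111, tail keeps bytes 11100000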
@@ -110,9 +124,9 @@ aes_fx_encrypt:
.align 32
aes_fx_decrypt:
and $inp, 7, $tmp ! is input aligned?
- alignaddr $inp, %g0, $inp
+ andn $inp, 7, $inp
ld [$key + 240], $rounds
- ldd [$key + 0], %f6
+ ldd [$key + 0], %f6 ! round[0]
ldd [$key + 8], %f8
ldd [$inp + 0], %f0 ! load input
@@ -120,18 +134,19 @@ aes_fx_decrypt:
ldd [$inp + 8], %f2
ldd [$inp + 16], %f4
+ alignaddr $inp, $tmp, $inp
faligndata %f0, %f2, %f0
faligndata %f2, %f4, %f2
.Ldec_inp_aligned:
- ldd [$key + 16], %f10
+ ldd [$key + 16], %f10 ! round[1]
ldd [$key + 24], %f12
- add $key, 32, $key
fxor %f0, %f6, %f0 ! ^=round[0]
fxor %f2, %f8, %f2
- ldd [$key + 0], %f6
- ldd [$key + 8], %f8
+ ldd [$key + 32], %f6 ! round[2]
+ ldd [$key + 40], %f8
+ add $key, 32, $key
sub $rounds, 4, $rounds
.Loop_dec:
@@ -153,8 +168,6 @@ aes_fx_decrypt:
andcc $out, 7, $tmp ! is output aligned?
mov 0xff, $mask
- alignaddrl $out, %g0, $out
- srl $mask, $tmp, $mask
fmovd %f0, %f4
faesdecx %f2, %f10, %f0
@@ -163,14 +176,16 @@ aes_fx_decrypt:
faesdeclx %f2, %f6, %f0
faesdeclx %f4, %f8, %f2
- bnz,pn %icc, .Ldec_out_unaligned
- nop
+ bnz,a,pn %icc, .Ldec_out_unaligned
+ srl $mask, $tmp, $mask
std %f0, [$out + 0]
retl
std %f2, [$out + 8]
+.align 16
.Ldec_out_unaligned:
+ alignaddrl $out, %g0, $out
faligndata %f0, %f0, %f4
faligndata %f0, %f2, %f6
faligndata %f2, %f2, %f8
@@ -179,8 +194,9 @@ aes_fx_decrypt:
std %f6, [$out + 8]
add $out, 16, $out
orn %g0, $mask, $mask
- retl
stda %f8, [$out + $mask]0xc0 ! partial store
+ retl
+ nop
.type aes_fx_decrypt,#function
.size aes_fx_decrypt,.-aes_fx_decrypt
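Both single-block paths now round the input pointer down with andn and use alignaddr only to latch the shift amount for faligndata; a byte-level Perl model of that fix-up (illustrative only):

# Round $inp down, load three aligned doublewords, and let faligndata
# extract the 16 payload bytes starting at the recorded misalignment.
my @mem = (0 .. 23);                    # stand-in memory, one byte each
my $misalign = 5;                       # $inp & 7
sub falign { my ($off, @dw) = @_; @dw[$off .. $off + 7] }
my @block = (falign($misalign, @mem[0 .. 15]),    # faligndata %f0,%f2
             falign($misalign, @mem[8 .. 23]));   # faligndata %f2,%f4
print "@block\n";                       # bytes 5..20: the misaligned block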
___
@@ -202,10 +218,10 @@ aes_fx_set_decrypt_key:
.align 32
aes_fx_set_encrypt_key:
mov 1, $inc
+ nop
.Lset_encrypt_key:
and $inp, 7, $tmp
- alignaddr $inp, %g0, $inp
- nop
+ andn $inp, 7, $inp
cmp $bits, 192
ldd [$inp + 0], %f0
@@ -218,6 +234,7 @@ aes_fx_set_encrypt_key:
ldd [$inp + 24], %f6
ldd [$inp + 32], %f8
+ alignaddr $inp, $tmp, %g0
faligndata %f0, %f2, %f0
faligndata %f2, %f4, %f2
faligndata %f4, %f6, %f4
@@ -250,11 +267,11 @@ $code.=<<___;
std %f2, [$out + 8]
add $out, $inc, $out
faeskeyx %f0, 0x00, %f2
- std %f4,[$out+0]
- std %f6,[$out+8]
+ std %f4,[$out + 0]
+ std %f6,[$out + 8]
add $out, $inc, $out
- std %f0,[$out+0]
- std %f2,[$out+8]
+ std %f0,[$out + 0]
+ std %f2,[$out + 8]
retl
xor %o0, %o0, %o0 ! return 0
@@ -264,6 +281,7 @@ $code.=<<___;
nop
ldd [$inp + 24], %f6
+ alignaddr $inp, $tmp, %g0
faligndata %f0, %f2, %f0
faligndata %f2, %f4, %f2
faligndata %f4, %f6, %f4
@@ -308,6 +326,7 @@ $code.=<<___;
nop
ldd [$inp + 16], %f4
+ alignaddr $inp, $tmp, %g0
faligndata %f0, %f2, %f0
faligndata %f2, %f4, %f2
@@ -336,7 +355,704 @@ $code.=<<___;
.size aes_fx_set_encrypt_key,.-aes_fx_set_encrypt_key
___
}
+{
+my ($inp,$out,$len,$key,$ivp,$dir) = map("%i$_",(0..5));
+my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
+my ($out0,$out1,$iv0,$iv1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead)
+ = map("%f$_",grep { !($_ & 1) } (16 .. 62));
+my ($ileft,$iright) = ($ialign,$oalign);
+
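For reference, the map/grep line above yields only even %f numbers, i.e. legal double-precision register names; the odd halves are reached through the #lo suffix resolved by the post-processing loop at the bottom of the file:

my @regs = map("%f$_", grep { !($_ & 1) } (16 .. 62));
print "@regs[0 .. 3] ... $regs[-1]\n";   # %f16 %f18 %f20 %f22 ... %f62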
+$code.=<<___;
+.globl aes_fx_cbc_encrypt
+.align 32
+aes_fx_cbc_encrypt:
+ save %sp, -STACK_FRAME-16, %sp
+ andncc $len, 15, $len
+ bz,pn SIZE_T_CC, .Lcbc_no_data
+ and $inp, 7, $ialign
+
+ andn $inp, 7, $inp
+ ld [$key + 240], $rounds
+ and $out, 7, $oalign
+ ld [$ivp + 0], %f0 ! load ivec
+ ld [$ivp + 4], %f1
+ ld [$ivp + 8], %f2
+ ld [$ivp + 12], %f3
+
+ sll $rounds, 4, $rounds
+ add $rounds, $key, $end
+ ldd [$key + 0], $r0hi ! round[0]
+ ldd [$key + 8], $r0lo
+
+ add $inp, 16, $inp
+ sub $len, 16, $len
+ ldd [$end + 0], $rlhi ! round[last]
+ ldd [$end + 8], $rllo
+
+ mov 16, $inc
+ movrz $len, 0, $inc
+ ldd [$key + 16], %f10 ! round[1]
+ ldd [$key + 24], %f12
+
+ ldd [$inp - 16], $in0 ! load input
+ ldd [$inp - 8], $in1
+ ldda [$inp]0x82, $intail ! non-faulting load
+ brz $dir, .Lcbc_decrypt
+ add $inp, $inc, $inp ! inp+=16
+
+ fxor $r0hi, %f0, %f0 ! ivec^=round[0]
+ fxor $r0lo, %f2, %f2
+ alignaddr $inp, $ialign, %g0
+ faligndata $in0, $in1, $in0
+ faligndata $in1, $intail, $in1
+ fxor $r0hi, $rlhi, $rlhi ! round[last]^=round[0]
+ fxor $r0lo, $rllo, $rllo
+
+.Loop_cbc_enc:
+ fxor $in0, %f0, %f0 ! inp^ivec^round[0]
+ fxor $in1, %f2, %f2
+ ldd [$key + 32], %f6 ! round[2]
+ ldd [$key + 40], %f8
+ add $key, 32, $end
+ sub $rounds, 16*6, $inner
+
+.Lcbc_enc:
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$end + 16], %f10
+ ldd [$end + 24], %f12
+ add $end, 32, $end
+
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ ldd [$end + 0], %f6
+ ldd [$end + 8], %f8
+
+ brnz,a $inner, .Lcbc_enc
+ sub $inner, 16*2, $inner
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$end + 16], %f10 ! round[last-1]
+ ldd [$end + 24], %f12
+
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+
+ movrz $len, 0, $inc
+ fmovd $intail, $in0
+ ldd [$inp - 8], $in1 ! load next input block
+ ldda [$inp]0x82, $intail ! non-faulting load
+ add $inp, $inc, $inp ! inp+=16
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$key + 16], %f10 ! round[1]
+ ldd [$key + 24], %f12
+
+ faligndata $in0, $in1, $in0
+ faligndata $in1, $intail, $in1
+
+ fmovd %f0, %f4
+ faesenclx %f2, $rlhi, %f0 ! result is out^round[0]
+ faesenclx %f4, $rllo, %f2
+
+ fxor %f0, $r0hi, $out0 ! out^round[0]^round[0]
+ brnz,pn $oalign, .Lcbc_enc_unaligned_out
+ fxor %f2, $r0lo, $out1
+
+ std $out0, [$out + 0]
+ std $out1, [$out + 8]
+ add $out, 16, $out
+
+ brnz,a $len, .Loop_cbc_enc
+ sub $len, 16, $len
+
+ st $out0, [$ivp + 0] ! output ivec
+ st $out0#lo, [$ivp + 4]
+ st $out1, [$ivp + 8]
+ st $out1#lo, [$ivp + 12]
+
+.Lcbc_no_data:
+ ret
+ restore
+
+.align 32
+.Lcbc_enc_unaligned_out:
+ alignaddrl $out, %g0, $out
+ mov 0xff, $mask
+ sll $ialign, 3, $ileft
+ srl $mask, $oalign, $mask
+ sub %g0, $ileft, $iright
+
+ faligndata $out0, $out0, %f6
+ faligndata $out0, $out1, %f8
+
+ stda %f6, [$out + $mask]0xc0 ! partial store
+ std %f8, [$out + 8]
+ add $out, 16, $out
+ brz $len, .Lcbc_enc_unaligned_out_done
+ orn %g0, $mask, $mask
+
+.Loop_cbc_enc_unaligned_out:
+ fxor $in0, %f0, %f0 ! inp^ivec^round[0]
+ fxor $in1, %f2, %f2
+ ldd [$key + 32], %f6 ! round[2]
+ ldd [$key + 40], %f8
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$key + 48], %f10 ! round[3]
+ ldd [$key + 56], %f12
+
+ ldx [$inp - 16], %o0
+ ldx [$inp - 8], %o1
+ brz $ialign, .Lcbc_enc_aligned_inp
+ movrz $len, 0, $inc
+
+ ldx [$inp], %o2
+ sllx %o0, $ileft, %o0
+ srlx %o1, $iright, %g1
+ sllx %o1, $ileft, %o1
+ or %g1, %o0, %o0
+ srlx %o2, $iright, %o2
+ or %o2, %o1, %o1
+
+.Lcbc_enc_aligned_inp:
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ ldd [$key + 64], %f6 ! round[4]
+ ldd [$key + 72], %f8
+ add $key, 64, $end
+ sub $rounds, 16*8, $inner
+
+ stx %o0, [%sp + LOCALS + 0]
+ stx %o1, [%sp + LOCALS + 8]
+ add $inp, $inc, $inp ! inp+=16
+
+.Lcbc_enc_unaligned:
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$end + 16], %f10
+ ldd [$end + 24], %f12
+ add $end, 32, $end
+
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ ldd [$end + 0], %f6
+ ldd [$end + 8], %f8
+
+ brnz,a $inner, .Lcbc_enc_unaligned
+ sub $inner, 16*2, $inner
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$end + 16], %f10 ! round[last-1]
+ ldd [$end + 24], %f12
+
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ ldd [%sp + LOCALS + 0], $in0
+ ldd [%sp + LOCALS + 8], $in1
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$key + 16], %f10 ! round[1]
+ ldd [$key + 24], %f12
+
+ fmovd %f0, %f4
+ faesenclx %f2, $rlhi, %f0 ! result is out^round[0]
+ faesenclx %f4, $rllo, %f2
+
+ fmovd $out1, $outhead
+ fxor %f0, $r0hi, $out0 ! out^round[0]^round[0]
+ fxor %f2, $r0lo, $out1
+ faligndata $outhead, $out0, %f6
+ faligndata $out0, $out1, %f8
+ std %f6, [$out + 0]
+ std %f8, [$out + 8]
+ add $out, 16, $out
+
+ brnz,a $len, .Loop_cbc_enc_unaligned_out
+ sub $len, 16, $len
+
+.Lcbc_enc_unaligned_out_done:
+ faligndata $out1, $out1, %f8
+ stda %f8, [$out + $mask]0xc0 ! partial store
+
+ st $out0, [$ivp + 0] ! output ivec
+ st $out0#lo, [$ivp + 4]
+ st $out1, [$ivp + 8]
+ st $out1#lo, [$ivp + 12]
+
+ ret
+ restore
+
+.align 32
+.Lcbc_decrypt:
+ alignaddr $inp, $ialign, %g0
+ faligndata $in0, $in1, $in0
+ faligndata $in1, $intail, $in1
+ fmovd %f0, $iv0
+ fmovd %f2, $iv1
+
+.Loop_cbc_dec:
+ fxor $in0, $r0hi, %f0 ! inp^round[0]
+ fxor $in1, $r0lo, %f2
+ ldd [$key + 32], %f6 ! round[2]
+ ldd [$key + 40], %f8
+ add $key, 32, $end
+ sub $rounds, 16*6, $inner
+
+.Lcbc_dec:
+ fmovd %f0, %f4
+ faesdecx %f2, %f10, %f0
+ faesdecx %f4, %f12, %f2
+ ldd [$end + 16], %f10
+ ldd [$end + 24], %f12
+ add $end, 32, $end
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f6, %f0
+ faesdecx %f4, %f8, %f2
+ ldd [$end + 0], %f6
+ ldd [$end + 8], %f8
+
+ brnz,a $inner, .Lcbc_dec
+ sub $inner, 16*2, $inner
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f10, %f0
+ faesdecx %f4, %f12, %f2
+ ldd [$end + 16], %f10 ! round[last-1]
+ ldd [$end + 24], %f12
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f6, %f0
+ faesdecx %f4, %f8, %f2
+ fxor $iv0, $rlhi, %f6 ! ivec^round[last]
+ fxor $iv1, $rllo, %f8
+ fmovd $in0, $iv0
+ fmovd $in1, $iv1
+
+ movrz $len, 0, $inc
+ fmovd $intail, $in0
+ ldd [$inp - 8], $in1 ! load next input block
+ ldda [$inp]0x82, $intail ! non-faulting load
+ add $inp, $inc, $inp ! inp+=16
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f10, %f0
+ faesdecx %f4, %f12, %f2
+ ldd [$key + 16], %f10 ! round[1]
+ ldd [$key + 24], %f12
+
+ faligndata $in0, $in1, $in0
+ faligndata $in1, $intail, $in1
+
+ fmovd %f0, %f4
+ faesdeclx %f2, %f6, %f0
+ faesdeclx %f4, %f8, %f2
+
+ brnz,pn $oalign, .Lcbc_dec_unaligned_out
+ nop
+
+ std %f0, [$out + 0]
+ std %f2, [$out + 8]
+ add $out, 16, $out
+
+ brnz,a $len, .Loop_cbc_dec
+ sub $len, 16, $len
+
+ st $iv0, [$ivp + 0] ! output ivec
+ st $iv0#lo, [$ivp + 4]
+ st $iv1, [$ivp + 8]
+ st $iv1#lo, [$ivp + 12]
+
+ ret
+ restore
+
+.align 32
+.Lcbc_dec_unaligned_out:
+ alignaddrl $out, %g0, $out
+ mov 0xff, $mask
+ sll $ialign, 3, $ileft
+ srl $mask, $oalign, $mask
+ sub %g0, $ileft, $iright
+
+ faligndata %f0, %f0, $out0
+ faligndata %f0, %f2, $out1
+
+ stda $out0, [$out + $mask]0xc0 ! partial store
+ std $out1, [$out + 8]
+ add $out, 16, $out
+ brz $len, .Lcbc_dec_unaligned_out_done
+ orn %g0, $mask, $mask
+
+.Loop_cbc_dec_unaligned_out:
+ fmovd %f2, $outhead
+ fxor $in0, $r0hi, %f0 ! inp^round[0]
+ fxor $in1, $r0lo, %f2
+ ldd [$key + 32], %f6 ! round[2]
+ ldd [$key + 40], %f8
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f10, %f0
+ faesdecx %f4, %f12, %f2
+ ldd [$key + 48], %f10 ! round[3]
+ ldd [$key + 56], %f12
+
+ ldx [$inp - 16], %o0
+ ldx [$inp - 8], %o1
+ brz $ialign, .Lcbc_dec_aligned_inp
+ movrz $len, 0, $inc
+
+ ldx [$inp], %o2
+ sllx %o0, $ileft, %o0
+ srlx %o1, $iright, %g1
+ sllx %o1, $ileft, %o1
+ or %g1, %o0, %o0
+ srlx %o2, $iright, %o2
+ or %o2, %o1, %o1
+
+.Lcbc_dec_aligned_inp:
+ fmovd %f0, %f4
+ faesdecx %f2, %f6, %f0
+ faesdecx %f4, %f8, %f2
+ ldd [$key + 64], %f6 ! round[4]
+ ldd [$key + 72], %f8
+ add $key, 64, $end
+ sub $rounds, 16*8, $inner
+
+ stx %o0, [%sp + LOCALS + 0]
+ stx %o1, [%sp + LOCALS + 8]
+ add $inp, $inc, $inp ! inp+=16
+
+.Lcbc_dec_unaligned:
+ fmovd %f0, %f4
+ faesdecx %f2, %f10, %f0
+ faesdecx %f4, %f12, %f2
+ ldd [$end + 16], %f10
+ ldd [$end + 24], %f12
+ add $end, 32, $end
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f6, %f0
+ faesdecx %f4, %f8, %f2
+ ldd [$end + 0], %f6
+ ldd [$end + 8], %f8
+
+ brnz,a $inner, .Lcbc_dec_unaligned
+ sub $inner, 16*2, $inner
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f10, %f0
+ faesdecx %f4, %f12, %f2
+ ldd [$end + 16], %f10 ! round[last-1]
+ ldd [$end + 24], %f12
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f6, %f0
+ faesdecx %f4, %f8, %f2
+ fxor $iv0, $rlhi, %f6 ! ivec^round[last]
+ fxor $iv1, $rllo, %f8
+ fmovd $in0, $iv0
+ fmovd $in1, $iv1
+
+ fmovd %f0, %f4
+ faesdecx %f2, %f10, %f0
+ faesdecx %f4, %f12, %f2
+ ldd [$key + 16], %f10 ! round[1]
+ ldd [$key + 24], %f12
+
+ fmovd %f0, %f4
+ faesdeclx %f2, %f6, %f0
+ faesdeclx %f4, %f8, %f2
+ ldd [%sp + LOCALS + 0], $in0
+ ldd [%sp + LOCALS + 8], $in1
+
+ faligndata $outhead, %f0, $out0
+ faligndata %f0, %f2, $out1
+ std $out0, [$out + 0]
+ std $out1, [$out + 8]
+ add $out, 16, $out
+
+ brnz,a $len, .Loop_cbc_dec_unaligned_out
+ sub $len, 16, $len
+
+.Lcbc_dec_unaligned_out_done:
+ faligndata %f2, %f2, %f8
+ stda %f8, [$out + $mask]0xc0 ! partial store
+
+ st $iv0, [$ivp + 0] ! output ivec
+ st $iv0#lo, [$ivp + 4]
+ st $iv1, [$ivp + 8]
+ st $iv1#lo, [$ivp + 12]
+
+ ret
+ restore
+.type aes_fx_cbc_encrypt,#function
+.size aes_fx_cbc_encrypt,.-aes_fx_cbc_encrypt
+___
+}
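The "round[last]^=round[0]" folding in the encrypt path above rests on a plain XOR identity; a toy Perl check with 16-bit stand-in values (not AES state):

# With round[last] pre-XORed by round[0], the last round emits
# ciphertext^round[0]: one fxor recovers the ciphertext for output,
# while the masked value feeds the next block's inp^ivec^round[0]
# without an extra instruction.
my ($r0, $rlast, $c) = (0x1111, 0xabcd, 0x5a5a);
my $folded = $rlast ^ $r0;              # fxor $r0hi, $rlhi, $rlhi
my $masked = $c ^ $rlast ^ $folded;     # what the last round now emits
printf "masked %04x == c^r0 %04x; recovered c %04x\n",
       $masked, $c ^ $r0, $masked ^ $r0;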
+{
+my ($inp,$out,$len,$key,$ivp) = map("%i$_",(0..5));
+my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
+my ($out0,$out1,$ctr0,$ctr1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead)
+ = map("%f$_",grep { !($_ & 1) } (16 .. 62));
+my ($ileft,$iright) = ($ialign, $oalign);
+my $one = "%f14";
+
+$code.=<<___;
+.globl aes_fx_ctr32_encrypt_blocks
+.align 32
+aes_fx_ctr32_encrypt_blocks:
+ save %sp, -STACK_FRAME-16, %sp
+ and $inp, 7, $ialign
+ brz,pn $len, .Lctr32_no_data
+ andn $inp, 7, $inp
+
+.Lpic: call .+8
+ add %o7, .Lone - .Lpic, %o0
+
+ ld [$key + 240], $rounds
+ and $out, 7, $oalign
+ ld [$ivp + 0], $ctr0 ! load counter
+ ld [$ivp + 4], $ctr0#lo
+ ld [$ivp + 8], $ctr1
+ ld [$ivp + 12], $ctr1#lo
+ ldd [%o0], $one
+
+ sll $rounds, 4, $rounds
+ add $rounds, $key, $end
+ ldd [$key + 0], $r0hi ! round[0]
+ ldd [$key + 8], $r0lo
+
+ add $inp, 16, $inp
+ sub $len, 1, $len
+ ldd [$key + 16], %f10 ! round[1]
+ ldd [$key + 24], %f12
+
+ mov 16, $inc
+ movrz $len, 0, $inc
+ ldd [$end + 0], $rlhi ! round[last]
+ ldd [$end + 8], $rllo
+
+ ldd [$inp - 16], $in0 ! load input
+ ldd [$inp - 8], $in1
+ ldda [$inp]0x82, $intail ! non-faulting load
+ add $inp, $inc, $inp ! inp+=16
+
+ alignaddr $inp, $ialign, %g0
+ faligndata $in0, $in1, $in0
+ faligndata $in1, $intail, $in1
+
+.Loop_ctr32:
+ fxor $ctr0, $r0hi, %f0 ! counter^round[0]
+ fxor $ctr1, $r0lo, %f2
+ ldd [$key + 32], %f6 ! round[2]
+ ldd [$key + 40], %f8
+ add $key, 32, $end
+ sub $rounds, 16*6, $inner
+
+.Lctr32_enc:
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$end + 16], %f10
+ ldd [$end + 24], %f12
+ add $end, 32, $end
+
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ ldd [$end + 0], %f6
+ ldd [$end + 8], %f8
+
+ brnz,a $inner, .Lctr32_enc
+ sub $inner, 16*2, $inner
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$end + 16], %f10 ! round[last-1]
+ ldd [$end + 24], %f12
+
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ fxor $in0, $rlhi, %f6 ! inp^round[last]
+ fxor $in1, $rllo, %f8
+
+ movrz $len, 0, $inc
+ fmovd $intail, $in0
+ ldd [$inp - 8], $in1 ! load next input block
+ ldda [$inp]0x82, $intail ! non-faulting load
+ add $inp, $inc, $inp ! inp+=16
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$key + 16], %f10 ! round[1]
+ ldd [$key + 24], %f12
+
+ faligndata $in0, $in1, $in0
+ faligndata $in1, $intail, $in1
+ fpadd32 $ctr1, $one, $ctr1 ! increment counter
+
+ fmovd %f0, %f4
+ faesenclx %f2, %f6, %f0
+ faesenclx %f4, %f8, %f2
+
+ brnz,pn $oalign, .Lctr32_unaligned_out
+ nop
+
+ std %f0, [$out + 0]
+ std %f2, [$out + 8]
+ add $out, 16, $out
+
+ brnz,a $len, .Loop_ctr32
+ sub $len, 1, $len
+
+.Lctr32_no_data:
+ ret
+ restore
+
+.align 32
+.Lctr32_unaligned_out:
+ alignaddrl $out, %g0, $out
+ mov 0xff, $mask
+ sll $ialign, 3, $ileft
+ srl $mask, $oalign, $mask
+ sub %g0, $ileft, $iright
+
+ faligndata %f0, %f0, $out0
+ faligndata %f0, %f2, $out1
+
+ stda $out0, [$out + $mask]0xc0 ! partial store
+ std $out1, [$out + 8]
+ add $out, 16, $out
+ brz $len, .Lctr32_unaligned_out_done
+ orn %g0, $mask, $mask
+
+.Loop_ctr32_unaligned_out:
+ fmovd %f2, $outhead
+ fxor $ctr0, $r0hi, %f0 ! counter^round[0]
+ fxor $ctr1, $r0lo, %f2
+ ldd [$key + 32], %f6 ! round[2]
+ ldd [$key + 40], %f8
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$key + 48], %f10 ! round[3]
+ ldd [$key + 56], %f12
+
+ ldx [$inp - 16], %o0
+ ldx [$inp - 8], %o1
+ brz $ialign, .Lctr32_aligned_inp
+ movrz $len, 0, $inc
+
+ ldx [$inp], %o2
+ sllx %o0, $ileft, %o0
+ srlx %o1, $iright, %g1
+ sllx %o1, $ileft, %o1
+ or %g1, %o0, %o0
+ srlx %o2, $iright, %o2
+ or %o2, %o1, %o1
+
+.Lctr32_aligned_inp:
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ ldd [$key + 64], %f6 ! round[4]
+ ldd [$key + 72], %f8
+ add $key, 64, $end
+ sub $rounds, 16*8, $inner
+
+ stx %o0, [%sp + LOCALS + 0]
+ stx %o1, [%sp + LOCALS + 8]
+ add $inp, $inc, $inp ! inp+=16
+
+.Lctr32_enc_unaligned:
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$end + 16], %f10
+ ldd [$end + 24], %f12
+ add $end, 32, $end
+
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ ldd [$end + 0], %f6
+ ldd [$end + 8], %f8
+
+ brnz,a $inner, .Lctr32_enc_unaligned
+ sub $inner, 16*2, $inner
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$end + 16], %f10 ! round[last-1]
+ ldd [$end + 24], %f12
+ fpadd32 $ctr1, $one, $ctr1 ! increment counter
+
+ fmovd %f0, %f4
+ faesencx %f2, %f6, %f0
+ faesencx %f4, %f8, %f2
+ fxor $in0, $rlhi, %f6 ! inp^round[last]
+ fxor $in1, $rllo, %f8
+ ldd [%sp + LOCALS + 0], $in0
+ ldd [%sp + LOCALS + 8], $in1
+
+ fmovd %f0, %f4
+ faesencx %f2, %f10, %f0
+ faesencx %f4, %f12, %f2
+ ldd [$key + 16], %f10 ! round[1]
+ ldd [$key + 24], %f12
+
+ fmovd %f0, %f4
+ faesenclx %f2, %f6, %f0
+ faesenclx %f4, %f8, %f2
+
+ faligndata $outhead, %f0, $out0
+ faligndata %f0, %f2, $out1
+ std $out0, [$out + 0]
+ std $out1, [$out + 8]
+ add $out, 16, $out
+
+ brnz,a $len, .Loop_ctr32_unaligned_out
+ sub $len, 1, $len
+
+.Lctr32_unaligned_out_done:
+ faligndata %f2, %f2, %f8
+ stda %f8, [$out + $mask]0xc0 ! partial store
+
+ ret
+ restore
+.type aes_fx_ctr32_encrypt_blocks,#function
+.size aes_fx_ctr32_encrypt_blocks,.-aes_fx_ctr32_encrypt_blocks
+.align 32
+.Lone:
+ .word 0, 1
+.asciz "AES for Fujitsu SPARC64 X, CRYPTOGAMS by <appro\@openssl.org>"
+.align 4
+___
+}
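On my reading of fpadd32 (two independent 32-bit adds on a 64-bit register) and the .Lone constant (0, 1), the counter bump gives 32-bit big-endian counter semantics, which is what a ctr32 entry point wants; a Perl model:

# Only the low word of the IV's last 8 bytes increments, wrapping
# without a carry into the upper word.
my ($hi, $lo) = (0xdeadbeef, 0xffffffff);         # halves of $ctr1
($hi, $lo) = (($hi + 0) & 0xffffffff, ($lo + 1) & 0xffffffff);
printf "counter: %08x %08x\n", $hi, $lo;          # deadbeef 00000000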
# Purpose of these subroutines is to explicitly encode VIS instructions,
# so that one can compile the module without having to specify VIS
# extensions on compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
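Concretely, each recognized mnemonic becomes a raw .word; a minimal sketch, assuming the IMPDEP1/FPop layout these SPARC CRYPTOGAMS modules conventionally emit (0x81b00000 | rd<<25 | rs1<<14 | opf<<5 | rs2), with opf taken from %visopf:

sub encode_vis {
    my ($opf, $rs1, $rs2, $rd) = @_;              # raw register numbers
    sprintf ".word\t0x%08x", 0x81b00000 | $rd << 25 | $rs1 << 14
                                        | $opf << 5 | $rs2;
}
print encode_vis(0x048, 0, 2, 4), "\n";  # faligndata %f0,%f2,%f4
                                         # -> .word 0x89b00902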
@@ -347,6 +1063,7 @@ my ($mnemonic,$rs1,$rs2,$rd)=@_;
my ($ref,$opf);
my %visopf = ( "faligndata" => 0x048,
"bshuffle" => 0x04c,
+ "fpadd32" => 0x052,
"fxor" => 0x06c,
"fsrc2" => 0x078 );
@@ -431,6 +1148,8 @@ my %aesopf = ( "faesencx" => 0x90,
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/ge;
+ s/%f([0-9]+)#lo/sprintf "%%f%d",$1+1/ge;
+
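The substitution just added resolves the #lo suffix used throughout the new code to the odd (low) half of a double-precision pair; for example, with $out0 = %f16 and $ivp = %i4 per the mappings above:

my $line = 'st %f16#lo, [%i4 + 4]';
$line =~ s/%f([0-9]+)#lo/sprintf "%%f%d",$1+1/ge;
print "$line\n";                         # st %f17, [%i4 + 4]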
s/\b(faes[^x]{3,4}x)\s+(%f[0-9]{1,2}),\s*([%fx0-9]+),\s*(%f[0-9]{1,2})/
&unfx($1,$2,$3,$4,$5)
/ge or