author | Andy Polyakov <appro@openssl.org> | 2015-02-22 18:11:28 +0100
committer | Andy Polyakov <appro@openssl.org> | 2015-04-21 09:37:44 +0200
commit | 5557d5f2e27ae8265d0b76227c78f2879d7f80a6 (patch)
tree | f06c7e62e033d25d8df3b87bcb1223bbe20cf221 /crypto
parent | 9b6b470afee13e011152cd1c5006251cc69d03b2 (diff)
Add ec/asm/ecp_nistz256-sparcv9.pl.
Reviewed-by: Richard Levitte <levitte@openssl.org>
Reviewed-by: Rich Salz <rsalz@openssl.org>
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/ec/Makefile | 3
-rwxr-xr-x | crypto/ec/asm/ecp_nistz256-sparcv9.pl | 3045
2 files changed, 3048 insertions, 0 deletions
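
The central trick in the new module is the "multiplication-less" reduction for the NIST P-256 prime, derived in a comment inside __ecp_nistz256_mul_mont below: because the modulus consists only of 0xffffffff, 0x00000001 and zero words, multiplying it by a 32-bit "magic" digit collapses into a fixed pattern of shifted additions and subtractions. The following standalone Perl snippet (an illustration only, not part of the commit) checks that word-position pattern:

#!/usr/bin/env perl
# Verify that d*p for the P-256 prime p equals the add/sub pattern
# used by the reduction: additions of d at bit positions 256, 192
# and 96, subtractions of d at position 224 and at position 0.
use strict;
use warnings;
use Math::BigInt;

# p = 2^256 - 2^224 + 2^192 + 2^96 - 1
my $p = Math::BigInt->new(2)->bpow(256)
      - Math::BigInt->new(2)->bpow(224)
      + Math::BigInt->new(2)->bpow(192)
      + Math::BigInt->new(2)->bpow(96)
      - 1;

my $d = Math::BigInt->new(0xabcd);	# arbitrary "magic" 32-bit digit

# the abcd.0000.abcd.0000.0000.abcd.----.----.---- /
# abcd.----.----.----.----.----.----.---- patterns from the comment
my $synth = ($d << 256) + ($d << 192) + ($d << 96)
          - ($d << 224) - $d;

print $d * $p == $synth ? "pattern holds\n" : "pattern broken\n";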
diff --git a/crypto/ec/Makefile b/crypto/ec/Makefile
index fa2fc4cbb2..423f60bb54 100644
--- a/crypto/ec/Makefile
+++ b/crypto/ec/Makefile
@@ -54,6 +54,9 @@ ecp_nistz256-x86_64.s:	asm/ecp_nistz256-x86_64.pl
 ecp_nistz256-avx2.s:	asm/ecp_nistz256-avx2.pl
 	$(PERL) asm/ecp_nistz256-avx2.pl $(PERLASM_SCHEME) > $@
+ecp_nistz256-sparcv9.S:	asm/ecp_nistz256-sparcv9.pl
+	$(PERL) asm/ecp_nistz256-sparcv9.pl $(CFLAGS) > $@
+
 ecp_nistz256-%.S:	asm/ecp_nistz256-%.pl;	$(PERL) $< $(PERLASM_SCHEME) $@
 ecp_nistz256-armv4.o:	ecp_nistz256-armv4.S
 ecp_nistz256-armv8.o:	ecp_nistz256-armv8.S
diff --git a/crypto/ec/asm/ecp_nistz256-sparcv9.pl b/crypto/ec/asm/ecp_nistz256-sparcv9.pl
new file mode 100755
index 0000000000..5693b75e5b
--- /dev/null
+++ b/crypto/ec/asm/ecp_nistz256-sparcv9.pl
@@ -0,0 +1,3045 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# ECP_NISTZ256 module for SPARCv9.
+#
+# February 2015.
+#
+# The original ECP_NISTZ256 submission targeting x86_64 is detailed in
+# http://eprint.iacr.org/2013/816. In the process of adaptation the
+# original .c module was made 32-bit savvy in order to make this
+# implementation possible.
+#
+#			with/without -DECP_NISTZ256_ASM
+# UltraSPARC III	+12-18%
+# SPARC T4		+99-550% (+66-150% on 32-bit Solaris)
+#
+# Ranges denote minimum and maximum improvement coefficients depending
+# on benchmark. Lower coefficients are for ECDSA sign, a server-side
+# operation. Keep in mind that +200% means a 3x improvement.
+
+$code.=<<___;
+#include "sparc_arch.h"
+
+#define LOCALS	(STACK_BIAS+STACK_FRAME)
+#ifdef __arch64__
+.register	%g2,#scratch
+.register	%g3,#scratch
+# define STACK64_FRAME	STACK_FRAME
+# define LOCALS64	LOCALS
+#else
+# define STACK64_FRAME	(2047+192)
+# define LOCALS64	STACK64_FRAME
+#endif
+
+.section	".text",#alloc,#execinstr
+___
+########################################################################
+# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
+#
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+open TABLE,"<ecp_nistz256_table.c"		or
+open TABLE,"<${dir}../ecp_nistz256_table.c"	or
+die "failed to open ecp_nistz256_table.c:",$!;
+
+use integer;
+
+foreach(<TABLE>) {
+	s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
+}
+close TABLE;
+
+# See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
+# 64*16*37-1 is because $#arr returns the last valid index of @arr,
+# not the number of elements.
+die "insane number of elements" if ($#arr != 64*16*37-1); + +$code.=<<___; +.globl ecp_nistz256_precomputed +.align 4096 +ecp_nistz256_precomputed: +___ +######################################################################## +# this conversion smashes P256_POINT_AFFINE by individual bytes with +# 64 byte interval, similar to +# 1111222233334444 +# 1234123412341234 +for(1..37) { + @tbl = splice(@arr,0,64*16); + for($i=0;$i<64;$i++) { + undef @line; + for($j=0;$j<64;$j++) { + push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff; + } + $code.=".byte\t"; + $code.=join(',',map { sprintf "0x%02x",$_} @line); + $code.="\n"; + } +} + +{{{ +my ($rp,$ap,$bp)=map("%i$_",(0..2)); +my @acc=map("%l$_",(0..7)); +my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5"); +my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1"); +my ($rp_real,$ap_real)=("%g2","%g3"); + +$code.=<<___; +.size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed +.align 64 +.LRR: ! 2^512 mod P precomputed for NIST P256 polynomial +.long 0x00000003, 0x00000000, 0xffffffff, 0xfffffffb +.long 0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004 +.Lone: +.long 1,0,0,0,0,0,0,0 +.asciz "ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>" + +! void ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]); +.globl ecp_nistz256_to_mont +.align 64 +ecp_nistz256_to_mont: + save %sp,-STACK_FRAME,%sp + nop +1: call .+8 + add %o7,.LRR-1b,$bp + call __ecp_nistz256_mul_mont + nop + ret + restore +.size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont + +! void ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]); +.globl ecp_nistz256_from_mont +.align 32 +ecp_nistz256_from_mont: + save %sp,-STACK_FRAME,%sp + nop +1: call .+8 + add %o7,.Lone-1b,$bp + call __ecp_nistz256_mul_mont + nop + ret + restore +.size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont + +! void ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8], +! const BN_ULONG %i2[8]); +.globl ecp_nistz256_mul_mont +.align 32 +ecp_nistz256_mul_mont: + save %sp,-STACK_FRAME,%sp + nop + call __ecp_nistz256_mul_mont + nop + ret + restore +.size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont + +! void ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i2[8]); +.globl ecp_nistz256_sqr_mont +.align 32 +ecp_nistz256_sqr_mont: + save %sp,-STACK_FRAME,%sp + mov $ap,$bp + call __ecp_nistz256_mul_mont + nop + ret + restore +.size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont +___ + +######################################################################## +# Special thing to keep in mind is that $t0-$t7 hold 64-bit values, +# while all others are meant to keep 32. "Meant to" means that additions +# to @acc[0-7] do "contaminate" upper bits, but they are cleared before +# they can affect outcome (follow 'and' with $mask). Also keep in mind +# that addition with carry is addition with 32-bit carry, even though +# CPU is 64-bit. [Addition with 64-bit carry was introduced in T3, see +# below for VIS3 code paths.] + +$code.=<<___; +.align 32 +__ecp_nistz256_mul_mont: + ld [$bp+0],$bi ! b[0] + mov -1,$mask + ld [$ap+0],$a0 + srl $mask,0,$mask ! 0xffffffff + ld [$ap+4],$t1 + ld [$ap+8],$t2 + ld [$ap+12],$t3 + ld [$ap+16],$t4 + ld [$ap+20],$t5 + ld [$ap+24],$t6 + ld [$ap+28],$t7 + mulx $a0,$bi,$t0 ! a[0-7]*b[0], 64-bit results + mulx $t1,$bi,$t1 + mulx $t2,$bi,$t2 + mulx $t3,$bi,$t3 + mulx $t4,$bi,$t4 + mulx $t5,$bi,$t5 + mulx $t6,$bi,$t6 + mulx $t7,$bi,$t7 + srlx $t0,32,@acc[1] ! 
extract high parts + srlx $t1,32,@acc[2] + srlx $t2,32,@acc[3] + srlx $t3,32,@acc[4] + srlx $t4,32,@acc[5] + srlx $t5,32,@acc[6] + srlx $t6,32,@acc[7] + srlx $t7,32,@acc[0] ! "@acc[8]" + mov 0,$carry +___ +for($i=1;$i<8;$i++) { +$code.=<<___; + addcc @acc[1],$t1,@acc[1] ! accumulate high parts + ld [$bp+4*$i],$bi ! b[$i] + ld [$ap+4],$t1 ! re-load a[1-7] + addccc @acc[2],$t2,@acc[2] + addccc @acc[3],$t3,@acc[3] + ld [$ap+8],$t2 + ld [$ap+12],$t3 + addccc @acc[4],$t4,@acc[4] + addccc @acc[5],$t5,@acc[5] + ld [$ap+16],$t4 + ld [$ap+20],$t5 + addccc @acc[6],$t6,@acc[6] + addccc @acc[7],$t7,@acc[7] + ld [$ap+24],$t6 + ld [$ap+28],$t7 + addccc @acc[0],$carry,@acc[0] ! "@acc[8]" + addc %g0,%g0,$carry +___ + # Reduction iteration is normally performed by accumulating + # result of multiplication of modulus by "magic" digit [and + # omitting least significant word, which is guaranteed to + # be 0], but thanks to special form of modulus and "magic" + # digit being equal to least significant word, it can be + # performed with additions and subtractions alone. Indeed: + # + # ffff.0001.0000.0000.0000.ffff.ffff.ffff + # * abcd + # + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd + # + # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we + # rewrite above as: + # + # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd + # + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000 + # - abcd.0000.0000.0000.0000.0000.0000.abcd + # + # or marking redundant operations: + # + # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.---- + # + abcd.0000.abcd.0000.0000.abcd.----.----.---- + # - abcd.----.----.----.----.----.----.---- + +$code.=<<___; + ! multiplication-less reduction + addcc @acc[3],$t0,@acc[3] ! r[3]+=r[0] + addccc @acc[4],%g0,@acc[4] ! r[4]+=0 + and @acc[1],$mask,@acc[1] + and @acc[2],$mask,@acc[2] + addccc @acc[5],%g0,@acc[5] ! r[5]+=0 + addccc @acc[6],$t0,@acc[6] ! r[6]+=r[0] + and @acc[3],$mask,@acc[3] + and @acc[4],$mask,@acc[4] + addccc @acc[7],%g0,@acc[7] ! r[7]+=0 + addccc @acc[0],$t0,@acc[0] ! r[8]+=r[0] "@acc[8]" + and @acc[5],$mask,@acc[5] + and @acc[6],$mask,@acc[6] + addc $carry,%g0,$carry ! top-most carry + subcc @acc[7],$t0,@acc[7] ! r[7]-=r[0] + subccc @acc[0],%g0,@acc[0] ! r[8]-=0 "@acc[8]" + subc $carry,%g0,$carry ! top-most carry + and @acc[7],$mask,@acc[7] + and @acc[0],$mask,@acc[0] ! "@acc[8]" +___ + push(@acc,shift(@acc)); # rotate registers to "omit" acc[0] +$code.=<<___; + mulx $a0,$bi,$t0 ! a[0-7]*b[$i], 64-bit results + mulx $t1,$bi,$t1 + mulx $t2,$bi,$t2 + mulx $t3,$bi,$t3 + mulx $t4,$bi,$t4 + mulx $t5,$bi,$t5 + mulx $t6,$bi,$t6 + mulx $t7,$bi,$t7 + add @acc[0],$t0,$t0 ! accumulate low parts, can't overflow + add @acc[1],$t1,$t1 + srlx $t0,32,@acc[1] ! extract high parts + add @acc[2],$t2,$t2 + srlx $t1,32,@acc[2] + add @acc[3],$t3,$t3 + srlx $t2,32,@acc[3] + add @acc[4],$t4,$t4 + srlx $t3,32,@acc[4] + add @acc[5],$t5,$t5 + srlx $t4,32,@acc[5] + add @acc[6],$t6,$t6 + srlx $t5,32,@acc[6] + add @acc[7],$t7,$t7 + srlx $t6,32,@acc[7] + srlx $t7,32,@acc[0] ! "@acc[8]" +___ +} +$code.=<<___; + addcc @acc[1],$t1,@acc[1] ! accumulate high parts + addccc @acc[2],$t2,@acc[2] + addccc @acc[3],$t3,@acc[3] + addccc @acc[4],$t4,@acc[4] + addccc @acc[5],$t5,@acc[5] + addccc @acc[6],$t6,@acc[6] + addccc @acc[7],$t7,@acc[7] + addccc @acc[0],$carry,@acc[0] ! "@acc[8]" + addc %g0,%g0,$carry + + addcc @acc[3],$t0,@acc[3] ! multiplication-less reduction + addccc @acc[4],%g0,@acc[4] + addccc @acc[5],%g0,@acc[5] + addccc @acc[6],$t0,@acc[6] + addccc @acc[7],%g0,@acc[7] + addccc @acc[0],$t0,@acc[0] ! 
"@acc[8]" + addc $carry,%g0,$carry + subcc @acc[7],$t0,@acc[7] + subccc @acc[0],%g0,@acc[0] ! "@acc[8]" + subc $carry,%g0,$carry ! top-most carry +___ + push(@acc,shift(@acc)); # rotate registers to omit acc[0] +$code.=<<___; + ! Final step is "if result > mod, subtract mod", but we do it + ! "other way around", namely subtract modulus from result + ! and if it borrowed, add modulus back. + + subcc @acc[0],-1,@acc[0] ! subtract modulus + subccc @acc[1],-1,@acc[1] + subccc @acc[2],-1,@acc[2] + subccc @acc[3],0,@acc[3] + subccc @acc[4],0,@acc[4] + subccc @acc[5],0,@acc[5] + subccc @acc[6],1,@acc[6] + subccc @acc[7],-1,@acc[7] + subc $carry,0,$carry ! broadcast borrow bit + + ! Note that because mod has special form, i.e. consists of + ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by + ! using value of broadcasted borrow and the borrow bit itself. + ! To minimize dependency chain we first broadcast and then + ! extract the bit by negating (follow $bi). + + addcc @acc[0],$carry,@acc[0] ! add modulus or zero + addccc @acc[1],$carry,@acc[1] + neg $carry,$bi + st @acc[0],[$rp] + addccc @acc[2],$carry,@acc[2] + st @acc[1],[$rp+4] + addccc @acc[3],0,@acc[3] + st @acc[2],[$rp+8] + addccc @acc[4],0,@acc[4] + st @acc[3],[$rp+12] + addccc @acc[5],0,@acc[5] + st @acc[4],[$rp+16] + addccc @acc[6],$bi,@acc[6] + st @acc[5],[$rp+20] + addc @acc[7],$carry,@acc[7] + st @acc[6],[$rp+24] + retl + st @acc[7],[$rp+28] +.size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont + +! void ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8], +! const BN_ULONG %i2[8]); +.globl ecp_nistz256_add +.align 32 +ecp_nistz256_add: + save %sp,-STACK_FRAME,%sp + ld [$ap],@acc[0] + ld [$ap+4],@acc[1] + ld [$ap+8],@acc[2] + ld [$ap+12],@acc[3] + ld [$ap+16],@acc[4] + ld [$ap+20],@acc[5] + ld [$ap+24],@acc[6] + call __ecp_nistz256_add + ld [$ap+28],@acc[7] + ret + restore +.size ecp_nistz256_add,.-ecp_nistz256_add + +.align 32 +__ecp_nistz256_add: + ld [$bp+0],$t0 ! b[0] + ld [$bp+4],$t1 + ld [$bp+8],$t2 + ld [$bp+12],$t3 + addcc @acc[0],$t0,@acc[0] + ld [$bp+16],$t4 + ld [$bp+20],$t5 + addccc @acc[1],$t1,@acc[1] + ld [$bp+24],$t6 + ld [$bp+28],$t7 + addccc @acc[2],$t2,@acc[2] + addccc @acc[3],$t3,@acc[3] + addccc @acc[4],$t4,@acc[4] + addccc @acc[5],$t5,@acc[5] + addccc @acc[6],$t6,@acc[6] + addccc @acc[7],$t7,@acc[7] + subc %g0,%g0,$carry ! broadcast carry bit + +.Lreduce_by_sub: + + ! if a+b carries, subtract modulus. + ! + ! Note that because mod has special form, i.e. consists of + ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by + ! using value of broadcasted borrow and the borrow bit itself. + ! To minimize dependency chain we first broadcast and then + ! extract the bit by negating (follow $bi). + + subcc @acc[0],$carry,@acc[0] ! subtract synthesized modulus + subccc @acc[1],$carry,@acc[1] + neg $carry,$bi + st @acc[0],[$rp] + subccc @acc[2],$carry,@acc[2] + st @acc[1],[$rp+4] + subccc @acc[3],0,@acc[3] + st @acc[2],[$rp+8] + subccc @acc[4],0,@acc[4] + st @acc[3],[$rp+12] + subccc @acc[5],0,@acc[5] + st @acc[4],[$rp+16] + subccc @acc[6],$bi,@acc[6] + st @acc[5],[$rp+20] + subc @acc[7],$carry,@acc[7] + st @acc[6],[$rp+24] + retl + st @acc[7],[$rp+28] +.size __ecp_nistz256_add,.-__ecp_nistz256_add + +! 
void ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]); +.globl ecp_nistz256_mul_by_2 +.align 32 +ecp_nistz256_mul_by_2: + save %sp,-STACK_FRAME,%sp + ld [$ap],@acc[0] + ld [$ap+4],@acc[1] + ld [$ap+8],@acc[2] + ld [$ap+12],@acc[3] + ld [$ap+16],@acc[4] + ld [$ap+20],@acc[5] + ld [$ap+24],@acc[6] + call __ecp_nistz256_mul_by_2 + ld [$ap+28],@acc[7] + ret + restore +.size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2 + +.align 32 +__ecp_nistz256_mul_by_2: + addcc @acc[0],@acc[0],@acc[0] ! a+a=2*a + addccc @acc[1],@acc[1],@acc[1] + addccc @acc[2],@acc[2],@acc[2] + addccc @acc[3],@acc[3],@acc[3] + addccc @acc[4],@acc[4],@acc[4] + addccc @acc[5],@acc[5],@acc[5] + addccc @acc[6],@acc[6],@acc[6] + addccc @acc[7],@acc[7],@acc[7] + b .Lreduce_by_sub + subc %g0,%g0,$carry ! broadcast carry bit +.size __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2 + +! void ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]); +.globl ecp_nistz256_mul_by_3 +.align 32 +ecp_nistz256_mul_by_3: + save %sp,-STACK_FRAME,%sp + ld [$ap],@acc[0] + ld [$ap+4],@acc[1] + ld [$ap+8],@acc[2] + ld [$ap+12],@acc[3] + ld [$ap+16],@acc[4] + ld [$ap+20],@acc[5] + ld [$ap+24],@acc[6] + call __ecp_nistz256_mul_by_3 + ld [$ap+28],@acc[7] + ret + restore +.size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3 + +.align 32 +__ecp_nistz256_mul_by_3: + addcc @acc[0],@acc[0],$t0 ! a+a=2*a + addccc @acc[1],@acc[1],$t1 + addccc @acc[2],@acc[2],$t2 + addccc @acc[3],@acc[3],$t3 + addccc @acc[4],@acc[4],$t4 + addccc @acc[5],@acc[5],$t5 + addccc @acc[6],@acc[6],$t6 + addccc @acc[7],@acc[7],$t7 + subc %g0,%g0,$carry ! broadcast carry bit + + subcc $t0,$carry,$t0 ! .Lreduce_by_sub but without stores + neg $carry,$bi + subccc $t1,$carry,$t1 + subccc $t2,$carry,$t2 + subccc $t3,0,$t3 + subccc $t4,0,$t4 + subccc $t5,0,$t5 + subccc $t6,$bi,$t6 + subc $t7,$carry,$t7 + + addcc $t0,@acc[0],@acc[0] ! 2*a+a=3*a + addccc $t1,@acc[1],@acc[1] + addccc $t2,@acc[2],@acc[2] + addccc $t3,@acc[3],@acc[3] + addccc $t4,@acc[4],@acc[4] + addccc $t5,@acc[5],@acc[5] + addccc $t6,@acc[6],@acc[6] + addccc $t7,@acc[7],@acc[7] + b .Lreduce_by_sub + subc %g0,%g0,$carry ! broadcast carry bit +.size __ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3 + +! void ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8], +! const BN_ULONG %i2[8]); +.globl ecp_nistz256_sub +.align 32 +ecp_nistz256_sub: + save %sp,-STACK_FRAME,%sp + ld [$ap],@acc[0] + ld [$ap+4],@acc[1] + ld [$ap+8],@acc[2] + ld [$ap+12],@acc[3] + ld [$ap+16],@acc[4] + ld [$ap+20],@acc[5] + ld [$ap+24],@acc[6] + call __ecp_nistz256_sub_from + ld [$ap+28],@acc[7] + ret + restore +.size ecp_nistz256_sub,.-ecp_nistz256_sub + +! void ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]); +.globl ecp_nistz256_neg +.align 32 +ecp_nistz256_neg: + save %sp,-STACK_FRAME,%sp + mov $ap,$bp + mov 0,@acc[0] + mov 0,@acc[1] + mov 0,@acc[2] + mov 0,@acc[3] + mov 0,@acc[4] + mov 0,@acc[5] + mov 0,@acc[6] + call __ecp_nistz256_sub_from + mov 0,@acc[7] + ret + restore +.size ecp_nistz256_neg,.-ecp_nistz256_neg + +.align 32 +__ecp_nistz256_sub_from: + ld [$bp+0],$t0 ! b[0] + ld [$bp+4],$t1 + ld [$bp+8],$t2 + ld [$bp+12],$t3 + subcc @acc[0],$t0,@acc[0] + ld [$bp+16],$t4 + ld [$bp+20],$t5 + subccc @acc[1],$t1,@acc[1] + subccc @acc[2],$t2,@acc[2] + ld [$bp+24],$t6 + ld [$bp+28],$t7 + subccc @acc[3],$t3,@acc[3] + subccc @acc[4],$t4,@acc[4] + subccc @acc[5],$t5,@acc[5] + subccc @acc[6],$t6,@acc[6] + subccc @acc[7],$t7,@acc[7] + subc %g0,%g0,$carry ! broadcast borrow bit + +.Lreduce_by_add: + + ! if a-b borrows, add modulus. 
+	!
+	! Note that because mod has special form, i.e. consists of
+	! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
+	! using value of broadcasted borrow and the borrow bit itself.
+	! To minimize dependency chain we first broadcast and then
+	! extract the bit by negating (follow $bi).
+
+	addcc	@acc[0],$carry,@acc[0]	! add synthesized modulus
+	addccc	@acc[1],$carry,@acc[1]
+	neg	$carry,$bi
+	st	@acc[0],[$rp]
+	addccc	@acc[2],$carry,@acc[2]
+	st	@acc[1],[$rp+4]
+	addccc	@acc[3],0,@acc[3]
+	st	@acc[2],[$rp+8]
+	addccc	@acc[4],0,@acc[4]
+	st	@acc[3],[$rp+12]
+	addccc	@acc[5],0,@acc[5]
+	st	@acc[4],[$rp+16]
+	addccc	@acc[6],$bi,@acc[6]
+	st	@acc[5],[$rp+20]
+	addc	@acc[7],$carry,@acc[7]
+	st	@acc[6],[$rp+24]
+	retl
+	st	@acc[7],[$rp+28]
+.size	__ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
+
+.align	32
+__ecp_nistz256_sub_morf:
+	ld	[$bp+0],$t0		! b[0]
+	ld	[$bp+4],$t1
+	ld	[$bp+8],$t2
+	ld	[$bp+12],$t3
+	subcc	$t0,@acc[0],@acc[0]
+	ld	[$bp+16],$t4
+	ld	[$bp+20],$t5
+	subccc	$t1,@acc[1],@acc[1]
+	subccc	$t2,@acc[2],@acc[2]
+	ld	[$bp+24],$t6
+	ld	[$bp+28],$t7
+	subccc	$t3,@acc[3],@acc[3]
+	subccc	$t4,@acc[4],@acc[4]
+	subccc	$t5,@acc[5],@acc[5]
+	subccc	$t6,@acc[6],@acc[6]
+	subccc	$t7,@acc[7],@acc[7]
+	b	.Lreduce_by_add
+	subc	%g0,%g0,$carry		! broadcast borrow bit
+.size	__ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
+
+! void	ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
+.globl	ecp_nistz256_div_by_2
+.align	32
+ecp_nistz256_div_by_2:
+	save	%sp,-STACK_FRAME,%sp
+	ld	[$ap],@acc[0]
+	ld	[$ap+4],@acc[1]
+	ld	[$ap+8],@acc[2]
+	ld	[$ap+12],@acc[3]
+	ld	[$ap+16],@acc[4]
+	ld	[$ap+20],@acc[5]
+	ld	[$ap+24],@acc[6]
+	call	__ecp_nistz256_div_by_2
+	ld	[$ap+28],@acc[7]
+	ret
+	restore
+.size	ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
+
+.align	32
+__ecp_nistz256_div_by_2:
+	! ret = (a is odd ? a+mod : a) >> 1
+
+	and	@acc[0],1,$bi
+	neg	$bi,$carry
+	addcc	@acc[0],$carry,@acc[0]
+	addccc	@acc[1],$carry,@acc[1]
+	addccc	@acc[2],$carry,@acc[2]
+	addccc	@acc[3],0,@acc[3]
+	addccc	@acc[4],0,@acc[4]
+	addccc	@acc[5],0,@acc[5]
+	addccc	@acc[6],$bi,@acc[6]
+	addccc	@acc[7],$carry,@acc[7]
+	addc	%g0,%g0,$carry
+
+	! ret >>= 1
+
+	srl	@acc[0],1,@acc[0]
+	sll	@acc[1],31,$t0
+	srl	@acc[1],1,@acc[1]
+	or	@acc[0],$t0,@acc[0]
+	sll	@acc[2],31,$t1
+	srl	@acc[2],1,@acc[2]
+	or	@acc[1],$t1,@acc[1]
+	sll	@acc[3],31,$t2
+	st	@acc[0],[$rp]
+	srl	@acc[3],1,@acc[3]
+	or	@acc[2],$t2,@acc[2]
+	sll	@acc[4],31,$t3
+	st	@acc[1],[$rp+4]
+	srl	@acc[4],1,@acc[4]
+	or	@acc[3],$t3,@acc[3]
+	sll	@acc[5],31,$t4
+	st	@acc[2],[$rp+8]
+	srl	@acc[5],1,@acc[5]
+	or	@acc[4],$t4,@acc[4]
+	sll	@acc[6],31,$t5
+	st	@acc[3],[$rp+12]
+	srl	@acc[6],1,@acc[6]
+	or	@acc[5],$t5,@acc[5]
+	sll	@acc[7],31,$t6
+	st	@acc[4],[$rp+16]
+	srl	@acc[7],1,@acc[7]
+	or	@acc[6],$t6,@acc[6]
+	sll	$carry,31,$t7
+	st	@acc[5],[$rp+20]
+	or	@acc[7],$t7,@acc[7]
+	st	@acc[6],[$rp+24]
+	retl
+	st	@acc[7],[$rp+28]
+.size	__ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
+___
+
+########################################################################
+# The following subroutines are "literal" implementations of those
+# found in ecp_nistz256.c.
+#
+########################################################################
+# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
+#
+{
+my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
+# above map() describes stack layout with 4 temporary
+# 256-bit vectors on top.
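+#
+# For illustration (not in the original): with the offsets produced by
+# the map above, the temporaries live at %sp+LOCALS+0 (S),
+# %sp+LOCALS+32 (M), %sp+LOCALS+64 (Zsqr) and %sp+LOCALS+96 (tmp0).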
+ +$code.=<<___; +#ifdef __PIC__ +SPARC_PIC_THUNK(%g1) +#endif + +.globl ecp_nistz256_point_double +.align 32 +ecp_nistz256_point_double: + SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5) + ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0] + and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1 + cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK) + be ecp_nistz256_point_double_vis3 + nop + + save %sp,-STACK_FRAME-32*4,%sp + + mov $rp,$rp_real + mov $ap,$ap_real + + ld [$ap+32],@acc[0] + ld [$ap+32+4],@acc[1] + ld [$ap+32+8],@acc[2] + ld [$ap+32+12],@acc[3] + ld [$ap+32+16],@acc[4] + ld [$ap+32+20],@acc[5] + ld [$ap+32+24],@acc[6] + ld [$ap+32+28],@acc[7] + call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(S, in_y); + add %sp,LOCALS+$S,$rp + + add $ap_real,64,$bp + add $ap_real,64,$ap + call __ecp_nistz256_mul_mont ! p256_sqr_mont(Zsqr, in_z); + add %sp,LOCALS+$Zsqr,$rp + + add $ap_real,0,$bp + call __ecp_nistz256_add ! p256_add(M, Zsqr, in_x); + add %sp,LOCALS+$M,$rp + + add %sp,LOCALS+$S,$bp + add %sp,LOCALS+$S,$ap + call __ecp_nistz256_mul_mont ! p256_sqr_mont(S, S); + add %sp,LOCALS+$S,$rp + + ld [$ap_real],@acc[0] + add %sp,LOCALS+$Zsqr,$bp + ld [$ap_real+4],@acc[1] + ld [$ap_real+8],@acc[2] + ld [$ap_real+12],@acc[3] + ld [$ap_real+16],@acc[4] + ld [$ap_real+20],@acc[5] + ld [$ap_real+24],@acc[6] + ld [$ap_real+28],@acc[7] + call __ecp_nistz256_sub_from ! p256_sub(Zsqr, in_x, Zsqr); + add %sp,LOCALS+$Zsqr,$rp + + add $ap_real,32,$bp + add $ap_real,64,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(tmp0, in_z, in_y); + add %sp,LOCALS+$tmp0,$rp + + call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(res_z, tmp0); + add $rp_real,64,$rp + + add %sp,LOCALS+$Zsqr,$bp + add %sp,LOCALS+$M,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(M, M, Zsqr); + add %sp,LOCALS+$M,$rp + + call __ecp_nistz256_mul_by_3 ! p256_mul_by_3(M, M); + add %sp,LOCALS+$M,$rp + + add %sp,LOCALS+$S,$bp + add %sp,LOCALS+$S,$ap + call __ecp_nistz256_mul_mont ! p256_sqr_mont(tmp0, S); + add %sp,LOCALS+$tmp0,$rp + + call __ecp_nistz256_div_by_2 ! p256_div_by_2(res_y, tmp0); + add $rp_real,32,$rp + + add $ap_real,0,$bp + add %sp,LOCALS+$S,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, in_x); + add %sp,LOCALS+$S,$rp + + call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(tmp0, S); + add %sp,LOCALS+$tmp0,$rp + + add %sp,LOCALS+$M,$bp + add %sp,LOCALS+$M,$ap + call __ecp_nistz256_mul_mont ! p256_sqr_mont(res_x, M); + add $rp_real,0,$rp + + add %sp,LOCALS+$tmp0,$bp + call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, tmp0); + add $rp_real,0,$rp + + add %sp,LOCALS+$S,$bp + call __ecp_nistz256_sub_morf ! p256_sub(S, S, res_x); + add %sp,LOCALS+$S,$rp + + add %sp,LOCALS+$M,$bp + add %sp,LOCALS+$S,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, M); + add %sp,LOCALS+$S,$rp + + add $rp_real,32,$bp + call __ecp_nistz256_sub_from ! p256_sub(res_y, S, res_y); + add $rp_real,32,$rp + + ret + restore +.size ecp_nistz256_point_double,.-ecp_nistz256_point_double +___ +} + +######################################################################## +# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1, +# const P256_POINT *in2); +{ +my ($res_x,$res_y,$res_z, + $H,$Hsqr,$R,$Rsqr,$Hcub, + $U1,$U2,$S1,$S2)=map(32*$_,(0..11)); +my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr); + +# above map() describes stack layout with 12 temporary +# 256-bit vectors on top. Then we reserve some space for +# !in1infty, !in2infty, result of check for zero and return pointer. 
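+#
+# For illustration (not in the original): the %fp-relative slots used
+# below are [%fp+STACK_BIAS-8] saved $rp, [%fp+STACK_BIAS-12] !in2infty,
+# [%fp+STACK_BIAS-16] !in1infty, and [%fp+STACK_BIAS-20] the result of
+# the is_equal(S1,S2) check (the OR of the words of R).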
+ +my $bp_real=$rp_real; + +$code.=<<___; +.globl ecp_nistz256_point_add +.align 32 +ecp_nistz256_point_add: + SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5) + ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0] + and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1 + cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK) + be ecp_nistz256_point_add_vis3 + nop + + save %sp,-STACK_FRAME-32*12-32,%sp + + stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp + mov $ap,$ap_real + mov $bp,$bp_real + + ld [$bp],@acc[0] ! in2_x + ld [$bp+4],@acc[1] + ld [$bp+8],@acc[2] + ld [$bp+12],@acc[3] + ld [$bp+16],@acc[4] + ld [$bp+20],@acc[5] + ld [$bp+24],@acc[6] + ld [$bp+28],@acc[7] + ld [$bp+32],$t0 ! in2_y + ld [$bp+32+4],$t1 + ld [$bp+32+8],$t2 + ld [$bp+32+12],$t3 + ld [$bp+32+16],$t4 + ld [$bp+32+20],$t5 + ld [$bp+32+24],$t6 + ld [$bp+32+28],$t7 + or @acc[1],@acc[0],@acc[0] + or @acc[3],@acc[2],@acc[2] + or @acc[5],@acc[4],@acc[4] + or @acc[7],@acc[6],@acc[6] + or @acc[2],@acc[0],@acc[0] + or @acc[6],@acc[4],@acc[4] + or @acc[4],@acc[0],@acc[0] + or $t1,$t0,$t0 + or $t3,$t2,$t2 + or $t5,$t4,$t4 + or $t7,$t6,$t6 + or $t2,$t0,$t0 + or $t6,$t4,$t4 + or $t4,$t0,$t0 + or @acc[0],$t0,$t0 ! !in2infty + movrnz $t0,-1,$t0 + st $t0,[%fp+STACK_BIAS-12] + + ld [$ap],@acc[0] ! in1_x + ld [$ap+4],@acc[1] + ld [$ap+8],@acc[2] + ld [$ap+12],@acc[3] + ld [$ap+16],@acc[4] + ld [$ap+20],@acc[5] + ld [$ap+24],@acc[6] + ld [$ap+28],@acc[7] + ld [$ap+32],$t0 ! in1_y + ld [$ap+32+4],$t1 + ld [$ap+32+8],$t2 + ld [$ap+32+12],$t3 + ld [$ap+32+16],$t4 + ld [$ap+32+20],$t5 + ld [$ap+32+24],$t6 + ld [$ap+32+28],$t7 + or @acc[1],@acc[0],@acc[0] + or @acc[3],@acc[2],@acc[2] + or @acc[5],@acc[4],@acc[4] + or @acc[7],@acc[6],@acc[6] + or @acc[2],@acc[0],@acc[0] + or @acc[6],@acc[4],@acc[4] + or @acc[4],@acc[0],@acc[0] + or $t1,$t0,$t0 + or $t3,$t2,$t2 + or $t5,$t4,$t4 + or $t7,$t6,$t6 + or $t2,$t0,$t0 + or $t6,$t4,$t4 + or $t4,$t0,$t0 + or @acc[0],$t0,$t0 ! !in1infty + movrnz $t0,-1,$t0 + st $t0,[%fp+STACK_BIAS-16] + + add $bp_real,64,$bp + add $bp_real,64,$ap + call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z2sqr, in2_z); + add %sp,LOCALS+$Z2sqr,$rp + + add $ap_real,64,$bp + add $ap_real,64,$ap + call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z); + add %sp,LOCALS+$Z1sqr,$rp + + add $bp_real,64,$bp + add %sp,LOCALS+$Z2sqr,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, Z2sqr, in2_z); + add %sp,LOCALS+$S1,$rp + + add $ap_real,64,$bp + add %sp,LOCALS+$Z1sqr,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z); + add %sp,LOCALS+$S2,$rp + + add $ap_real,32,$bp + add %sp,LOCALS+$S1,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, S1, in1_y); + add %sp,LOCALS+$S1,$rp + + add $bp_real,32,$bp + add %sp,LOCALS+$S2,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y); + add %sp,LOCALS+$S2,$rp + + add %sp,LOCALS+$S1,$bp + call __ecp_nistz256_sub_from ! p256_sub(R, S2, S1); + add %sp,LOCALS+$R,$rp + + or @acc[1],@acc[0],@acc[0] ! see if result is zero + or @acc[3],@acc[2],@acc[2] + or @acc[5],@acc[4],@acc[4] + or @acc[7],@acc[6],@acc[6] + or @acc[2],@acc[0],@acc[0] + or @acc[6],@acc[4],@acc[4] + or @acc[4],@acc[0],@acc[0] + st @acc[0],[%fp+STACK_BIAS-20] + + add $ap_real,0,$bp + add %sp,LOCALS+$Z2sqr,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(U1, in1_x, Z2sqr); + add %sp,LOCALS+$U1,$rp + + add $bp_real,0,$bp + add %sp,LOCALS+$Z1sqr,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in2_x, Z1sqr); + add %sp,LOCALS+$U2,$rp + + add %sp,LOCALS+$U1,$bp + call __ecp_nistz256_sub_from ! 
p256_sub(H, U2, U1); + add %sp,LOCALS+$H,$rp + + or @acc[1],@acc[0],@acc[0] ! see if result is zero + or @acc[3],@acc[2],@acc[2] + or @acc[5],@acc[4],@acc[4] + or @acc[7],@acc[6],@acc[6] + or @acc[2],@acc[0],@acc[0] + or @acc[6],@acc[4],@acc[4] + orcc @acc[4],@acc[0],@acc[0] + + bne,pt %icc,.Ladd_proceed ! is_equal(U1,U2)? + nop + + ld [%fp+STACK_BIAS-12],$t0 + ld [%fp+STACK_BIAS-16],$t1 + ld [%fp+STACK_BIAS-20],$t2 + andcc $t0,$t1,%g0 + be,pt %icc,.Ladd_proceed ! (in1infty || in2infty)? + nop + andcc $t2,$t2,%g0 + be,pt %icc,.Ladd_proceed ! is_equal(S1,S2)? + nop + + ldx [%fp+STACK_BIAS-8],$rp + st %g0,[$rp] + st %g0,[$rp+4] + st %g0,[$rp+8] + st %g0,[$rp+12] + st %g0,[$rp+16] + st %g0,[$rp+20] + st %g0,[$rp+24] + st %g0,[$rp+28] + st %g0,[$rp+32] + st %g0,[$rp+32+4] + st %g0,[$rp+32+8] + st %g0,[$rp+32+12] + st %g0,[$rp+32+16] + st %g0,[$rp+32+20] + st %g0,[$rp+32+24] + st %g0,[$rp+32+28] + st %g0,[$rp+64] + st %g0,[$rp+64+4] + st %g0,[$rp+64+8] + st %g0,[$rp+64+12] + st %g0,[$rp+64+16] + st %g0,[$rp+64+20] + st %g0,[$rp+64+24] + st %g0,[$rp+64+28] + b .Ladd_done + nop + +.align 16 +.Ladd_proceed: + add %sp,LOCALS+$R,$bp + add %sp,LOCALS+$R,$ap + call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R); + add %sp,LOCALS+$Rsqr,$rp + + add $ap_real,64,$bp + add %sp,LOCALS+$H,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z); + add %sp,LOCALS+$res_z,$rp + + add %sp,LOCALS+$H,$bp + add %sp,LOCALS+$H,$ap + call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H); + add %sp,LOCALS+$Hsqr,$rp + + add $bp_real,64,$bp + add %sp,LOCALS+$res_z,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, res_z, in2_z); + add %sp,LOCALS+$res_z,$rp + + add %sp,LOCALS+$H,$bp + add %sp,LOCALS+$Hsqr,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H); + add %sp,LOCALS+$Hcub,$rp + + add %sp,LOCALS+$U1,$bp + add %sp,LOCALS+$Hsqr,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, U1, Hsqr); + add %sp,LOCALS+$U2,$rp + + call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2); + add %sp,LOCALS+$Hsqr,$rp + + add %sp,LOCALS+$Rsqr,$bp + call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr); + add %sp,LOCALS+$res_x,$rp + + add %sp,LOCALS+$Hcub,$bp + call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub); + add %sp,LOCALS+$res_x,$rp + + add %sp,LOCALS+$U2,$bp + call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x); + add %sp,LOCALS+$res_y,$rp + + add %sp,LOCALS+$Hcub,$bp + add %sp,LOCALS+$S1,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S1, Hcub); + add %sp,LOCALS+$S2,$rp + + add %sp,LOCALS+$R,$bp + add %sp,LOCALS+$res_y,$ap + call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R); + add %sp,LOCALS+$res_y,$rp + + add %sp,LOCALS+$S2,$bp + call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2); + add %sp,LOCALS+$res_y,$rp + + ld [%fp+STACK_BIAS-16],$t1 ! !in1infty + ld [%fp+STACK_BIAS-12],$t2 ! !in2infty + ldx [%fp+STACK_BIAS-8],$rp +___ +for($i=0;$i<96;$i+=8) { # conditional moves +$code.=<<___; + ld [%sp+LOCALS+$i],@acc[0] ! res + ld [%sp+LOCALS+$i+4],@acc[1] + ld [$bp_real+$i],@acc[2] ! in2 + ld [$bp_real+$i+4],@acc[3] + ld [$ap_real+$i],@acc[4] ! 
in1 + ld [$ap_real+$i+4],@acc[5] + movrz $t1,@acc[2],@acc[0] + movrz $t1,@acc[3],@acc[1] + movrz $t2,@acc[4],@acc[0] + movrz $t2,@acc[5],@acc[1] + st @acc[0],[$rp+$i] + st @acc[1],[$rp+$i+4] +___ +} +$code.=<<___; +.Ladd_done: + ret + restore +.size ecp_nistz256_point_add,.-ecp_nistz256_point_add +___ +} + +######################################################################## +# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1, +# const P256_POINT_AFFINE *in2); +{ +my ($res_x,$res_y,$res_z, + $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9)); +my $Z1sqr = $S2; +# above map() describes stack layout with 10 temporary +# 256-bit vectors on top. Then we reserve some space for +# !in1infty, !in2infty, result of check for zero and return pointer. + +my @ONE_mont=(1,0,0,-1,-1,-1,-2,0); +my $bp_real=$rp_real; + +$code.=<<___; +.globl ecp_nistz256_point_add_affine +.align 32 +ecp_nistz256_point_add_affine: + SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5) + ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0] + and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1 + cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK) + be ecp_ni |
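
The conditional-move loop at the end of ecp_nistz256_point_add above selects each output word without branching: the infinity checks were folded into -1/0 flags on the stack, and movrz overrides the computed result with the other input whenever one operand was the point at infinity. A word-level Perl model of that selection (illustration only, not part of the commit; the helper name select_word is hypothetical, and the real code uses conditional moves rather than branches):

# $not_in1infty/$not_in2infty are -1 ("finite point") or 0 ("point at
# infinity"), as computed by the or/movrnz sequences above.
sub select_word {
    my ($res, $in2, $in1, $not_in1infty, $not_in2infty) = @_;
    $res = $in2 if $not_in1infty == 0;	# movrz $t1,in2,res
    $res = $in1 if $not_in2infty == 0;	# movrz $t2,in1,res
    return $res;
}

# Example: in1 is the point at infinity (flag 0), so in2's word wins.
print select_word(0xdead, 0x1111, 0x2222, 0, -1), "\n";	# prints 4369 (0x1111)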