From e98c526b68fb1463c511c159fc892a207ad67abb Mon Sep 17 00:00:00 2001
From: Andy Polyakov <appro@openssl.org>
Date: Sun, 23 Sep 2012 20:39:53 +0000
Subject: Add md5-sparcv9.pl.

---
 crypto/md5/asm/md5-sparcv9.pl | 284 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 284 insertions(+)
 create mode 100644 crypto/md5/asm/md5-sparcv9.pl

diff --git a/crypto/md5/asm/md5-sparcv9.pl b/crypto/md5/asm/md5-sparcv9.pl
new file mode 100644
index 0000000000..c9ce0b94ad
--- /dev/null
+++ b/crypto/md5/asm/md5-sparcv9.pl
@@ -0,0 +1,284 @@
#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# MD5 for SPARCv9, 7.5 cycles per byte on UltraSPARC, >40% faster than
# code generated by Sun C 5.2.

$bits=32;
for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64) { $bias=2047; $frame=192; }
else { $bias=0; $frame=112; }

$output=shift;
open STDOUT,">$output";

use integer;

($ctx,$inp,$len)=("%i0","%i1","%i2");	# input arguments

# 64-bit values
@X=("%o0","%o1","%o2","%o3","%o4","%o5","%o7","%g1","%g2");
$tx="%g3";
($AB,$CD)=("%g4","%g5");

# 32-bit values
@V=($A,$B,$C,$D)=map("%l$_",(0..3));
($t1,$t2,$t3,$saved_asi)=map("%l$_",(4..7));
($shr,$shl1,$shl2)=("%i3","%i4","%i5");

my @K=( 0xd76aa478,0xe8c7b756,0x242070db,0xc1bdceee,
	0xf57c0faf,0x4787c62a,0xa8304613,0xfd469501,
	0x698098d8,0x8b44f7af,0xffff5bb1,0x895cd7be,
	0x6b901122,0xfd987193,0xa679438e,0x49b40821,

	0xf61e2562,0xc040b340,0x265e5a51,0xe9b6c7aa,
	0xd62f105d,0x02441453,0xd8a1e681,0xe7d3fbc8,
	0x21e1cde6,0xc33707d6,0xf4d50d87,0x455a14ed,
	0xa9e3e905,0xfcefa3f8,0x676f02d9,0x8d2a4c8a,

	0xfffa3942,0x8771f681,0x6d9d6122,0xfde5380c,
	0xa4beea44,0x4bdecfa9,0xf6bb4b60,0xbebfbc70,
	0x289b7ec6,0xeaa127fa,0xd4ef3085,0x04881d05,
	0xd9d4d039,0xe6db99e5,0x1fa27cf8,0xc4ac5665,

	0xf4292244,0x432aff97,0xab9423a7,0xfc93a039,
	0x655b59c3,0x8f0ccc92,0xffeff47d,0x85845dd1,
	0x6fa87e4f,0xfe2ce6e0,0xa3014314,0x4e0811a1,
	0xf7537e82,0xbd3af235,0x2ad7d2bb,0xeb86d391, 0 );
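
# The @K table above is not arbitrary: RFC 1321 defines the MD5 constants
# as K[i] = floor(2^32 * abs(sin(i+1))) for i = 0..63.  A minimal sketch
# to regenerate and cross-check the table (run it standalone; it cannot
# live in this module, whose STDOUT is redirected to the output file):
#
#	perl -e 'printf "0x%08x,\n", int(abs(sin($_+1))*2**32) for 0..63'
#
# The 65th element, 0, is a sentinel: every round helper below prefetches
# @K[$i+1], and the padding keeps that lookup in range on round 63.
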
# Rounds 0..14: a = b + ((a + F(b,c,d) + X + K) <<< rot), with F(b,c,d) =
# (b&c)|(~b&d) computed as ((c^d)&b)^d; c^d is handed over in $t1 by the
# previous round.  Even rounds extract the odd-numbered word X[2j+1] from
# its 64-bit lane; odd rounds shift the next lane into alignment (the
# input may be unaligned).
sub R0 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (7,12,17,22)[$i%4];
  my $j   = ($i+1)/2;

  if ($i&1) {
    $code.=<<___;
	srlx	@X[$j],$shr,@X[$j]	! align X[`$i+1`]
	and	$b,$t1,$t1		! round $i
	sllx	@X[$j+1],$shl1,$tx
	add	$t2,$a,$a
	sllx	$tx,$shl2,$tx
	xor	$d,$t1,$t1
	or	$tx,@X[$j],@X[$j]
	sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	add	@X[$j],$t2,$t2		! X[`$i+1`]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	xor	$b,$c,$t1
	add	$t3,$a,$a
___
  } else {
    $code.=<<___;
	srlx	@X[$j],32,$tx		! extract X[`2*$j+1`]
	and	$b,$t1,$t1		! round $i
	add	$t2,$a,$a
	xor	$d,$t1,$t1
	sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	add	$tx,$t2,$t2		! X[`2*$j+1`]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	xor	$b,$c,$t1
	add	$t3,$a,$a
___
  }
}

# Round 15: like an even R0 round, but seeds $t1 with b&~c (the next
# round's c&~d) to start the G rounds.
sub R0_1 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (7,12,17,22)[$i%4];

$code.=<<___;
	srlx	@X[0],32,$tx		! extract X[1]
	and	$b,$t1,$t1		! round $i
	add	$t2,$a,$a
	xor	$d,$t1,$t1
	sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	add	$tx,$t2,$t2		! X[1]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	andn	$b,$c,$t1
	add	$t3,$a,$a
___
}

# Rounds 16..31: G(b,c,d) = (b&d)|(c&~d); the c&~d half arrives in $t1
# from the previous round's andn.  Round 31 switches $t1 over to c^d for
# the H rounds.
sub R1 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (5,9,14,20)[$i%4];
  my $j   = $i<31 ? (1+5*($i+1))%16 : (5+3*($i+1))%16;
  my $xi  = @X[$j/2];

$code.=<<___ if ($j&1 && ($xi=$tx));
	srlx	@X[$j/2],32,$xi		! extract X[$j]
___
$code.=<<___;
	and	$b,$d,$t3		! round $i
	add	$t2,$a,$a
	or	$t3,$t1,$t1
	sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	`$i<31?"andn":"xor"`	$b,$c,$t1
	add	$t3,$a,$a
___
}

# Rounds 32..47: H(b,c,d) = b^c^d, folded as b^(c^d) with c^d pipelined
# in $t1.
sub R2 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (4,11,16,23)[$i%4];
  my $j   = $i<47 ? (5+3*($i+1))%16 : (0+7*($i+1))%16;
  my $xi  = @X[$j/2];

$code.=<<___ if ($j&1 && ($xi=$tx));
	srlx	@X[$j/2],32,$xi		! extract X[$j]
___
$code.=<<___;
	add	$t2,$a,$a		! round $i
	xor	$b,$t1,$t1
	sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	xor	$b,$c,$t1
	add	$t3,$a,$a
___
}

# Rounds 48..63: I(b,c,d) = c^(b|~d), computed in full each round with
# orn, since b here is the value the previous round just produced.
sub R3 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (6,10,15,21)[$i%4];
  my $j   = (0+7*($i+1))%16;
  my $xi  = @X[$j/2];

$code.=<<___;
	add	$t2,$a,$a		! round $i
___
$code.=<<___ if ($j&1 && ($xi=$tx));
	srlx	@X[$j/2],32,$xi		! extract X[$j]
___
$code.=<<___;
	orn	$b,$d,$t1
	sethi	%hi(@K[$i+1]),$t2
	xor	$c,$t1,$t1
	or	$t2,%lo(@K[$i+1]),$t2
	add	$t1,$a,$a
	sll	$a,$rot,$t3
	add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	add	$t3,$a,$a
___
}
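
# Two things are worth spelling out about the round helpers above.
#
# First, they are software-pipelined: each round's tail already prepares
# the next round's work.  $t2 leaves every round holding X[next]+K[next],
# and the last logical instruction (xor/andn) leaves $t1 holding the c^d
# or c&~d term of the next round's Boolean function -- the (a,b,c,d)
# rotation makes the current b,c the next round's c,d -- so that latency
# hides behind the rotate-and-add tail.
#
# Second, F(b,c,d) = (b&c)|(~b&d) is computed as ((c^d)&b)^d, which lets
# the c^d half be precomputed a round early.  The identity is easy to
# cross-check with a throwaway script (a sketch, not part of this
# module; 1-$b stands in for ~b on single bits):
#
#	perl -e 'for $b (0,1) { for $c (0,1) { for $d (0,1) {
#		die "mismatch" if ((($c^$d)&$b)^$d) != (($b&$c)|((1-$b)&$d));
#	}}} print "F identity holds\n"'
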
$code.=<<___ if ($bits==64);
.register	%g2,#scratch
.register	%g3,#scratch
___

# The function aligns the input pointer down to 8 bytes and loads through
# ASI_PRIMARY_LITTLE, so the byte-swap comes for free; $shr/$shl1/$shl2
# hold the shift counts that splice each 64-bit lane back together when
# the input is unaligned ($shl1+$shl2 == 64-$shr, split in two so that
# the aligned case degenerates to a harmless total shift of 64).
$code.=<<___;
.section	".text",#alloc,#execinstr

.globl	md5_block_asm_data_order
.align	32
md5_block_asm_data_order:
	save	%sp,-$frame,%sp

	rd	%asi,$saved_asi
	wr	%g0,0x88,%asi		! ASI_PRIMARY_LITTLE
	and	$inp,7,$shr
	andn	$inp,7,$inp

	sll	$shr,3,$shr		! *=8
	mov	56,$shl2
	ld	[$ctx+0],$A
	sub	$shl2,$shr,$shl2
	ld	[$ctx+4],$B
	and	$shl2,32,$shl1
	add	$shl2,8,$shl2
	ld	[$ctx+8],$C
	sub	$shl2,$shl1,$shl2	! shr+shl1+shl2==64
	ld	[$ctx+12],$D
	nop

.Loop:
	cmp	$shr,0			! was inp aligned?
	ldxa	[$inp+0]%asi,@X[0]	! load little-endian input
	ldxa	[$inp+8]%asi,@X[1]
	ldxa	[$inp+16]%asi,@X[2]
	ldxa	[$inp+24]%asi,@X[3]
	ldxa	[$inp+32]%asi,@X[4]
	sllx	$A,32,$AB		! pack A,B
	ldxa	[$inp+40]%asi,@X[5]
	sllx	$C,32,$CD		! pack C,D
	ldxa	[$inp+48]%asi,@X[6]
	or	$B,$AB,$AB
	ldxa	[$inp+56]%asi,@X[7]
	or	$D,$CD,$CD
	bnz,a,pn	%icc,.+8
	ldxa	[$inp+64]%asi,@X[8]

	srlx	@X[0],$shr,@X[0]	! align X[0]
	sllx	@X[1],$shl1,$tx
	sethi	%hi(@K[0]),$t2
	sllx	$tx,$shl2,$tx
	or	$t2,%lo(@K[0]),$t2
	or	$tx,@X[0],@X[0]
	xor	$C,$D,$t1
	add	@X[0],$t2,$t2		! X[0]+K[0]
___
	for ($i=0;$i<15;$i++) { &R0($i,@V);   unshift(@V,pop(@V)); }
	for (;$i<16;$i++)     { &R0_1($i,@V); unshift(@V,pop(@V)); }
	for (;$i<32;$i++)     { &R1($i,@V);   unshift(@V,pop(@V)); }
	for (;$i<48;$i++)     { &R2($i,@V);   unshift(@V,pop(@V)); }
	for (;$i<64;$i++)     { &R3($i,@V);   unshift(@V,pop(@V)); }
$code.=<<___;
	srlx	$AB,32,$t1		! unpack A,B,C,D and accumulate
	add	$inp,64,$inp		! advance inp
	srlx	$CD,32,$t2
	add	$t1,$A,$A
	subcc	$len,1,$len		! done yet?
	add	$AB,$B,$B
	add	$t2,$C,$C
	add	$CD,$D,$D
	srl	$B,0,$B			! clruw $B
	bne	`$bits==64?"%xcc":"%icc"`,.Loop
	srl	$D,0,$D			! clruw $D

	st	$A,[$ctx+0]		! write out ctx
	st	$B,[$ctx+4]
	st	$C,[$ctx+8]
	st	$D,[$ctx+12]

	wr	%g0,$saved_asi,%asi
	ret
	restore
.type	md5_block_asm_data_order,#function
.size	md5_block_asm_data_order,(.-md5_block_asm_data_order)

.asciz	"MD5 block transform for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
.align	4
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT;
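
# Usage sketch (an assumption inferred from the argument handling above:
# $output=shift takes ARGV[0], while the -m64/-xarch=v9 scan walks all of
# @ARGV, so the output file has to come first):
#
#	perl md5-sparcv9.pl md5-sparcv9.s		# 32-bit ABI
#	perl md5-sparcv9.pl md5-sparcv9.s -m64		# 64-bit ABI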