Diffstat (limited to 'crypto/sha')
-rw-r--r--  crypto/sha/asm/sha1-c64x-large.pl   230
-rw-r--r--  crypto/sha/asm/sha1-c64x.pl         330
-rw-r--r--  crypto/sha/asm/sha256-c64x.pl       313
-rw-r--r--  crypto/sha/asm/sha512-c64x.pl       437
4 files changed, 1310 insertions, 0 deletions
diff --git a/crypto/sha/asm/sha1-c64x-large.pl b/crypto/sha/asm/sha1-c64x-large.pl
new file mode 100644
index 0000000000..3916ff3a3f
--- /dev/null
+++ b/crypto/sha/asm/sha1-c64x-large.pl
@@ -0,0 +1,230 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA1 for C64x.
+#
+# November 2016
+#
+# This is a fully-unrolled SHA1 implementation. It's 25% faster than
+# the one with compact loops, doesn't use an in-memory ring buffer, as
+# everything is accommodated in registers, and has "perfect" interrupt
+# agility. The obvious drawback is the code size...
+
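For readers of the BODY_00_19/BODY_20_39/BODY_40_59 blocks below, here is a minimal plain-Perl reference sketch (not part of the patch) of the standard SHA-1 round step that they schedule onto C64x functional units; the F selection functions and K constants match the comments in the assembly.

    # Reference sketch only; FIPS 180-4 SHA-1 round, 32-bit wraparound
    # emulated with an explicit mask.
    sub rol32 { my ($x,$n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff }

    sub sha1_round {
        my ($i,$a,$b,$c,$d,$e,$x) = @_;   # $x is the expanded message word X[i]
        my ($f,$k);
        if    ($i < 20) { $f = ($b & $c) | (~$b & $d);            $k = 0x5a827999; }
        elsif ($i < 40) { $f = $b ^ $c ^ $d;                      $k = 0x6ed9eba1; }
        elsif ($i < 60) { $f = ($b & $c) | ($b & $d) | ($c & $d); $k = 0x8f1bbcdc; }
        else            { $f = $b ^ $c ^ $d;                      $k = 0xca62c1d6; }
        $e = ($e + rol32($a,5) + $f + $k + $x) & 0xffffffff;      # E += rot(A,5)+F+K+X[i]
        return ($e, $a, rol32($b,30), $c, $d);                    # new (A,B,C,D,E)
    }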
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+($CTX,$INP,$NUM) = ("A4","B4","A6"); # arguments
+
+($A,$B,$C,$D,$E, $Arot,$F,$F0,$K) = map("A$_",(16..20, 21..24));
+@V = ($A,$B,$C,$D,$E);
+@X = map("B$_",(16..31));
+($Actx,$Bctx,$Cctx,$Dctx,$Ectx) = map("A$_",(3,6..9)); # zaps $NUM
+
+sub BODY_00_19 {
+my ($i,$a,$b,$c,$d,$e) = @_;
+my $j = ($i+1)&15;
+
+$code.=<<___ if ($i<14);
+ ROTL $a,5,$Arot ;; $i
+|| AND $c,$b,$F
+|| ANDN $d,$b,$F0
+|| ADD $K,$e,$e ; E+=K
+|| LDNW *${INP}++,@X[$i+2]
+ OR $F0,$F,$F ; F_00_19(B,C,D)
+|| ROTL $b,30,$b
+|| SWAP2 @X[$i+1],@X[$i+1]
+|| ADD @X[$i],$e,$e ; E+=X[i]
+ ADD $Arot,$e,$e ; E+=rot(A,5)
+|| SWAP4 @X[$i+1],@X[$i+1]
+ ADD $F,$e,$e ; E+=F_00_19(B,C,D)
+___
+$code.=<<___ if ($i==14);
+ ROTL $a,5,$Arot ;; $i
+|| AND $c,$b,$F
+|| ANDN $d,$b,$F0
+|| ADD $K,$e,$e ; E+=K
+ OR $F0,$F,$F ; F_00_19(B,C,D)
+|| ROTL $b,30,$b
+|| ADD @X[$i],$e,$e ; E+=X[i]
+|| SWAP2 @X[$i+1],@X[$i+1]
+ ADD $Arot,$e,$e ; E+=rot(A,5)
+|| SWAP4 @X[$i+1],@X[$i+1]
+ ADD $F,$e,$e ; E+=F_00_19(B,C,D)
+___
+$code.=<<___ if ($i==15);
+|| XOR @X[($j+2)&15],@X[$j],@X[$j]
+ ROTL $a,5,$Arot ;; $i
+|| AND $c,$b,$F
+|| ANDN $d,$b,$F0
+|| ADD $K,$e,$e ; E+=K
+|| XOR @X[($j+8)&15],@X[$j],@X[$j]
+ OR $F0,$F,$F ; F_00_19(B,C,D)
+|| ROTL $b,30,$b
+|| ADD @X[$i],$e,$e ; E+=X[i]
+|| XOR @X[($j+13)&15],@X[$j],@X[$j]
+ ADD $Arot,$e,$e ; E+=rot(A,5)
+|| ROTL @X[$j],1,@X[$j]
+ ADD $F,$e,$e ; E+=F_00_19(B,C,D)
+___
+$code.=<<___ if ($i>15);
+|| XOR @X[($j+2)&15],@X[$j],@X[$j]
+ ROTL $a,5,$Arot ;; $i
+|| AND $c,$b,$F
+|| ANDN $d,$b,$F0
+|| ADD $K,$e,$e ; E+=K
+|| XOR @X[($j+8)&15],@X[$j],@X[$j]
+ OR $F0,$F,$F ; F_00_19(B,C,D)
+|| ROTL $b,30,$b
+|| ADD @X[$i&15],$e,$e ; E+=X[i]
+|| XOR @X[($j+13)&15],@X[$j],@X[$j]
+ ADD $Arot,$e,$e ; E+=rot(A,5)
+|| ROTL @X[$j],1,@X[$j]
+ ADD $F,$e,$e ; E+=F_00_19(B,C,D)
+___
+}
+
+sub BODY_20_39 {
+my ($i,$a,$b,$c,$d,$e) = @_;
+my $j = ($i+1)&15;
+
+$code.=<<___ if ($i<79);
+|| XOR @X[($j+2)&15],@X[$j],@X[$j]
+ ROTL $a,5,$Arot ;; $i
+|| XOR $c,$b,$F
+|| ADD $K,$e,$e ; E+=K
+|| XOR @X[($j+8)&15],@X[$j],@X[$j]
+ XOR $d,$F,$F ; F_20_39(B,C,D)
+|| ROTL $b,30,$b
+|| ADD @X[$i&15],$e,$e ; E+=X[i]
+|| XOR @X[($j+13)&15],@X[$j],@X[$j]
+ ADD $Arot,$e,$e ; E+=rot(A,5)
+|| ROTL @X[$j],1,@X[$j]
+ ADD $F,$e,$e ; E+=F_20_39(B,C,D)
+___
+$code.=<<___ if ($i==79);
+|| [A0] B loop?
+|| [A0] LDNW *${INP}++,@X[0] ; pre-fetch input
+ ROTL $a,5,$Arot ;; $i
+|| XOR $c,$b,$F
+|| ADD $K,$e,$e ; E+=K
+|| [A0] LDNW *${INP}++,@X[1]
+ XOR $d,$F,$F ; F_20_39(B,C,D)
+|| ROTL $b,30,$b
+|| ADD @X[$i&15],$e,$e ; E+=X[i]
+ ADD $Arot,$e,$e ; E+=rot(A,5)
+ ADD $F,$e,$e ; E+=F_20_39(B,C,D)
+|| ADD $Bctx,$a,$a ; accumulate context
+|| ADD $Cctx,$b,$b
+ ADD $Dctx,$c,$c
+|| ADD $Ectx,$d,$d
+|| ADD $Actx,$e,$e
+;;===== branch to loop? is taken here
+___
+}
+
+sub BODY_40_59 {
+my ($i,$a,$b,$c,$d,$e) = @_;
+my $j = ($i+1)&15;
+
+$code.=<<___;
+|| XOR @X[($j+2)&15],@X[$j],@X[$j]
+ ROTL $a,5,$Arot ;; $i
+|| AND $c,$b,$F
+|| AND $d,$b,$F0
+|| ADD $K,$e,$e ; E+=K
+|| XOR @X[($j+8)&15],@X[$j],@X[$j]
+ XOR $F0,$F,$F
+|| AND $c,$d,$F0
+|| ROTL $b,30,$b
+|| XOR @X[($j+13)&15],@X[$j],@X[$j]
+|| ADD @X[$i&15],$e,$e ; E+=X[i]
+ XOR $F0,$F,$F ; F_40_59(B,C,D)
+|| ADD $Arot,$e,$e ; E+=rot(A,5)
+|| ROTL @X[$j],1,@X[$j]
+	ADD	$F,$e,$e		; E+=F_40_59(B,C,D)
+___
+}
+
+$code=<<___;
+ .text
+
+ .if .ASSEMBLER_VERSION<7000000
+ .asg 0,__TI_EABI__
+ .endif
+ .if __TI_EABI__
+ .asg sha1_block_data_order,_sha1_block_data_order
+ .endif
+
+ .asg B3,RA
+ .asg A15,FP
+ .asg B15,SP
+
+ .if .BIG_ENDIAN
+ .asg MV,SWAP2
+ .asg MV,SWAP4
+ .endif
+
+ .global _sha1_block_data_order
+_sha1_block_data_order:
+ .asmfunc
+ MV $NUM,A0 ; reassign $NUM
+ [!A0] BNOP RA ; if ($NUM==0) return;
+|| [A0] LDW *${CTX}[0],$A ; load A-E...
+ [A0] LDW *${CTX}[1],$B
+ [A0] LDW *${CTX}[2],$C
+ [A0] LDW *${CTX}[3],$D
+ [A0] LDW *${CTX}[4],$E
+ [A0] LDNW *${INP}++,@X[0] ; pre-fetch input
+ [A0] LDNW *${INP}++,@X[1]
+ NOP 3
+
+loop?:
+ SUB A0,1,A0
+|| MV $A,$Actx
+|| MVD $B,$Bctx
+|| SWAP2 @X[0],@X[0]
+|| MVKL 0x5a827999,$K
+ MVKH 0x5a827999,$K ; K_00_19
+|| MV $C,$Cctx
+|| MV $D,$Dctx
+|| MVD $E,$Ectx
+|| SWAP4 @X[0],@X[0]
+___
+for ($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+|| MVKL 0x6ed9eba1,$K
+ MVKH 0x6ed9eba1,$K ; K_20_39
+___
+for (;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+|| MVKL 0x8f1bbcdc,$K
+ MVKH 0x8f1bbcdc,$K ; K_40_59
+___
+for (;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+|| MVKL 0xca62c1d6,$K
+ MVKH 0xca62c1d6,$K ; K_60_79
+___
+for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ BNOP RA ; return
+ STW $A,*${CTX}[0] ; emit A-E...
+ STW $B,*${CTX}[1]
+ STW $C,*${CTX}[2]
+ STW $D,*${CTX}[3]
+ STW $E,*${CTX}[4]
+ .endasmfunc
+
+ .sect .const
+ .cstring "SHA1 block transform for C64x, CRYPTOGAMS by <appro\@openssl.org>"
+ .align 4
+___
+
+print $code;
+close STDOUT;
diff --git a/crypto/sha/asm/sha1-c64x.pl b/crypto/sha/asm/sha1-c64x.pl
new file mode 100644
index 0000000000..d7a9dd1d05
--- /dev/null
+++ b/crypto/sha/asm/sha1-c64x.pl
@@ -0,0 +1,330 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA1 for C64x.
+#
+# November 2016
+#
+# Compared to compiler-generated code with similar characteristics,
+# i.e. compiled with OPENSSL_SMALL_FOOTPRINT and utilizing SPLOOPs,
+# this implementation is 25% smaller and >2x faster. In absolute terms
+# performance is (a quite impressive) ~6.5 cycles per processed byte.
+# Unlike its predecessor, the sha1-c64xplus module, this module has worse
+# interrupt agility. While the original added up to 5 cycles of delay to
+# the interrupt response, this module adds up to 100. The fully unrolled
+# implementation doesn't add any delay and is even 25% faster, but is
+# almost 5x larger...
+#
+# !!! Note that this module uses AMR, which means that all interrupt
+# service routines are expected to preserve it and, for their own
+# well-being, zero it upon entry.
+
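As a side note on the AMR usage flagged above: the following is a small sketch, not part of the patch and based on the standard C6000 AMR field layout (an assumption on my part), of how the 0x050404 value programmed into AMR further down decomposes into the BK0 block-size field and the circular-mode bits for A5/B5, i.e. $XPA/$XPB.

    # Sketch only: decompose the AMR value used below (0x050404).
    my $bk0 = 5;                          # block size = 2**($bk0 + 1) = 64 bytes
    my $amr = ($bk0 << 16)                # BK0 block-size field
            | (1 << 2)                    # A5 ($XPA): circular addressing via BK0
            | (1 << 10);                  # B5 ($XPB): circular addressing via BK0
    printf "AMR = 0x%06x\n", $amr;        # prints 0x050404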
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+($CTX,$INP,$NUM) = ("A4","B4","A6"); # arguments
+
+($A,$B,$C,$D,$E, $Arot,$F,$F0,$T,$K) = map("A$_",(16..20, 21..25));
+($X0,$X2,$X8,$X13) = ("A26","B26","A27","B27");
+($TX0,$TX1,$TX2,$TX3) = map("B$_",(28..31));
+($XPA,$XPB) = ("A5","B5"); # X circular buffer
+($Actx,$Bctx,$Cctx,$Dctx,$Ectx) = map("A$_",(3,6..9)); # zaps $NUM
+
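The circularly-addressed $XPA/$XPB pointers implement the usual in-place 16-word SHA-1 message expansion; a reference sketch (not part of the patch) of that Xupdate recurrence:

    # Sketch only: X[j] = ROL(X[j] ^ X[j+2] ^ X[j+8] ^ X[j+13], 1), indices mod 16.
    sub xupdate {
        my ($X, $j) = @_;                 # $X: ref to the 16-word ring buffer
        my $t = $X->[$j & 15] ^ $X->[($j+2) & 15]
              ^ $X->[($j+8) & 15] ^ $X->[($j+13) & 15];
        $X->[$j & 15] = (($t << 1) | ($t >> 31)) & 0xffffffff;
        return $X->[$j & 15];
    }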
+$code=<<___;
+ .text
+
+ .if .ASSEMBLER_VERSION<7000000
+ .asg 0,__TI_EABI__
+ .endif
+ .if __TI_EABI__
+ .asg sha1_block_data_order,_sha1_block_data_order
+ .endif
+
+ .asg B3,RA
+ .asg A15,FP
+ .asg B15,SP
+
+ .if .BIG_ENDIAN
+ .asg MV,SWAP2
+ .asg MV,SWAP4
+ .endif
+
+ .global _sha1_block_data_order
+_sha1_block_data_order:
+ .asmfunc stack_usage(64)
+ MV $NUM,A0 ; reassign $NUM
+|| MVK -64,B0
+ [!A0] BNOP RA ; if ($NUM==0) return;
+|| [A0] STW FP,*SP--[16] ; save frame pointer and alloca(64)
+|| [A0] MV SP,FP
+ [A0] LDW *${CTX}[0],$A ; load A-E...
+|| [A0] AND B0,SP,SP ; align stack at 64 bytes
+ [A0] LDW *${CTX}[1],$B
+|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
+ [A0] LDW *${CTX}[2],$C
+|| [A0] MVK 0x00404,B0
+ [A0] LDW *${CTX}[3],$D
+|| [A0] MVKH 0x50000,B0 ; 0x050404, 64 bytes for $XP[AB]
+ [A0] LDW *${CTX}[4],$E
+|| [A0] MVC B0,AMR ; setup circular addressing
+ LDNW *${INP}++,$TX1 ; pre-fetch input
+ NOP 1
+
+loop?:
+ MVKL 0x5a827999,$K
+|| ADDAW SP,2,$XPB
+|| SUB A0,1,A0
+ MVKH 0x5a827999,$K ; K_00_19
+|| MV $A,$Actx
+|| MV $B,$Bctx
+;;==================================================
+ B body_00_13? ; BODY_00_13
+|| MVK 11,B0
+|| MV $XPB,$XPA
+|| MV $C,$Cctx
+|| MV $D,$Dctx
+|| MVD $E,$Ectx
+
+body_00_13?:
+ ROTL $A,5,$Arot
+|| AND $C,$B,$F
+|| ANDN $D,$B,$F0
+|| ADD $K,$E,$T ; T=E+K
+
+ XOR $F0,$F,$F ; F_00_19(B,C,D)
+|| MV $D,$E ; E=D
+|| MV $C,$D ; D=C
+|| SWAP2 $TX1,$TX2
+|| LDNW *${INP}++,$TX1
+
+ ADD $F,$T,$T ; T+=F_00_19(B,C,D)
+|| ROTL $B,30,$C ; C=ROL(B,30)
+|| SWAP4 $TX2,$TX3 ; byte swap
+
+ ADD $Arot,$T,$T ; T+=ROL(A,5)
+|| MV $A,$B ; B=A
+
+ ADD $TX3,$T,$A ; A=T+Xi
+|| STW $TX3,*${XPB}++
+|| BDEC body_00_13?,B0
+;;==================================================
+ ROTL $A,5,$Arot ; BODY_14
+|| AND $C,$B,$F
+|| ANDN $D,$B,$F0
+|| ADD $K,$E,$T ; T=E+K
+
+ XOR $F0,$F,$F ; F_00_19(B,C,D)
+|| MV $D,$E ; E=D
+|| MV $C,$D ; D=C
+|| SWAP2 $TX1,$TX2
+|| LDNW *${INP}++,$TX1
+
+ ADD $F,$T,$T ; T+=F_00_19(B,C,D)
+|| ROTL $B,30,$C ; C=ROL(B,30)
+|| SWAP4 $TX2,$TX2 ; byte swap
+|| LDW *${XPA}++,$X0 ; fetches from X ring buffer are
+|| LDW *${XPB}[4],$X2 ; 2 iterations ahead
+
+ ADD $Arot,$T,$T ; T+=ROL(A,5)
+|| MV $A,$B ; B=A
+|| LDW *${XPA}[7],$X8
+|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
+|| MV $TX2,$TX3
+
+ ADD $TX2,$T,$A ; A=T+Xi
+|| STW $TX2,*${XPB}++
+;;==================================================
+ ROTL $A,5,$Arot ; BODY_15
+|| AND $C,$B,$F
+|| ANDN $D,$B,$F0
+|| ADD $K,$E,$T ; T=E+K
+
+ XOR $F0,$F,$F ; F_00_19(B,C,D)
+|| MV $D,$E ; E=D
+|| MV $C,$D ; D=C
+|| SWAP2 $TX1,$TX2
+
+ ADD $F,$T,$T ; T+=F_00_19(B,C,D)
+|| ROTL $B,30,$C ; C=ROL(B,30)
+|| SWAP4 $TX2,$TX2 ; byte swap
+|| XOR $X0,$X2,$TX0 ; Xupdate XORs are 1 iteration ahead
+|| LDW *${XPA}++,$X0
+|| LDW *${XPB}[4],$X2
+
+ ADD $Arot,$T,$T ; T+=ROL(A,5)
+|| MV $A,$B ; B=A
+|| XOR $X8,$X13,$TX1
+|| LDW *${XPA}[7],$X8
+|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
+|| MV $TX2,$TX3
+
+ ADD $TX2,$T,$A ; A=T+Xi
+|| STW $TX2,*${XPB}++
+|| XOR $TX0,$TX1,$TX1
+;;==================================================
+|| B body_16_19? ; BODY_16_19
+|| MVK 1,B0
+
+body_16_19?:
+ ROTL $A,5,$Arot
+|| AND $C,$B,$F
+|| ANDN $D,$B,$F0
+|| ADD $K,$E,$T ; T=E+K
+|| ROTL $TX1,1,$TX2 ; Xupdate output
+
+ XOR $F0,$F,$F ; F_00_19(B,C,D)
+|| MV $D,$E ; E=D
+|| MV $C,$D ; D=C
+
+ ADD $F,$T,$T ; T+=F_00_19(B,C,D)
+|| ROTL $B,30,$C ; C=ROL(B,30)
+|| XOR $X0,$X2,$TX0
+|| LDW *${XPA}++,$X0
+|| LDW *${XPB}[4],$X2
+
+ ADD $Arot,$T,$T ; T+=ROL(A,5)
+|| MV $A,$B ; B=A
+|| XOR $X8,$X13,$TX1
+|| LDW *${XPA}[7],$X8
+|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
+|| MV $TX2,$TX3
+
+ ADD $TX2,$T,$A ; A=T+Xi
+|| STW $TX2,*${XPB}++
+|| XOR $TX0,$TX1,$TX1
+|| BDEC body_16_19?,B0
+
+ MVKL 0x6ed9eba1,$K
+|| MVK 17,B0
+ MVKH 0x6ed9eba1,$K ; K_20_39
+___
+sub BODY_20_39 {
+my $label = shift;
+$code.=<<___;
+;;==================================================
+|| B $label ; BODY_20_39
+
+$label:
+ ROTL $A,5,$Arot
+|| XOR $B,$C,$F
+|| ADD $K,$E,$T ; T=E+K
+|| ROTL $TX1,1,$TX2 ; Xupdate output
+
+ XOR $D,$F,$F ; F_20_39(B,C,D)
+|| MV $D,$E ; E=D
+|| MV $C,$D ; D=C
+
+ ADD $F,$T,$T ; T+=F_20_39(B,C,D)
+|| ROTL $B,30,$C ; C=ROL(B,30)
+|| XOR $X0,$X2,$TX0
+|| LDW *${XPA}++,$X0
+|| LDW *${XPB}[4],$X2
+
+ ADD $Arot,$T,$T ; T+=ROL(A,5)
+|| MV $A,$B ; B=A
+|| XOR $X8,$X13,$TX1
+|| LDW *${XPA}[7],$X8
+|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
+|| MV $TX2,$TX3
+
+ ADD $TX2,$T,$A ; A=T+Xi
+|| STW $TX2,*${XPB}++ ; last one is redundant
+|| XOR $TX0,$TX1,$TX1
+|| BDEC $label,B0
+___
+} &BODY_20_39("body_20_39?");
+$code.=<<___;
+;;==================================================
+ MVKL 0x8f1bbcdc,$K
+|| MVK 17,B0
+ MVKH 0x8f1bbcdc,$K ; K_40_59
+|| B body_40_59? ; BODY_40_59
+|| AND $B,$C,$F
+|| AND $B,$D,$F0
+
+body_40_59?:
+ ROTL $A,5,$Arot
+|| XOR $F0,$F,$F
+|| AND $C,$D,$F0
+|| ADD $K,$E,$T ; T=E+K
+|| ROTL $TX1,1,$TX2 ; Xupdate output
+
+ XOR $F0,$F,$F ; F_40_59(B,C,D)
+|| MV $D,$E ; E=D
+|| MV $C,$D ; D=C
+
+ ADD $F,$T,$T ; T+=F_40_59(B,C,D)
+|| ROTL $B,30,$C ; C=ROL(B,30)
+|| XOR $X0,$X2,$TX0
+|| LDW *${XPA}++,$X0
+|| LDW *${XPB}[4],$X2
+
+ ADD $Arot,$T,$T ; T+=ROL(A,5)
+|| MV $A,$B ; B=A
+|| XOR $X8,$X13,$TX1
+|| LDW *${XPA}[7],$X8
+|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
+|| MV $TX2,$TX3
+
+ ADD $TX2,$T,$A ; A=T+Xi
+|| STW $TX2,*${XPB}++
+|| XOR $TX0,$TX1,$TX1
+|| AND $B,$C,$F
+|| AND $B,$D,$F0
+|| BDEC body_40_59?,B0
+
+ MVKL 0xca62c1d6,$K
+|| MVK 16,B0
+ MVKH 0xca62c1d6,$K ; K_60_79
+___
+ &BODY_20_39("body_60_78?"); # BODY_60_78
+$code.=<<___;
+;;==================================================
+ [A0] B loop?
+|| ROTL $A,5,$Arot ; BODY_79
+|| XOR $B,$C,$F
+|| ROTL $TX1,1,$TX2 ; Xupdate output
+
+ [A0] LDNW *${INP}++,$TX1 ; pre-fetch input
+|| ADD $K,$E,$T ; T=E+K
+|| XOR $D,$F,$F ; F_20_39(B,C,D)
+
+ ADD $F,$T,$T ; T+=F_20_39(B,C,D)
+|| ADD $Ectx,$D,$E ; E=D,E+=Ectx
+|| ADD $Dctx,$C,$D ; D=C,D+=Dctx
+|| ROTL $B,30,$C ; C=ROL(B,30)
+
+ ADD $Arot,$T,$T ; T+=ROL(A,5)
+|| ADD $Bctx,$A,$B ; B=A,B+=Bctx
+
+ ADD $TX2,$T,$A ; A=T+Xi
+
+ ADD $Actx,$A,$A ; A+=Actx
+|| ADD $Cctx,$C,$C ; C+=Cctx
+;; end of loop?
+
+ BNOP RA ; return
+|| MV FP,SP ; restore stack pointer
+|| LDW *FP[0],FP ; restore frame pointer
+ STW $A,*${CTX}[0] ; emit A-E...
+|| MVK 0,B0
+ STW $B,*${CTX}[1]
+|| MVC B0,AMR ; clear AMR
+ STW $C,*${CTX}[2]
+ STW $D,*${CTX}[3]
+ STW $E,*${CTX}[4]
+ .endasmfunc
+
+ .sect .const
+ .cstring "SHA1 block transform for C64x, CRYPTOGAMS by <appro\@openssl.org>"
+ .align 4
+___
+
+print $code;
+close STDOUT;
diff --git a/crypto/sha/asm/sha256-c64x.pl b/crypto/sha/asm/sha256-c64x.pl
new file mode 100644
index 0000000000..fbe99c0b7f
--- /dev/null
+++ b/crypto/sha/asm/sha256-c64x.pl
@@ -0,0 +1,313 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA256 for C64x.
+#
+# November 2016
+#
+# Performance is just below 10 cycles per processed byte, which is
+# almost 40% faster than compiler-generated code. Unrolling is unlikely
+# to give more than an ~8% improvement...
+#
+# !!! Note that this module uses AMR, which means that all interrupt
+# service routines are expected to preserve it and, for their own
+# well-being, zero it upon entry.
+
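For reference when reading the Ch/Maj/Sigma comments in the loop bodies below, here is a plain-Perl sketch (not part of the patch) of the standard SHA-256 round and message-schedule functions that the assembly computes with ROTL/SHRU/AND/ANDN/XOR:

    # Sketch only; FIPS 180-4 SHA-256, 32-bit wraparound via explicit masks.
    sub ror32 { my ($x,$n) = @_; (($x >> $n) | ($x << (32 - $n))) & 0xffffffff }

    sub sha256_round {
        my ($a,$b,$c,$d,$e,$f,$g,$h,$k,$x) = @_;   # $k = K256[i], $x = X[i]
        my $Ch   = ($e & $f) ^ (~$e & $g);
        my $Maj  = (($a | $b) & $c) | ($a & $b);   # equivalent to the usual majority
        my $Sig1 = ror32($e,6) ^ ror32($e,11) ^ ror32($e,25);
        my $Sig0 = ror32($a,2) ^ ror32($a,13) ^ ror32($a,22);
        my $T1   = ($h + $Sig1 + $Ch + $k + $x) & 0xffffffff;
        my $T2   = ($Sig0 + $Maj) & 0xffffffff;
        return (($T1 + $T2) & 0xffffffff, $a, $b, $c,
                ($d + $T1) & 0xffffffff, $e, $f, $g);
    }

    # Message schedule: X[i] += sigma0(X[i+1]) + X[i+9] + sigma1(X[i+14]), mod 16.
    sub sigma0 { my $x = shift; ror32($x,7)  ^ ror32($x,18) ^ ($x >> 3)  }
    sub sigma1 { my $x = shift; ror32($x,17) ^ ror32($x,19) ^ ($x >> 10) }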
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+($CTXA,$INP,$NUM) = ("A4","B4","A6"); # arguments
+ $K256="A3";
+
+($A,$Actx,$B,$Bctx,$C,$Cctx,$D,$Dctx,$T2,$S0,$s1,$t0a,$t1a,$t2a,$X9,$X14)
+ =map("A$_",(16..31));
+($E,$Ectx,$F,$Fctx,$G,$Gctx,$H,$Hctx,$T1,$S1,$s0,$t0e,$t1e,$t2e,$X1,$X15)
+ =map("B$_",(16..31));
+
+($Xia,$Xib)=("A5","B5"); # circular/ring buffer
+ $CTXB=$t2e;
+
+($Xn,$X0,$K)=("B7","B8","B9");
+($Maj,$Ch)=($T2,"B6");
+
+$code.=<<___;
+ .text
+
+ .if .ASSEMBLER_VERSION<7000000
+ .asg 0,__TI_EABI__
+ .endif
+ .if __TI_EABI__
+ .nocmp
+ .asg sha256_block_data_order,_sha256_block_data_order
+ .endif
+
+ .asg B3,RA
+ .asg A15,FP
+ .asg B15,SP
+
+ .if .BIG_ENDIAN
+ .asg SWAP2,MV
+ .asg SWAP4,MV
+ .endif
+
+ .global _sha256_block_data_order
+_sha256_block_data_order:
+__sha256_block:
+ .asmfunc stack_usage(64)
+ MV $NUM,A0 ; reassign $NUM
+|| MVK -64,B0
+ [!A0] BNOP RA ; if ($NUM==0) return;
+|| [A0] STW FP,*SP--[16] ; save frame pointer and alloca(64)
+|| [A0] MV SP,FP
+ [A0] ADDKPC _sha256_block_data_order,B2
+|| [A0] AND B0,SP,SP ; align stack at 64 bytes
+ .if __TI_EABI__
+ [A0] MVK 0x00404,B1
+|| [A0] MVKL \$PCR_OFFSET(K256,__sha256_block),$K256
+ [A0] MVKH 0x50000,B1
+|| [A0] MVKH \$PCR_OFFSET(K256,__sha256_block),$K256
+ .else
+ [A0] MVK 0x00404,B1
+|| [A0] MVKL (K256-__sha256_block),$K256
+ [A0] MVKH 0x50000,B1
+|| [A0] MVKH (K256-__sha256_block),$K256
+ .endif
+ [A0] MVC B1,AMR ; setup circular addressing
+|| [A0] MV SP,$Xia
+ [A0] MV SP,$Xib
+|| [A0] ADD B2,$K256,$K256
+|| [A0] MV $CTXA,$CTXB
+|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
+ LDW *${CTXA}[0],$A ; load ctx
+|| LDW *${CTXB}[4],$E
+ LDW *${CTXA}[1],$B
+|| LDW *${CTXB}[5],$F
+ LDW *${CTXA}[2],$C
+|| LDW *${CTXB}[6],$G
+ LDW *${CTXA}[3],$D
+|| LDW *${CTXB}[7],$H
+
+ LDNW *$INP++,$Xn ; pre-fetch input
+ LDW *$K256++,$K ; pre-fetch K256[0]
+ NOP
+ ADDAW $Xia,9,$Xia
+outerloop?:
+ SUB A0,1,A0
+|| MV $A,$Actx
+|| MV $E,$Ectx
+|| MVD $B,$Bctx
+|| MVD $F,$Fctx
+ MV $C,$Cctx
+|| MV $G,$Gctx
+|| MVD $D,$Dctx
+|| MVD $H,$Hctx
+|| SWAP4 $Xn,$X0
+
+ MVK 14,B0 ; loop counter
+|| SWAP2 $X0,$X0
+
+loop_00_14?: ; BODY_00_14
+ LDNW *$INP++,$Xn
+|| ROTL $A,30,$S0
+|| OR $A,$B,$Maj
+|| AND $A,$B,$t2a
+|| ROTL $E,26,$S1
+|| AND $F,$E,$Ch
+|| ANDN $G,$E,$t2e
+ ROTL $A,19,$t0a
+|| AND $C,$Maj,$Maj
+|| ROTL $E,21,$t0e
+|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
+ ROTL $A,10,$t1a
+|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
+|| ROTL $E,7,$t1e
+|| ADD $K,$H,$T1 ; T1 = h + K256[i]
+|| [B0] BDEC loop_00_14?,B0
+ ADD $X0,$T1,$T1 ; T1 += X[i];
+|| STW $X0,*$Xib++
+|| XOR $t0a,$S0,$S0
+|| XOR $t0e,$S1,$S1
+ XOR $t1a,$S0,$S0 ; Sigma0(a)
+|| XOR $t1e,$S1,$S1 ; Sigma1(e)
+|| LDW *$K256++,$K ; pre-fetch K256[i+1]
+|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
+ ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
+|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
+|| ROTL $G,0,$H ; h = g
+|| MV $F,$G ; g = f
+|| MV $X0,$X14
+|| SWAP4 $Xn,$X0
+ SWAP2 $X0,$X0
+|| MV $E,$F ; f = e
+|| ADD $D,$T1,$E ; e = d + T1
+|| MV $C,$D ; d = c
+ MV $B,$C ; c = b
+|| MV $A,$B ; b = a
+|| ADD $T1,$T2,$A ; a = T1 + T2
+;;===== branch to loop_00_14? is taken here
+
+ ROTL $A,30,$S0 ; BODY_15
+|| OR $A,$B,$Maj
+|| AND $A,$B,$t2a
+|| ROTL $E,26,$S1
+|| AND $F,$E,$Ch
+|| ANDN $G,$E,$t2e
+|| LDW *${Xib}[1],$Xn ; modulo-scheduled
+ ROTL $A,19,$t0a
+|| AND $C,$Maj,$Maj
+|| ROTL $E,21,$t0e
+|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
+|| LDW *${Xib}[2],$X1 ; modulo-scheduled
+ ROTL $A,10,$t1a
+|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
+|| ROTL $E,7,$t1e
+|| ADD $K,$H,$T1 ; T1 = h + K256[i]
+ ADD $X0,$T1,$T1 ; T1 += X[i];
+|| STW $X0,*$Xib++
+|| XOR $t0a,$S0,$S0
+|| XOR $t0e,$S1,$S1
+ XOR $t1a,$S0,$S0 ; Sigma0(a)
+|| XOR $t1e,$S1,$S1 ; Sigma1(e)
+|| LDW *$K256++,$K ; pre-fetch K256[i+1]
+|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
+ ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
+|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
+|| ROTL $G,0,$H ; h = g
+|| MV $F,$G ; g = f
+|| MV $X0,$X15
+ MV $E,$F ; f = e
+|| ADD $D,$T1,$E ; e = d + T1
+|| MV $C,$D ; d = c
+|| MV $Xn,$X0 ; modulo-scheduled
+|| LDW *$Xia,$X9 ; modulo-scheduled
+|| ROTL $X1,25,$t0e ; modulo-scheduled
+|| ROTL $X14,15,$t0a ; modulo-scheduled
+ SHRU $X1,3,$s0 ; modulo-scheduled
+|| SHRU $X14,10,$s1 ; modulo-scheduled
+|| ROTL $B,0,$C ; c = b
+|| MV $A,$B ; b = a
+|| ADD $T1,$T2,$A ; a = T1 + T2
+
+ MVK 47,B1 ; loop counter
+|| ROTL $X1,14,$t1e ; modulo-scheduled
+|| ROTL $X14,13,$t1a ; modulo-scheduled
+
+loop_16_63?: ; BODY_16_63
+ XOR $t0e,$s0,$s0
+|| XOR $t0a,$s1,$s1
+|| MV $X15,$X14
+|| MV $X1,$Xn
+ XOR $t1e,$s0,$s0 ; sigma0(X[i+1])
+|| XOR $t1a,$s1,$s1 ; sigma1(X[i+14])
+||	LDW	*${Xib}[2],$X1		; modulo-scheduled
+ ROTL $A,30,$S0
+|| OR $A,$B,$Maj
+|| AND $A,$B,$t2a
+|| ROTL $E,26,$S1
+|| AND $F,$E,$Ch
+|| ANDN $G,$E,$t2e
+|| ADD $X9,$X0,$X0 ; X[i] += X[i+9]
+ ROTL $A,19,$t0a
+|| AND $C,$Maj,$Maj
+|| ROTL $E,21,$t0e
+|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
+||	ADD	$s0,$X0,$X0		; X[i] += sigma0(X[i+1])
+ ROTL $A,10,$t1a
+|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
+|| ROTL $E,7,$t1e
+|| ADD $H,$K,$T1 ; T1 = h + K256[i]
+|| ADD $s1,$X0,$X0 ; X[i] += sigma1(X[i+14])
+|| [B1] BDEC loop_16_63?,B1
+ XOR $t0a,$S0,$S0
+|| XOR $t0e,$S1,$S1
+|| ADD $X0,$T1,$T1 ; T1 += X[i]
+|| STW $X0,*$Xib++
+ XOR $t1a,$S0,$S0 ; Sigma0(a)
+|| XOR $t1e,$S1,$S1 ; Sigma1(e)
+|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
+|| MV $X0,$X15
+|| ROTL $G,0,$H ; h = g
+|| LDW *$K256++,$K ; pre-fetch K256[i+1]
+ ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
+|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
+|| MV $F,$G ; g = f
+|| MV $Xn,$X0 ; modulo-scheduled
+|| LDW *++$Xia,$X9 ; modulo-scheduled
+||	ROTL	$X1,25,$t0e		; modulo-scheduled
+|| ROTL $X14,15,$t0a ; modulo-scheduled
+ ROTL $X1,14,$t1e ; modulo-scheduled
+|| ROTL $X14,13,$t1a ; modulo-scheduled
+|| MV $E,$F ; f = e
+|| ADD $D,$T1,$E ; e = d + T1
+|| MV $C,$D ; d = c
+|| MV $B,$C ; c = b
+ MV $A,$B ; b = a
+|| ADD $T1,$T2,$A ; a = T1 + T2
+|| SHRU $X1,3,$s0 ; modulo-scheduled
+|| SHRU $X14,10,$s1 ; modulo-scheduled
+;;===== branch to loop_16_63? is taken here
+
+ [A0] B outerloop?
+|| [A0] LDNW *$INP++,$Xn ; pre-fetch input
+|| [A0] ADDK -260,$K256 ; rewind K256
+|| ADD $Actx,$A,$A ; accumulate ctx
+|| ADD $Ectx,$E,$E
+|| ADD $Bctx,$B,$B
+ ADD $Fctx,$F,$F
+|| ADD $Cctx,$C,$C
+|| ADD $Gctx,$G,$G
+|| ADD $Dctx,$D,$D
+|| ADD $Hctx,$H,$H
+|| [A0] LDW *$K256++,$K ; pre-fetch K256[0]
+
+ [!A0] BNOP RA
+||[!A0] MV $CTXA,$CTXB
+ [!A0] MV FP,SP ; restore stack pointer
+||[!A0] LDW *FP[0],FP ; restore frame pointer
+ [!A0] STW $A,*${CTXA}[0] ; save ctx
+||[!A0] STW $E,*${CTXB}[4]
+||[!A0] MVK 0,B0
+ [!A0] STW $B,*${CTXA}[1]
+||[!A0] STW $F,*${CTXB}[5]
+||[!A0] MVC B0,AMR ; clear AMR
+ STW $C,*${CTXA}[2]
+|| STW $G,*${CTXB}[6]
+ STW $D,*${CTXA}[3]
+|| STW $H,*${CTXB}[7]
+ .endasmfunc
+
+ .if __TI_EABI__
+ .sect ".text:sha_asm.const"
+ .else
+ .sect ".const:sha_asm"
+ .endif
+ .align 128
+K256:
+ .uword 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+ .uword 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+ .uword 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+ .uword 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+ .uword 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+ .uword 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+ .uword 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+ .uword 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+ .uword 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+ .uword 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+ .uword 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+ .uword 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+ .uword 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+ .uword 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+ .uword 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+ .uword 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+ .cstring "SHA256 block transform for C64x, CRYPTOGAMS by <appro\@openssl.org>"
+ .align 4
+
+___
+
+print $code;
diff --git a/crypto/sha/asm/sha512-c64x.pl b/crypto/sha/asm/sha512-c64x.pl
new file mode 100644
index 0000000000..e35a72ade5
--- /dev/null
+++ b/crypto/sha/asm/sha512-c64x.pl
@@ -0,0 +1,437 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA512 for C64x.
+#
+# November 2016
+#
+# Performance is ~19 cycles per processed byte. Compared to the block
+# transform function from sha512.c compiled with cl6x with -mv6400+
+# -o2 -DOPENSSL_SMALL_FOOTPRINT, it's almost 7x faster and 2x smaller.
+# Loop unrolling won't make this implementation any faster, because
+# it's effectively dominated by SHRU||SHL pairs and you can't schedule
+# more of them.
+#
+# !!! Note that this module uses AMR, which means that all interrupt
+# service routines are expected to preserve it and, for their own
+# well-being, zero it upon entry.
+
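Since C64x has no 64-bit ALU operations, the module below keeps every SHA-512 quantity as a hi:lo pair of 32-bit registers. A small sketch (not part of the patch) of the two idioms that dominate the loop: 64-bit addition with an explicit carry word (the ADDU/$T1carry pattern) and a 64-bit rotate-right built from SHRU||SHL pairs.

    # Sketch only: 64-bit add as 32-bit halves plus carry, mirroring ADDU/ADD.
    sub add64 {
        my ($hi,$lo,$addhi,$addlo) = @_;
        my $l     = $lo + $addlo;                       # up to 33 bits
        my $carry = ($l >> 32) & 1;
        return (($hi + $addhi + $carry) & 0xffffffff, $l & 0xffffffff);
    }

    # Sketch only: 64-bit rotate-right by $n (0 < $n < 32) on hi:lo halves,
    # e.g. the Sigma1 term ROR64(e,14) done as SHRU/SHL pairs in the loop.
    sub ror64 {
        my ($hi,$lo,$n) = @_;
        return ((($hi >> $n) | ($lo << (32 - $n))) & 0xffffffff,
                (($lo >> $n) | ($hi << (32 - $n))) & 0xffffffff);
    }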
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+($CTXA,$INP,$NUM) = ("A4","B4","A6"); # arguments
+ $K512="A3";
+
+($Ahi,$Actxhi,$Bhi,$Bctxhi,$Chi,$Cctxhi,$Dhi,$Dctxhi,
+ $Ehi,$Ectxhi,$Fhi,$Fctxhi,$Ghi,$Gctxhi,$Hhi,$Hctxhi)=map("A$_",(16..31));
+($Alo,$Actxlo,$Blo,$Bctxlo,$Clo,$Cctxlo,$Dlo,$Dctxlo,
+ $Elo,$Ectxlo,$Flo,$Fctxlo,$Glo,$Gctxlo,$Hlo,$Hctxlo)=map("B$_",(16..31));
+
+($S1hi,$CHhi,$S0hi,$t0hi)=map("A$_",(10..13));
+($S1lo,$CHlo,$S0lo,$t0lo)=map("B$_",(10..13));
+($T1hi, $T2hi)= ("A6","A7");
+($T1lo,$T1carry,$T2lo,$T2carry)=("B6","B7","B8","B9");
+($Khi,$Klo)=("A9","A8");
+($MAJhi,$MAJlo)=($T2hi,$T2lo);
+($t1hi,$t1lo)=($Khi,"B2");
+ $CTXB=$t1lo;
+
+($Xihi,$Xilo)=("A5","B5"); # circular/ring buffer
+
+$code.=<<___;
+ .text
+
+ .if .ASSEMBLER_VERSION<7000000
+ .asg 0,__TI_EABI__
+ .endif
+ .if __TI_EABI__
+ .nocmp
+ .asg sha512_block_data_order,_sha512_block_data_order
+ .endif
+
+ .asg B3,RA
+ .asg A15,FP
+ .asg B15,SP
+
+ .if .BIG_ENDIAN
+ .asg $Khi,KHI
+ .asg $Klo,KLO
+ .else
+ .asg $Khi,KLO
+ .asg $Klo,KHI
+ .endif
+
+ .global _sha512_block_data_order
+_sha512_block_data_order:
+__sha512_block:
+ .asmfunc stack_usage(40+128)
+ MV $NUM,A0 ; reassign $NUM
+|| MVK -128,B0
+ [!A0] BNOP RA ; if ($NUM==0) return;
+|| [A0] STW FP,*SP--(40) ; save frame pointer
+|| [A0] MV SP,FP
+ [A0] STDW B13:B12,*SP[4]
+|| [A0] MVK 0x00404,B1
+ [A0] STDW B11:B10,*SP[3]
+|| [A0] STDW A13:A12,*FP[-3]
+|| [A0] MVKH 0x60000,B1
+ [A0] STDW A11:A10,*SP[1]
+|| [A0] MVC B1,AMR ; setup circular addressing
+|| [A0] ADD B0,SP,SP ; alloca(128)
+ .if __TI_EABI__
+ [A0] AND B0,SP,SP ; align stack at 128 bytes
+|| [A0] ADDKPC __sha512_block,B1
+|| [A0] MVKL \$PCR_OFFSET(K512,__sha512_block),$K512
+ [A0] MVKH \$PCR_OFFSET(K512,__sha512_block),$K512
+|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
+ .else
+ [A0] AND B0,SP,SP ; align stack at 128 bytes
+|| [A0] ADDKPC __sha512_block,B1
+|| [A0] MVKL (K512-__sha512_block),$K512
+ [A0] MVKH (K512-__sha512_block),$K512
+|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
+ .endif
+ ADDAW SP,3,$Xilo
+ ADD SP,4*2,$Xihi ; ADDAW SP,2,$Xihi
+
+|| MV $CTXA,$CTXB
+ LDW *${CTXA}[0^.LITTLE_ENDIAN],$Ahi ; load ctx
+|| LDW *${CTXB}[1^.LITTLE_ENDIAN],$Alo
+|| ADD B1,$K512,$K512
+ LDW *${CTXA}[2^.LITTLE_ENDIAN],$Bhi
+|| LDW *${CTXB}[3^.LITTLE_ENDIAN],$Blo
+ LDW *${CTXA}[4^.LITTLE_ENDIAN],$Chi
+|| LDW *${CTXB}[5^.LITTLE_ENDIAN],$Clo
+ LDW *${CTXA}[6^.LITTLE_ENDIAN],$Dhi
+|| LDW *${CTXB}[7^.LITTLE_ENDIAN],$Dlo
+ LDW *${CTXA}[8^.LITTLE_ENDIAN],$Ehi
+|| LDW *${CTXB}[9^.LITTLE_ENDIAN],$Elo
+ LDW *${CTXA}[10^.LITTLE_ENDIAN],$Fhi
+|| LDW *${CTXB}[11^.LITTLE_ENDIAN],$Flo
+ LDW *${CTXA}[12^.LITTLE_ENDIAN],$Ghi
+|| LDW *${CTXB}[13^.LITTLE_ENDIAN],$Glo
+ LDW *${CTXA}[14^.LITTLE_ENDIAN],$Hhi
+|| LDW *${CTXB}[15^.LITTLE_ENDIAN],$Hlo
+
+ LDNDW *$INP++,B11:B10 ; pre-fetch input
+ LDDW *$K512++,$Khi:$Klo ; pre-fetch K512[0]
+outerloop?:
+ MVK 15,B0 ; loop counters
+|| MVK 64,B1
+|| SUB A0,1,A0
+ MV $Ahi,$Actxhi
+|| MV $Alo,$Actxlo
+|| MV $Bhi,$Bctxhi
+|| MV $Blo,$Bctxlo
+|| MV $Chi,$Cctxhi
+|| MV $Clo,$Cctxlo
+|| MVD $Dhi,$Dctxhi
+|| MVD $Dlo,$Dctxlo
+ MV $Ehi,$Ectxhi
+|| MV $Elo,$Ectxlo
+|| MV $Fhi,$Fctxhi
+|| MV $Flo,$Fctxlo
+|| MV $Ghi,$Gctxhi
+|| MV $Glo,$Gctxlo
+|| MVD $Hhi,$Hctxhi
+|| MVD $Hlo,$Hctxlo
+loop0_15?:
+ .if .BIG_ENDIAN
+ MV B11,$T1hi
+|| MV B10,$T1lo
+ .else
+ SWAP4 B10,$T1hi
+|| SWAP4 B11,$T1lo
+ SWAP2 $T1hi,$T1hi
+|| SWAP2 $T1lo,$T1lo
+ .endif
+ STW $T1hi,*$Xihi++[2] ; original loop16_79?
+|| STW $T1lo,*$Xilo++[2] ; X[i] = T1
+|| ADD $Hhi,$T1hi,$T1hi
+|| ADDU $Hlo,$T1lo,$T1carry:$T1lo ; T1 += h
+|| SHRU $Ehi,14,$S1hi
+|| SHL $Ehi,32-14,$S1lo
+loop16_79?:
+ XOR $Fhi,$Ghi,$CHhi
+|| XOR $Flo,$Glo,$CHlo
+|| ADD KHI,$T1hi,$T1hi
+|| ADDU KLO,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += K512[i]
+|| SHRU $Elo,14,$t0lo
+|| SHL $Elo,32-14,$t0hi
+ XOR $t0hi,$S1hi,$S1hi
+|| XOR $t0lo,$S1lo,$S1lo
+|| AND $Ehi,$CHhi,$CHhi
+|| AND $Elo,$CHlo,$CHlo
+|| ROTL $Ghi,0,$Hhi
+|| ROTL $Glo,0,$Hlo ; h = g
+|| SHRU $Ehi,18,$t0hi
+|| SHL $Ehi,32-18,$t0lo
+ XOR $t0hi,$S1hi,$S1hi
+|| XOR $t0lo,$S1lo,$S1lo
+|| XOR $Ghi,$CHhi,$CHhi
+|| XOR $Glo,$CHlo,$CHlo ; Ch(e,f,g) = ((f^g)&e)^g
+|| ROTL $Fhi,0,$Ghi
+|| ROTL $Flo,0,$Glo ; g = f
+|| SHRU $Elo,18,$t0lo
+|| SHL $Elo,32-18,$t0hi
+ XOR $t0hi,$S1hi,$S1hi
+|| XOR $t0lo,$S1lo,$S1lo
+|| OR $Ahi,$Bhi,$MAJhi
+|| OR $Alo,$Blo,$MAJlo
+|| ROTL $Ehi,0,$Fhi
+|| ROTL $Elo,0,$Flo ; f = e
+|| SHRU $Ehi,41-32,$t0lo
+|| SHL $Ehi,64-41,$t0hi
+ XOR $t0hi,$S1hi,$S1hi
+|| XOR $t0lo,$S1lo,$S1lo
+|| AND $Chi,$MAJhi,$MAJhi
+|| AND $Clo,$MAJlo,$MAJlo
+|| ROTL $Dhi,0,$Ehi
+|| ROTL $Dlo,0,$Elo ; e = d
+|| SHRU $Elo,41-32,$t0hi
+|| SHL $Elo,64-41,$t0lo
+ XOR $t0hi,$S1hi,$S1hi
+|| XOR $t0lo,$S1lo,$S1lo ; Sigma1(e)
+|| AND $Ahi,$Bhi,$t1hi
+|| AND $Alo,$Blo,$t1lo
+|| ROTL $Chi,0,$Dhi
+|| ROTL $Clo,0,$Dlo ; d = c
+|| SHRU $Ahi,28,$S0hi
+|| SHL $Ahi,32-28,$S0lo
+ OR $t1hi,$MAJhi,$MAJhi
+|| OR $t1lo,$MAJlo,$MAJlo ; Maj(a,b,c) = ((a|b)&c)|(a&b)
+|| ADD $CHhi,$T1hi,$T1hi
+|| ADDU $CHlo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += Ch(e,f,g)
+|| ROTL $Bhi,0,$Chi
+|| ROTL $Blo,0,$Clo ; c = b
+|| SHRU $Alo,28,$t0lo
+|| SHL $Alo,32-28,$t0hi
+ XOR $t0hi,$S0hi,$S0hi
+|| XOR $t0lo,$S0lo,$S0lo
+|| ADD $S1hi,$T1hi,$T1hi
+|| ADDU $S1lo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += Sigma1(e)
+|| ROTL $Ahi,0,$Bhi
+|| ROTL $Alo,0,$Blo ; b = a
+|| SHRU $Ahi,34-32,$t0lo
+|| SHL $Ahi,64-34,$t0hi
+ XOR $t0hi,$S0hi,$S0hi
+|| XOR $t0lo,$S0lo,$S0lo
+|| ADD $MAJhi,$T1hi,$T2hi
+|| ADDU $MAJlo,$T1carry:$T1lo,$T2carry:$T2lo ; T2 = T1+Maj(a,b,c)
+|| SHRU $Alo,34-32,$t0hi
+|| SHL $Alo,64-34,$t0lo
+ XOR $t0hi,$S0hi,$S0hi
+|| XOR $t0lo,$S0lo,$S0lo
+|| ADD $Ehi,$T1hi,$T1hi
+|| ADDU $Elo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += e
+|| SHRU $Ahi,39-32,$t0lo
+|| SHL $Ahi,64-39,$t0hi
+ [B0] BNOP loop0_15?
+|| [B0] LDNDW *$INP++,B11:B10 ; pre-fetch input
+ XOR $t0hi,$S0hi,$S0hi
+|| XOR $t0lo,$S0lo,$S0lo
+|| SHRU $Alo,39-32,$t0hi
+|| SHL $Alo,64-39,$t0lo
+||[!B0] LDW *${Xihi}[28],$T1hi
+||[!B0] LDW *${Xilo}[28],$T1lo ; X[i+14]
+ XOR $t0hi,$S0hi,$S0hi
+|| XOR $t0lo,$S0lo,$S0lo ; Sigma0(a)
+|| ADD $T1carry,$T1hi,$Ehi
+|| ROTL $T1lo,0,$Elo ; e = T1, "ghost" value
+||[!B1] BNOP break?
+ ADD $S0hi,$T2hi,$T2hi
+|| ADDU $S0lo,$T2carry:$T2lo,$T2carry:$T2lo ;