From 5f1841cdcae459924c3d1d92fcaf3110068c7cda Mon Sep 17 00:00:00 2001
From: Andy Polyakov
Date: Tue, 3 May 2005 15:42:05 +0000
Subject: Rename amd64 modules to x86_64 and update RC4 implementation.

---
 crypto/rc4/Makefile          |   2 +-
 crypto/rc4/asm/rc4-amd64.pl  | 160 -----------------------------
 crypto/rc4/asm/rc4-x86_64.pl | 238 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 239 insertions(+), 161 deletions(-)
 delete mode 100755 crypto/rc4/asm/rc4-amd64.pl
 create mode 100755 crypto/rc4/asm/rc4-x86_64.pl

diff --git a/crypto/rc4/Makefile b/crypto/rc4/Makefile
index a392e3e997..b41bc23e50 100644
--- a/crypto/rc4/Makefile
+++ b/crypto/rc4/Makefile
@@ -58,7 +58,7 @@ rx86-cof.s: asm/rc4-586.pl ../perlasm/x86asm.pl
 rx86-out.s: asm/rc4-586.pl ../perlasm/x86asm.pl
 	(cd asm; $(PERL) rc4-586.pl a.out $(CFLAGS) > ../$@)
 
-rc4-amd64.s:	asm/rc4-amd64.pl;	$(PERL) asm/rc4-amd64.pl $@
+rc4-x86_64.s:	asm/rc4-x86_64.pl;	$(PERL) asm/rc4-x86_64.pl $@
 
 rc4-ia64.s: asm/rc4-ia64.S
 	$(CC) $(CFLAGS) -E asm/rc4-ia64.S > $@
diff --git a/crypto/rc4/asm/rc4-amd64.pl b/crypto/rc4/asm/rc4-amd64.pl
deleted file mode 100755
index d425318afe..0000000000
--- a/crypto/rc4/asm/rc4-amd64.pl
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov for the OpenSSL
-# project. Rights for redistribution and usage in source and binary
-# forms are granted according to the OpenSSL license.
-# ====================================================================
-#
-# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
-# "hand-coded assembler"] doesn't stand for the whole improvement
-# coefficient. It turned out that eliminating RC4_CHAR from config
-# line results in ~40% improvement (yes, even for C implementation).
-# Presumably it has everything to do with AMD cache architecture and
-# RAW or whatever penalties. Once again! The module *requires* config
-# line *without* RC4_CHAR! As for coding "secret," I bet on partial
-# register arithmetics. For example instead of 'inc %r8; and $255,%r8'
-# I simply 'inc %r8b'. Even though optimization manual discourages
-# to operate on partial registers, it turned out to be the best bet.
-# At least for AMD... How IA32E would perform remains to be seen...
-
-# As was shown by Marc Bevand reordering of couple of load operations
-# results in even higher performance gain of 3.3x:-) At least on
-# Opteron... For reference, 1x in this case is RC4_CHAR C-code
-# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
-# Latter means that if you want to *estimate* what to expect from
-# *your* CPU, then multiply 54 by 3.3 and clock frequency in GHz.
-
-# Intel P4 EM64T core was found to run the AMD64 code really slow...
-# The only way to achieve comparable performance on P4 is to keep
-# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
-# compose blended code, which would perform even within 30% marginal
-# on either AMD and Intel platforms, I implement both cases. See
-# rc4_skey.c for further details... This applies to 0.9.8 and later.
-# In 0.9.7 context RC4_CHAR codepath is never engaged and ~70 bytes
-# of code remain redundant.
-
-$output=shift;
-open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
-
-$dat="%rdi";	# arg1
-$len="%rsi";	# arg2
-$inp="%rdx";	# arg3
-$out="%rcx";	# arg4
-
-$XX="%r10";
-$TX="%r8";
-$YY="%r11";
-$TY="%r9";
-
-$code=<<___;
-.text
-
-.globl	RC4
-.type	RC4,\@function,4
-.align	16
-RC4:	or	$len,$len
-	jne	.Lentry
-	ret
-.Lentry:
-	add	\$8,$dat
-	movl	-8($dat),$XX#d
-	movl	-4($dat),$YY#d
-	cmpl	\$-1,256($dat)
-	je	.LRC4_CHAR
-	test	\$-8,$len
-	jz	.Lloop1
-.align	16
-.Lloop8:
-	inc	$XX#b
-	movl	($dat,$XX,4),$TX#d
-	add	$TX#b,$YY#b
-	movl	($dat,$YY,4),$TY#d
-	movl	$TX#d,($dat,$YY,4)
-	movl	$TY#d,($dat,$XX,4)
-	add	$TX#b,$TY#b
-	inc	$XX#b
-	movl	($dat,$XX,4),$TX#d
-	movb	($dat,$TY,4),%al
-___
-for ($i=1;$i<=6;$i++) {
-$code.=<<___;
-	add	$TX#b,$YY#b
-	ror	\$8,%rax
-	movl	($dat,$YY,4),$TY#d
-	movl	$TX#d,($dat,$YY,4)
-	movl	$TY#d,($dat,$XX,4)
-	add	$TX#b,$TY#b
-	inc	$XX#b
-	movl	($dat,$XX,4),$TX#d
-	movb	($dat,$TY,4),%al
-___
-}
-$code.=<<___;
-	add	$TX#b,$YY#b
-	ror	\$8,%rax
-	movl	($dat,$YY,4),$TY#d
-	movl	$TX#d,($dat,$YY,4)
-	movl	$TY#d,($dat,$XX,4)
-	sub	\$8,$len
-	add	$TY#b,$TX#b
-	movb	($dat,$TX,4),%al
-	ror	\$8,%rax
-	add	\$8,$inp
-	add	\$8,$out
-
-	xor	-8($inp),%rax
-	mov	%rax,-8($out)
-
-	test	\$-8,$len
-	jnz	.Lloop8
-	cmp	\$0,$len
-	jne	.Lloop1
-.Lexit:
-	movl	$XX#d,-8($dat)
-	movl	$YY#d,-4($dat)
-	ret
-.align	16
-.Lloop1:
-	movzb	($inp),%eax
-	inc	$XX#b
-	movl	($dat,$XX,4),$TX#d
-	add	$TX#b,$YY#b
-	movl	($dat,$YY,4),$TY#d
-	movl	$TX#d,($dat,$YY,4)
-	movl	$TY#d,($dat,$XX,4)
-	add	$TY#b,$TX#b
-	movl	($dat,$TX,4),$TY#d
-	xor	$TY,%rax
-	inc	$inp
-	movb	%al,($out)
-	inc	$out
-	dec	$len
-	jnz	.Lloop1
-	jmp	.Lexit
-
-.align	16
-.LRC4_CHAR:
-	add	\$1,$XX#b
-	movzb	($dat,$XX),$TX#d
-	add	$TX#b,$YY#b
-	movzb	($dat,$YY),$TY#d
-	movb	$TX#b,($dat,$YY)
-	movb	$TY#b,($dat,$XX)
-	add	$TX#b,$TY#b
-	movzb	($dat,$TY),$TY#d
-	xorb	($inp),$TY#b
-	movb	$TY#b,($out)
-	lea	1($inp),$inp
-	lea	1($out),$out
-	sub	\$1,$len
-	jnz	.LRC4_CHAR
-	jmp	.Lexit
-.size	RC4,.-RC4
-___
-
-$code =~ s/#([bwd])/$1/gm;
-
-print $code;
-
-close STDOUT;
diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl
new file mode 100755
index 0000000000..89239400d4
--- /dev/null
+++ b/crypto/rc4/asm/rc4-x86_64.pl
@@ -0,0 +1,238 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov for the OpenSSL
+# project. Rights for redistribution and usage in source and binary
+# forms are granted according to the OpenSSL license.
+# ====================================================================
+#
+# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
+# "hand-coded assembler"] doesn't stand for the whole improvement
+# coefficient. It turned out that eliminating RC4_CHAR from config
+# line results in ~40% improvement (yes, even for C implementation).
+# Presumably it has everything to do with AMD cache architecture and
+# RAW or whatever penalties. Once again! The module *requires* config
+# line *without* RC4_CHAR! As for coding "secret," I bet on partial
+# register arithmetics. For example instead of 'inc %r8; and $255,%r8'
+# I simply 'inc %r8b'. Even though optimization manual discourages
+# to operate on partial registers, it turned out to be the best bet.
+# At least for AMD... How IA32E would perform remains to be seen...
+
+# As was shown by Marc Bevand reordering of couple of load operations
+# results in even higher performance gain of 3.3x:-) At least on
+# Opteron... For reference, 1x in this case is RC4_CHAR C-code
+# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
+# Latter means that if you want to *estimate* what to expect from
+# *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz.
+
+# Intel P4 EM64T core was found to run the AMD64 code really slow...
+# The only way to achieve comparable performance on P4 was to keep
+# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
+# compose blended code, which would perform even within 30% marginal
+# on either AMD and Intel platforms, I implement both cases. See
+# rc4_skey.c for further details...
+
+# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
+# those with add/sub results in 50% performance improvement of folded
+# loop...
+
+# As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
+# performance by >30% [unlike P4 32-bit case that is]. But this is
+# provided that loads are reordered even more aggressively! Both code
+# pathes, AMD64 and EM64T, reorder loads in essentially same manner
+# as my IA-64 implementation. On Opteron this resulted in modest 5%
+# improvement [I had to test it], while final Intel P4 performance
+# achieves respectful 432MBps on 2.8GHz processor now. For reference.
+# If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than
+# RC4_INT code-path. While if executed on Opteron, it's is only 25%
+# slower than the latter...
+
+$output=shift;
+open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
+
+$dat="%rdi";	# arg1
+$len="%rsi";	# arg2
+$inp="%rdx";	# arg3
+$out="%rcx";	# arg4
+
+@XX=("%r8","%r10");
+@TX=("%r9","%r11");
+$YY="%r12";
+$TY="%r13";
+
+$code=<<___;
+.text
+
+.globl	RC4
+.type	RC4,\@function,4
+.align	16
+RC4:	or	$len,$len
+	jne	.Lentry
+	ret
+.Lentry:
+	push	%r12
+	push	%r13
+
+	add	\$8,$dat
+	movl	-8($dat),$XX[0]#d
+	movl	-4($dat),$YY#d
+	cmpl	\$-1,256($dat)
+	je	.LRC4_CHAR
+	inc	$XX[0]#b
+	movl	($dat,$XX[0],4),$TX[0]#d
+	test	\$-8,$len
+	jz	.Lloop1
+	jmp	.Lloop8
+.align	16
+.Lloop8:
+___
+for ($i=0;$i<8;$i++) {
+$code.=<<___;
+	add	$TX[0]#b,$YY#b
+	mov	$XX[0],$XX[1]
+	movl	($dat,$YY,4),$TY#d
+	ror	\$8,%rax			# ror is redundant when $i=0
+	inc	$XX[1]#b
+	movl	($dat,$XX[1],4),$TX[1]#d
+	cmp	$XX[1],$YY
+	movl	$TX[0]#d,($dat,$YY,4)
+	cmove	$TX[0],$TX[1]
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TX[0]#b,$TY#b
+	movb	($dat,$TY,4),%al
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+}
+$code.=<<___;
+	ror	\$8,%rax
+	sub	\$8,$len
+
+	xor	($inp),%rax
+	add	\$8,$inp
+	mov	%rax,($out)
+	add	\$8,$out
+
+	test	\$-8,$len
+	jnz	.Lloop8
+	cmp	\$0,$len
+	jne	.Lloop1
+___
+$code.=<<___;
+.Lexit:
+	sub	\$1,$XX[0]#b
+	movl	$XX[0]#d,-8($dat)
+	movl	$YY#d,-4($dat)
+
+	pop	%r13
+	pop	%r12
+	ret
+.align	16
+.Lloop1:
+	add	$TX[0]#b,$YY#b
+	movl	($dat,$YY,4),$TY#d
+	movl	$TX[0]#d,($dat,$YY,4)
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TY#b,$TX[0]#b
+	inc	$XX[0]#b
+	movl	($dat,$TX[0],4),$TY#d
+	movl	($dat,$XX[0],4),$TX[0]#d
+	xorb	($inp),$TY#b
+	inc	$inp
+	movb	$TY#b,($out)
+	inc	$out
+	dec	$len
+	jnz	.Lloop1
+	jmp	.Lexit
+
+.align	16
+.LRC4_CHAR:
+	add	\$1,$XX[0]#b
+	movzb	($dat,$XX[0]),$TX[0]#d
+	test	\$-8,$len
+	jz	.Lcloop1
+	push	%rbx
+	jmp	.Lcloop8
+.align	16
+.Lcloop8:
+	mov	($inp),%eax
+	mov	4($inp),%ebx
+___
+# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
+for ($i=0;$i<4;$i++) {
+$code.=<<___;
+	add	$TX[0]#b,$YY#b
+	lea	1($XX[0]),$XX[1]
+	movzb	($dat,$YY),$TY#d
+	movzb	$XX[1]#b,$XX[1]#d
+	movzb	($dat,$XX[1]),$TX[1]#d
+	movb	$TX[0]#b,($dat,$YY)
+	cmp	$XX[1],$YY
+	movb	$TY#b,($dat,$XX[0])
+	jne	.Lcmov$i			# Intel cmov is sloooow...
+	mov	$TX[0],$TX[1]
+.Lcmov$i:
+	add	$TX[0]#b,$TY#b
+	xor	($dat,$TY),%al
+	ror	\$8,%eax
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+}
+for ($i=4;$i<8;$i++) {
+$code.=<<___;
+	add	$TX[0]#b,$YY#b
+	lea	1($XX[0]),$XX[1]
+	movzb	($dat,$YY),$TY#d
+	movzb	$XX[1]#b,$XX[1]
+	movzb	($dat,$XX[1]),$TX[1]#d
+	movb	$TX[0]#b,($dat,$YY)
+	cmp	$XX[1],$YY
+	movb	$TY#b,($dat,$XX[0])
+	jne	.Lcmov$i			# Intel cmov is sloooow...
+	mov	$TX[0],$TX[1]
+.Lcmov$i:
+	add	$TX[0]#b,$TY#b
+	xor	($dat,$TY),%bl
+	ror	\$8,%ebx
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+}
+$code.=<<___;
+	lea	-8($len),$len
+	mov	%eax,($out)
+	lea	8($inp),$inp
+	mov	%ebx,4($out)
+	lea	8($out),$out
+
+	test	\$-8,$len
+	jnz	.Lcloop8
+	pop	%rbx
+	cmp	\$0,$len
+	jne	.Lcloop1
+	jmp	.Lexit
+___
+$code.=<<___;
+.align	16
+.Lcloop1:
+	add	$TX[0]#b,$YY#b
+	movzb	($dat,$YY),$TY#d
+	movb	$TX[0]#b,($dat,$YY)
+	movb	$TY#b,($dat,$XX[0])
+	add	$TX[0]#b,$TY#b
+	add	\$1,$XX[0]#b
+	movzb	($dat,$TY),$TY#d
+	movzb	($dat,$XX[0]),$TX[0]#d
+	xorb	($inp),$TY#b
+	lea	1($inp),$inp
+	movb	$TY#b,($out)
+	lea	1($out),$out
+	sub	\$1,$len
+	jnz	.Lcloop1
+	jmp	.Lexit
+.size	RC4,.-RC4
+___
+
+$code =~ s/#([bwd])/$1/gm;
+
+print $code;
+
+close STDOUT;
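
A note on what the module above computes, for readers who do not want to trace the generated assembly: both code paths implement the ordinary RC4 keystream loop, and differ only in whether the S-box is held as ints (RC4_INT) or bytes (RC4_CHAR). Below is a minimal C sketch of the byte-at-a-time case, roughly what the .Lloop1 path does per byte; the type and function names are illustrative only, not OpenSSL's actual RC4_KEY or rc4_enc.c code. The '& 0xff' index wraps are exactly what the assembly gets for free from partial-register arithmetic such as 'inc %r8b'.

    #include <stddef.h>

    /* Illustrative RC4 state: int-sized S-box entries (the RC4_INT layout the
     * commit message recommends on AMD64) plus the two indices x and y. */
    typedef struct {
        unsigned int x, y;
        unsigned int data[256];
    } rc4_sketch_state;

    /* Byte-at-a-time RC4 keystream loop, equivalent in effect to .Lloop1. */
    static void rc4_bytes(rc4_sketch_state *key, size_t len,
                          const unsigned char *in, unsigned char *out)
    {
        unsigned int *d = key->data, x = key->x, y = key->y;

        while (len--) {
            x = (x + 1) & 0xff;          /* 'inc %r8b' wraps the index for free */
            unsigned int tx = d[x];
            y = (y + tx) & 0xff;         /* byte-sized add wraps likewise       */
            unsigned int ty = d[y];
            d[y] = tx;                   /* swap S[x] and S[y]                  */
            d[x] = ty;
            *out++ = *in++ ^ (unsigned char)d[(tx + ty) & 0xff];
        }
        key->x = x;
        key->y = y;
    }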
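
The .Lloop8 path amortizes memory traffic by producing eight keystream bytes per iteration, packing them into %rax with the chain of 'ror \$8,%rax' instructions, and applying them to the input with a single 64-bit xor and store. A rough structural equivalent in C follows; it is only a sketch, assumes a little-endian target, and uses memcpy for the unaligned 8-byte accesses that the assembly performs directly.

    #include <stdint.h>
    #include <string.h>

    /* Process whole 8-byte blocks: build a 64-bit keystream word, XOR it into
     * the input quad-word, store the result. Mirrors the ror/xor/mov pattern
     * of .Lloop8; any tail of fewer than 8 bytes falls back to the 1x loop. */
    static void rc4_blocks8(unsigned int *d, unsigned int *px, unsigned int *py,
                            size_t len, const unsigned char *in, unsigned char *out)
    {
        unsigned int x = *px, y = *py;

        while (len >= 8) {
            uint64_t ks = 0;
            for (int i = 0; i < 8; i++) {
                x = (x + 1) & 0xff;
                unsigned int tx = d[x];
                y = (y + tx) & 0xff;
                unsigned int ty = d[y];
                d[y] = tx;
                d[x] = ty;
                ks |= (uint64_t)(d[(tx + ty) & 0xff] & 0xff) << (8 * i);
            }
            uint64_t block;
            memcpy(&block, in, 8);       /* like 'xor (%rdx),%rax' on x86-64 */
            block ^= ks;
            memcpy(out, &block, 8);
            in += 8; out += 8; len -= 8;
        }
        *px = x; *py = y;
    }

As the header comments note, most of the measured gain comes from how aggressively the loads inside that inner loop are scheduled, rather than from the packing itself.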
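
One subtlety in both unrolled paths: S[x+1] for the next round is loaded before the current round's swap has been written back. When the incremented index collides with $YY, that early load is stale, because the swap is about to store the current S[x] value at that slot, so the register has to be patched with that value instead. The RC4_INT path does the fixup with cmp/cmove, while the RC4_CHAR path uses the cmp/jne .Lcmov$i branch because cmov was found to be slow on the P4. In a small C sketch (a hypothetical helper, not part of OpenSSL), the repair is simply:

    /* Early-load S[x_next] for the next round and repair it when the current
     * round's swap (which stores tx at d[y]) is about to overwrite it. */
    static inline unsigned int load_next_tx(const unsigned int *d,
                                            unsigned int x_next,
                                            unsigned int y,
                                            unsigned int tx)
    {
        unsigned int tx_next = d[x_next];   /* issued before d[y] = tx is written */
        if (x_next == y)
            tx_next = tx;                   /* what d[x_next] holds after the swap */
        return tx_next;
    }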