From 5f1841cdcae459924c3d1d92fcaf3110068c7cda Mon Sep 17 00:00:00 2001 From: Andy Polyakov Date: Tue, 3 May 2005 15:42:05 +0000 Subject: Rename amd64 modules to x86_64 and update RC4 implementation. --- crypto/Makefile | 4 +- crypto/amd64cpuid.pl | 138 ------------------------- crypto/rc4/Makefile | 2 +- crypto/rc4/asm/rc4-amd64.pl | 160 ----------------------------- crypto/rc4/asm/rc4-x86_64.pl | 238 +++++++++++++++++++++++++++++++++++++++++++ crypto/x86_64cpuid.pl | 138 +++++++++++++++++++++++++ 6 files changed, 379 insertions(+), 301 deletions(-) delete mode 100644 crypto/amd64cpuid.pl delete mode 100755 crypto/rc4/asm/rc4-amd64.pl create mode 100755 crypto/rc4/asm/rc4-x86_64.pl create mode 100644 crypto/x86_64cpuid.pl (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index be7c25041a..ae9cf4e146 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -70,8 +70,8 @@ x86cpuid-cof.s: x86cpuid.pl perlasm/x86asm.pl x86cpuid-out.s: x86cpuid.pl perlasm/x86asm.pl $(PERL) x86cpuid.pl a.out $(CFLAGS) $(PROCESSOR) > $@ -amd64cpuid.s: amd64cpuid.pl - $(PERL) amd64cpuid.pl $@ +x86_64cpuid.s: x86_64cpuid.pl + $(PERL) x86_64cpuid.pl $@ ia64cpuid.s: ia64cpuid.S $(CC) $(CFLAGS) -E ia64cpuid.S > $@ diff --git a/crypto/amd64cpuid.pl b/crypto/amd64cpuid.pl deleted file mode 100644 index 777d557783..0000000000 --- a/crypto/amd64cpuid.pl +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env perl - -$output=shift; -$win64a=1 if ($output =~ /win64a\.[s|asm]/); -open STDOUT,">$output" || die "can't open $output: $!"; - -print<<___ if(defined($win64a)); -_TEXT SEGMENT -PUBLIC OPENSSL_rdtsc -ALIGN 16 -OPENSSL_rdtsc PROC - rdtsc - shl rdx,32 - or rax,rdx - ret -OPENSSL_rdtsc ENDP - -PUBLIC OPENSSL_atomic_add -ALIGN 16 -OPENSSL_atomic_add PROC - mov eax,DWORD PTR[rcx] -\$Lspin: lea r8,DWORD PTR[rdx+rax] -lock cmpxchg DWORD PTR[rcx],r8d - jne \$Lspin - mov eax,r8d - cdqe - ret -OPENSSL_atomic_add ENDP - -PUBLIC OPENSSL_wipe_cpu -ALIGN 16 -OPENSSL_wipe_cpu PROC - pxor xmm0,xmm0 - pxor xmm1,xmm1 - pxor xmm2,xmm2 - pxor xmm3,xmm3 - pxor xmm4,xmm4 - pxor xmm5,xmm5 - xor rcx,rcx - xor rdx,rdx - xor r8,r8 - xor r9,r9 - xor r10,r10 - xor r11,r11 - lea rax,QWORD PTR[rsp+8] - ret -OPENSSL_wipe_cpu ENDP - -OPENSSL_ia32_cpuid PROC - mov r8,rbx - mov eax,1 - cpuid - shl rcx,32 - mov eax,edx - mov rbx,r8 - or rax,rcx - ret -OPENSSL_ia32_cpuid ENDP -_TEXT ENDS - -CRT\$XIU SEGMENT -EXTRN OPENSSL_cpuid_setup:PROC -DQ OPENSSL_cpuid_setup -CRT\$XIU ENDS -END -___ -print<<___ if(!defined($win64a)); -.text -.globl OPENSSL_rdtsc -.align 16 -OPENSSL_rdtsc: - rdtsc - shl \$32,%rdx - or %rdx,%rax - ret -.size OPENSSL_rdtsc,.-OPENSSL_rdtsc - -.globl OPENSSL_atomic_add -.type OPENSSL_atomic_add,\@function -.align 16 -OPENSSL_atomic_add: - movl (%rdi),%eax -.Lspin: lea (%rsi,%rax),%r8 -lock; cmpxchg %r8d,(%rdi) - jne .Lspin - mov %r8d,%eax - cdqe - ret -.size OPENSSL_atomic_add,.-OPENSSL_atomic_add - -.globl OPENSSL_wipe_cpu -.type OPENSSL_wipe_cpu,\@function -.align 16 -OPENSSL_wipe_cpu: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 - pxor %xmm8,%xmm8 - pxor %xmm9,%xmm9 - pxor %xmm10,%xmm10 - pxor %xmm11,%xmm11 - pxor %xmm12,%xmm12 - pxor %xmm13,%xmm13 - pxor %xmm14,%xmm14 - pxor %xmm15,%xmm15 - xor %rcx,%rcx - xor %rdx,%rdx - xor %rsi,%rsi - xor %rdi,%rdi - xor %r8,%r8 - xor %r9,%r9 - xor %r10,%r10 - xor %r11,%r11 - lea 8(%rsp),%rax - ret -.size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu - -.globl OPENSSL_ia32_cpuid -.align 16 
-OPENSSL_ia32_cpuid: - mov %rbx,%r8 - mov \$1,%eax - cpuid - shl \$32,%rcx - mov %edx,%eax - mov %r8,%rbx - or %rcx,%rax - ret -.size OPENSSL_ia32_cpuid,.-OPENSSL_ia32_cpuid - -.section .init - call OPENSSL_cpuid_setup -___ diff --git a/crypto/rc4/Makefile b/crypto/rc4/Makefile index a392e3e997..b41bc23e50 100644 --- a/crypto/rc4/Makefile +++ b/crypto/rc4/Makefile @@ -58,7 +58,7 @@ rx86-cof.s: asm/rc4-586.pl ../perlasm/x86asm.pl rx86-out.s: asm/rc4-586.pl ../perlasm/x86asm.pl (cd asm; $(PERL) rc4-586.pl a.out $(CFLAGS) > ../$@) -rc4-amd64.s: asm/rc4-amd64.pl; $(PERL) asm/rc4-amd64.pl $@ +rc4-x86_64.s: asm/rc4-x86_64.pl; $(PERL) asm/rc4-x86_64.pl $@ rc4-ia64.s: asm/rc4-ia64.S $(CC) $(CFLAGS) -E asm/rc4-ia64.S > $@ diff --git a/crypto/rc4/asm/rc4-amd64.pl b/crypto/rc4/asm/rc4-amd64.pl deleted file mode 100755 index d425318afe..0000000000 --- a/crypto/rc4/asm/rc4-amd64.pl +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env perl -# -# ==================================================================== -# Written by Andy Polyakov for the OpenSSL -# project. Rights for redistribution and usage in source and binary -# forms are granted according to the OpenSSL license. -# ==================================================================== -# -# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in -# "hand-coded assembler"] doesn't stand for the whole improvement -# coefficient. It turned out that eliminating RC4_CHAR from config -# line results in ~40% improvement (yes, even for C implementation). -# Presumably it has everything to do with AMD cache architecture and -# RAW or whatever penalties. Once again! The module *requires* config -# line *without* RC4_CHAR! As for coding "secret," I bet on partial -# register arithmetics. For example instead of 'inc %r8; and $255,%r8' -# I simply 'inc %r8b'. Even though optimization manual discourages -# to operate on partial registers, it turned out to be the best bet. -# At least for AMD... How IA32E would perform remains to be seen... - -# As was shown by Marc Bevand reordering of couple of load operations -# results in even higher performance gain of 3.3x:-) At least on -# Opteron... For reference, 1x in this case is RC4_CHAR C-code -# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock. -# Latter means that if you want to *estimate* what to expect from -# *your* CPU, then multiply 54 by 3.3 and clock frequency in GHz. - -# Intel P4 EM64T core was found to run the AMD64 code really slow... -# The only way to achieve comparable performance on P4 is to keep -# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to -# compose blended code, which would perform even within 30% marginal -# on either AMD and Intel platforms, I implement both cases. See -# rc4_skey.c for further details... This applies to 0.9.8 and later. -# In 0.9.7 context RC4_CHAR codepath is never engaged and ~70 bytes -# of code remain redundant. 
- -$output=shift; -open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output"; - -$dat="%rdi"; # arg1 -$len="%rsi"; # arg2 -$inp="%rdx"; # arg3 -$out="%rcx"; # arg4 - -$XX="%r10"; -$TX="%r8"; -$YY="%r11"; -$TY="%r9"; - -$code=<<___; -.text - -.globl RC4 -.type RC4,\@function,4 -.align 16 -RC4: or $len,$len - jne .Lentry - ret -.Lentry: - add \$8,$dat - movl -8($dat),$XX#d - movl -4($dat),$YY#d - cmpl \$-1,256($dat) - je .LRC4_CHAR - test \$-8,$len - jz .Lloop1 -.align 16 -.Lloop8: - inc $XX#b - movl ($dat,$XX,4),$TX#d - add $TX#b,$YY#b - movl ($dat,$YY,4),$TY#d - movl $TX#d,($dat,$YY,4) - movl $TY#d,($dat,$XX,4) - add $TX#b,$TY#b - inc $XX#b - movl ($dat,$XX,4),$TX#d - movb ($dat,$TY,4),%al -___ -for ($i=1;$i<=6;$i++) { -$code.=<<___; - add $TX#b,$YY#b - ror \$8,%rax - movl ($dat,$YY,4),$TY#d - movl $TX#d,($dat,$YY,4) - movl $TY#d,($dat,$XX,4) - add $TX#b,$TY#b - inc $XX#b - movl ($dat,$XX,4),$TX#d - movb ($dat,$TY,4),%al -___ -} -$code.=<<___; - add $TX#b,$YY#b - ror \$8,%rax - movl ($dat,$YY,4),$TY#d - movl $TX#d,($dat,$YY,4) - movl $TY#d,($dat,$XX,4) - sub \$8,$len - add $TY#b,$TX#b - movb ($dat,$TX,4),%al - ror \$8,%rax - add \$8,$inp - add \$8,$out - - xor -8($inp),%rax - mov %rax,-8($out) - - test \$-8,$len - jnz .Lloop8 - cmp \$0,$len - jne .Lloop1 -.Lexit: - movl $XX#d,-8($dat) - movl $YY#d,-4($dat) - ret -.align 16 -.Lloop1: - movzb ($inp),%eax - inc $XX#b - movl ($dat,$XX,4),$TX#d - add $TX#b,$YY#b - movl ($dat,$YY,4),$TY#d - movl $TX#d,($dat,$YY,4) - movl $TY#d,($dat,$XX,4) - add $TY#b,$TX#b - movl ($dat,$TX,4),$TY#d - xor $TY,%rax - inc $inp - movb %al,($out) - inc $out - dec $len - jnz .Lloop1 - jmp .Lexit - -.align 16 -.LRC4_CHAR: - add \$1,$XX#b - movzb ($dat,$XX),$TX#d - add $TX#b,$YY#b - movzb ($dat,$YY),$TY#d - movb $TX#b,($dat,$YY) - movb $TY#b,($dat,$XX) - add $TX#b,$TY#b - movzb ($dat,$TY),$TY#d - xorb ($inp),$TY#b - movb $TY#b,($out) - lea 1($inp),$inp - lea 1($out),$out - sub \$1,$len - jnz .LRC4_CHAR - jmp .Lexit -.size RC4,.-RC4 -___ - -$code =~ s/#([bwd])/$1/gm; - -print $code; - -close STDOUT; diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl new file mode 100755 index 0000000000..89239400d4 --- /dev/null +++ b/crypto/rc4/asm/rc4-x86_64.pl @@ -0,0 +1,238 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. Rights for redistribution and usage in source and binary +# forms are granted according to the OpenSSL license. +# ==================================================================== +# +# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in +# "hand-coded assembler"] doesn't stand for the whole improvement +# coefficient. It turned out that eliminating RC4_CHAR from config +# line results in ~40% improvement (yes, even for C implementation). +# Presumably it has everything to do with AMD cache architecture and +# RAW or whatever penalties. Once again! The module *requires* config +# line *without* RC4_CHAR! As for coding "secret," I bet on partial +# register arithmetics. For example instead of 'inc %r8; and $255,%r8' +# I simply 'inc %r8b'. Even though optimization manual discourages +# to operate on partial registers, it turned out to be the best bet. +# At least for AMD... How IA32E would perform remains to be seen... + +# As was shown by Marc Bevand reordering of couple of load operations +# results in even higher performance gain of 3.3x:-) At least on +# Opteron... 
For reference, 1x in this case is RC4_CHAR C-code +# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock. +# Latter means that if you want to *estimate* what to expect from +# *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz. + +# Intel P4 EM64T core was found to run the AMD64 code really slow... +# The only way to achieve comparable performance on P4 was to keep +# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to +# compose blended code, which would perform even within 30% marginal +# on either AMD and Intel platforms, I implement both cases. See +# rc4_skey.c for further details... + +# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing +# those with add/sub results in 50% performance improvement of folded +# loop... + +# As was shown by Zou Nanhai loop unrolling can improve Intel EM64T +# performance by >30% [unlike P4 32-bit case that is]. But this is +# provided that loads are reordered even more aggressively! Both code +# pathes, AMD64 and EM64T, reorder loads in essentially same manner +# as my IA-64 implementation. On Opteron this resulted in modest 5% +# improvement [I had to test it], while final Intel P4 performance +# achieves respectful 432MBps on 2.8GHz processor now. For reference. +# If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than +# RC4_INT code-path. While if executed on Opteron, it's is only 25% +# slower than the latter... + +$output=shift; +open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output"; + +$dat="%rdi"; # arg1 +$len="%rsi"; # arg2 +$inp="%rdx"; # arg3 +$out="%rcx"; # arg4 + +@XX=("%r8","%r10"); +@TX=("%r9","%r11"); +$YY="%r12"; +$TY="%r13"; + +$code=<<___; +.text + +.globl RC4 +.type RC4,\@function,4 +.align 16 +RC4: or $len,$len + jne .Lentry + ret +.Lentry: + push %r12 + push %r13 + + add \$8,$dat + movl -8($dat),$XX[0]#d + movl -4($dat),$YY#d + cmpl \$-1,256($dat) + je .LRC4_CHAR + inc $XX[0]#b + movl ($dat,$XX[0],4),$TX[0]#d + test \$-8,$len + jz .Lloop1 + jmp .Lloop8 +.align 16 +.Lloop8: +___ +for ($i=0;$i<8;$i++) { +$code.=<<___; + add $TX[0]#b,$YY#b + mov $XX[0],$XX[1] + movl ($dat,$YY,4),$TY#d + ror \$8,%rax # ror is redundant when $i=0 + inc $XX[1]#b + movl ($dat,$XX[1],4),$TX[1]#d + cmp $XX[1],$YY + movl $TX[0]#d,($dat,$YY,4) + cmove $TX[0],$TX[1] + movl $TY#d,($dat,$XX[0],4) + add $TX[0]#b,$TY#b + movb ($dat,$TY,4),%al +___ +push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers +} +$code.=<<___; + ror \$8,%rax + sub \$8,$len + + xor ($inp),%rax + add \$8,$inp + mov %rax,($out) + add \$8,$out + + test \$-8,$len + jnz .Lloop8 + cmp \$0,$len + jne .Lloop1 +___ +$code.=<<___; +.Lexit: + sub \$1,$XX[0]#b + movl $XX[0]#d,-8($dat) + movl $YY#d,-4($dat) + + pop %r13 + pop %r12 + ret +.align 16 +.Lloop1: + add $TX[0]#b,$YY#b + movl ($dat,$YY,4),$TY#d + movl $TX[0]#d,($dat,$YY,4) + movl $TY#d,($dat,$XX[0],4) + add $TY#b,$TX[0]#b + inc $XX[0]#b + movl ($dat,$TX[0],4),$TY#d + movl ($dat,$XX[0],4),$TX[0]#d + xorb ($inp),$TY#b + inc $inp + movb $TY#b,($out) + inc $out + dec $len + jnz .Lloop1 + jmp .Lexit + +.align 16 +.LRC4_CHAR: + add \$1,$XX[0]#b + movzb ($dat,$XX[0]),$TX[0]#d + test \$-8,$len + jz .Lcloop1 + push %rbx + jmp .Lcloop8 +.align 16 +.Lcloop8: + mov ($inp),%eax + mov 4($inp),%ebx +___ +# unroll 2x4-wise, because 64-bit rotates kill Intel P4... 
+for ($i=0;$i<4;$i++) { +$code.=<<___; + add $TX[0]#b,$YY#b + lea 1($XX[0]),$XX[1] + movzb ($dat,$YY),$TY#d + movzb $XX[1]#b,$XX[1]#d + movzb ($dat,$XX[1]),$TX[1]#d + movb $TX[0]#b,($dat,$YY) + cmp $XX[1],$YY + movb $TY#b,($dat,$XX[0]) + jne .Lcmov$i # Intel cmov is sloooow... + mov $TX[0],$TX[1] +.Lcmov$i: + add $TX[0]#b,$TY#b + xor ($dat,$TY),%al + ror \$8,%eax +___ +push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers +} +for ($i=4;$i<8;$i++) { +$code.=<<___; + add $TX[0]#b,$YY#b + lea 1($XX[0]),$XX[1] + movzb ($dat,$YY),$TY#d + movzb $XX[1]#b,$XX[1] + movzb ($dat,$XX[1]),$TX[1]#d + movb $TX[0]#b,($dat,$YY) + cmp $XX[1],$YY + movb $TY#b,($dat,$XX[0]) + jne .Lcmov$i # Intel cmov is sloooow... + mov $TX[0],$TX[1] +.Lcmov$i: + add $TX[0]#b,$TY#b + xor ($dat,$TY),%bl + ror \$8,%ebx +___ +push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers +} +$code.=<<___; + lea -8($len),$len + mov %eax,($out) + lea 8($inp),$inp + mov %ebx,4($out) + lea 8($out),$out + + test \$-8,$len + jnz .Lcloop8 + pop %rbx + cmp \$0,$len + jne .Lcloop1 + jmp .Lexit +___ +$code.=<<___; +.align 16 +.Lcloop1: + add $TX[0]#b,$YY#b + movzb ($dat,$YY),$TY#d + movb $TX[0]#b,($dat,$YY) + movb $TY#b,($dat,$XX[0]) + add $TX[0]#b,$TY#b + add \$1,$XX[0]#b + movzb ($dat,$TY),$TY#d + movzb ($dat,$XX[0]),$TX[0]#d + xorb ($inp),$TY#b + lea 1($inp),$inp + movb $TY#b,($out) + lea 1($out),$out + sub \$1,$len + jnz .Lcloop1 + jmp .Lexit +.size RC4,.-RC4 +___ + +$code =~ s/#([bwd])/$1/gm; + +print $code; + +close STDOUT; diff --git a/crypto/x86_64cpuid.pl b/crypto/x86_64cpuid.pl new file mode 100644 index 0000000000..777d557783 --- /dev/null +++ b/crypto/x86_64cpuid.pl @@ -0,0 +1,138 @@ +#!/usr/bin/env perl + +$output=shift; +$win64a=1 if ($output =~ /win64a\.[s|asm]/); +open STDOUT,">$output" || die "can't open $output: $!"; + +print<<___ if(defined($win64a)); +_TEXT SEGMENT +PUBLIC OPENSSL_rdtsc +ALIGN 16 +OPENSSL_rdtsc PROC + rdtsc + shl rdx,32 + or rax,rdx + ret +OPENSSL_rdtsc ENDP + +PUBLIC OPENSSL_atomic_add +ALIGN 16 +OPENSSL_atomic_add PROC + mov eax,DWORD PTR[rcx] +\$Lspin: lea r8,DWORD PTR[rdx+rax] +lock cmpxchg DWORD PTR[rcx],r8d + jne \$Lspin + mov eax,r8d + cdqe + ret +OPENSSL_atomic_add ENDP + +PUBLIC OPENSSL_wipe_cpu +ALIGN 16 +OPENSSL_wipe_cpu PROC + pxor xmm0,xmm0 + pxor xmm1,xmm1 + pxor xmm2,xmm2 + pxor xmm3,xmm3 + pxor xmm4,xmm4 + pxor xmm5,xmm5 + xor rcx,rcx + xor rdx,rdx + xor r8,r8 + xor r9,r9 + xor r10,r10 + xor r11,r11 + lea rax,QWORD PTR[rsp+8] + ret +OPENSSL_wipe_cpu ENDP + +OPENSSL_ia32_cpuid PROC + mov r8,rbx + mov eax,1 + cpuid + shl rcx,32 + mov eax,edx + mov rbx,r8 + or rax,rcx + ret +OPENSSL_ia32_cpuid ENDP +_TEXT ENDS + +CRT\$XIU SEGMENT +EXTRN OPENSSL_cpuid_setup:PROC +DQ OPENSSL_cpuid_setup +CRT\$XIU ENDS +END +___ +print<<___ if(!defined($win64a)); +.text +.globl OPENSSL_rdtsc +.align 16 +OPENSSL_rdtsc: + rdtsc + shl \$32,%rdx + or %rdx,%rax + ret +.size OPENSSL_rdtsc,.-OPENSSL_rdtsc + +.globl OPENSSL_atomic_add +.type OPENSSL_atomic_add,\@function +.align 16 +OPENSSL_atomic_add: + movl (%rdi),%eax +.Lspin: lea (%rsi,%rax),%r8 +lock; cmpxchg %r8d,(%rdi) + jne .Lspin + mov %r8d,%eax + cdqe + ret +.size OPENSSL_atomic_add,.-OPENSSL_atomic_add + +.globl OPENSSL_wipe_cpu +.type OPENSSL_wipe_cpu,\@function +.align 16 +OPENSSL_wipe_cpu: + pxor %xmm0,%xmm0 + pxor %xmm1,%xmm1 + pxor %xmm2,%xmm2 + pxor %xmm3,%xmm3 + pxor %xmm4,%xmm4 + pxor %xmm5,%xmm5 + pxor %xmm6,%xmm6 + pxor %xmm7,%xmm7 + pxor %xmm8,%xmm8 + pxor %xmm9,%xmm9 + pxor %xmm10,%xmm10 + pxor %xmm11,%xmm11 + pxor %xmm12,%xmm12 + 
pxor %xmm13,%xmm13 + pxor %xmm14,%xmm14 + pxor %xmm15,%xmm15 + xor %rcx,%rcx + xor %rdx,%rdx + xor %rsi,%rsi + xor %rdi,%rdi + xor %r8,%r8 + xor %r9,%r9 + xor %r10,%r10 + xor %r11,%r11 + lea 8(%rsp),%rax + ret +.size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu + +.globl OPENSSL_ia32_cpuid +.align 16 +OPENSSL_ia32_cpuid: + mov %rbx,%r8 + mov \$1,%eax + cpuid + shl \$32,%rcx + mov %edx,%eax + mov %r8,%rbx + or %rcx,%rax + ret +.size OPENSSL_ia32_cpuid,.-OPENSSL_ia32_cpuid + +.section .init + call OPENSSL_cpuid_setup +___ -- cgit v1.2.3
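
For readers comparing the unrolled assembler in rc4-x86_64.pl against the algorithm it implements, the byte-at-a-time RC4 loop can be written in C roughly as below. This is a minimal sketch under assumed names (rc4_state, rc4_sketch); it is not OpenSSL's RC4_KEY layout or RC4() entry point, and it models only an RC4_INT-style int table of the kind the .Lloop1/.Lloop8 paths index four bytes apart.

/*
 * Minimal C sketch of the RC4 byte loop that rc4-x86_64.pl unrolls.
 * The state layout is illustrative only: OpenSSL chooses between an
 * int-sized table (RC4_INT) and a byte table (RC4_CHAR) at configure
 * time, and its real entry point takes an RC4_KEY structure.
 */
#include <stddef.h>

typedef struct {
    unsigned int x, y;      /* the XX and YY indices stored at -8($dat)/-4($dat) */
    unsigned int S[256];    /* RC4_INT-style state, one int slot per byte        */
} rc4_state;

static void rc4_sketch(rc4_state *key, size_t len,
                       const unsigned char *in, unsigned char *out)
{
    unsigned int x = key->x, y = key->y;

    while (len--) {
        unsigned int tx, ty;

        x = (x + 1) & 0xff;         /* the 'inc %r8b' partial-register trick */
        tx = key->S[x];
        y = (y + tx) & 0xff;
        ty = key->S[y];
        key->S[y] = tx;             /* swap S[x] and S[y] */
        key->S[x] = ty;
        *out++ = *in++ ^ (unsigned char)key->S[(tx + ty) & 0xff];
    }
    key->x = x;
    key->y = y;
}

The eight-way .Lloop8 path performs exactly these steps eight times per iteration, accumulating the eight key-stream bytes in %rax with 'ror $8,%rax' so the input can be XOR-ed and stored a quadword at a time.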
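
The OPENSSL_atomic_add routine in x86_64cpuid.pl is a compare-and-swap retry loop: the 'lock cmpxchg' either installs the incremented value or reloads %eax with the current memory contents, and 'jne' spins until the exchange succeeds. The C11 sketch below shows the same logic; the function name and prototype here are paraphrased for illustration, not copied from OpenSSL's headers.

/*
 * C sketch of what the OPENSSL_atomic_add spin loop computes:
 * atomically add 'amount' to '*val' and return the new value.
 */
#include <stdatomic.h>

static int atomic_add_sketch(_Atomic int *val, int amount)
{
    int seen = atomic_load(val);        /* movl (%rdi),%eax    */
    int want;

    do {
        want = seen + amount;           /* lea (%rsi,%rax),%r8 */
        /* on failure, 'seen' is refreshed with the value found in memory,
         * mirroring cmpxchg loading it back into %eax before 'jne .Lspin' */
    } while (!atomic_compare_exchange_weak(val, &seen, want));

    return want;                        /* mov %r8d,%eax; cdqe */
}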