summaryrefslogtreecommitdiffstats
path: root/engines
diff options
context:
space:
mode:
authorAndy Polyakov <appro@openssl.org>2011-09-06 20:45:36 +0000
committerAndy Polyakov <appro@openssl.org>2011-09-06 20:45:36 +0000
commited28aef8b455be436f252dfceac49a958a92e53b (patch)
treecd34319f2128612e24d78d06571cdc4c24eab614 /engines
parent0486cce653b62d26a8ca37ac12f69f1a6b998844 (diff)
Padlock engine: make it independent of inline assembler.
Diffstat (limited to 'engines')
-rw-r--r--engines/Makefile13
-rw-r--r--engines/asm/e_padlock-x86.pl424
-rw-r--r--engines/asm/e_padlock-x86_86.pl327
-rw-r--r--engines/e_padlock.c969
4 files changed, 943 insertions, 790 deletions
diff --git a/engines/Makefile b/engines/Makefile
index 65c4991135..190d4a1cdd 100644
--- a/engines/Makefile
+++ b/engines/Makefile
@@ -16,10 +16,13 @@ RECURSIVE_MAKE= [ -z "$(ENGDIRS)" ] || for i in $(ENGDIRS) ; do \
$(MAKE) -e TOP=../.. DIR=$$i $$target ) || exit 1; \
done;
+ENGINES_ASM_OBJ=
+
PEX_LIBS=
EX_LIBS=
CFLAGS= $(INCLUDES) $(CFLAG)
+ASFLAGS= $(INCLUDES) $(ASFLAG)
GENERAL=Makefile engines.com install.com engine_vector.mar
TEST=
@@ -49,7 +52,8 @@ LIBOBJ= e_4758cca.o \
e_sureware.o \
e_ubsec.o \
e_padlock.o \
- e_capi.o
+ e_capi.o \
+ $(ENGINES_ASM_OBJ)
SRC= $(LIBSRC)
@@ -77,7 +81,7 @@ lib: $(LIBOBJ)
set -e; \
for l in $(LIBNAMES); do \
$(MAKE) -f ../Makefile.shared -e \
- LIBNAME=$$l LIBEXTRAS=e_$$l.o \
+ LIBNAME=$$l LIBEXTRAS="e_$$l*.o" \
LIBDEPS='-L.. -lcrypto $(EX_LIBS)' \
link_o.$(SHLIB_TARGET); \
done; \
@@ -87,6 +91,11 @@ lib: $(LIBOBJ)
fi; \
touch lib
+e_padlock-x86.s: asm/e_padlock-x86.pl
+ $(PERL) asm/e_padlock-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
+e_padlock-x86_64.s: asm/e_padlock-x86_64.pl
+ $(PERL) asm/e_padlock-x86_64.pl $(PERLASM_SCHEME) > $@
+
subdirs:
echo $(EDIRS)
@target=all; $(RECURSIVE_MAKE)
diff --git a/engines/asm/e_padlock-x86.pl b/engines/asm/e_padlock-x86.pl
new file mode 100644
index 0000000000..4a00d9d687
--- /dev/null
+++ b/engines/asm/e_padlock-x86.pl
@@ -0,0 +1,424 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# September 2011
+#
+# Assembler helpers for Padlock engine. Compared to original engine
+# version relying on inline assembler and compiled with gcc 3.4.6 it
+# was measured to provide ~100% improvement on misaligned data in ECB
+# mode and ~75% in CBC mode. For aligned data improvement can be
+# observed for short inputs only, e.g. 45% for 64-byte messages in
+# ECB mode, 20% in CBC. Difference in performance for aligned vs.
+# misaligned data depends on misalignment and is either ~1.8x or
+# ~2.9x. These are approximately same factors as for hardware support,
+# so there is little reason to rely on the latter. It might actually
+# hurt performance in mixture of aligned and misaligned buffers,
+# because a) if you choose to flip 'align' flag on per-buffer basis,
+# then you'd have to reload key context; b) if you choose to set
+# 'align' flag permanently, it limits performance for aligned data
+# to ~1/2. All results were collected on 1.5GHz C7.
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../crypto/perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+$PADLOCK_CHUNK=512; # Must be a power of 2 larger than 16
+
+$ctx="edx";
+$out="edi";
+$inp="esi";
+$len="ecx";
+$chunk="ebx";
+
+&function_begin_B("padlock_capability");
+ &push ("ebx");
+ &pushf ();
+ &pop ("eax");
+ &mov ("ecx","eax");
+ &xor ("eax",1<<21);
+ &push ("eax");
+ &popf ();
+ &pushf ();
+ &pop ("eax");
+ &xor ("ecx","eax");
+ &xor ("eax","eax");
+ &bt ("ecx",21);
+ &jnc (&label("noluck"));
+ &cpuid ();
+ &xor ("eax","eax");
+ &cmp ("ebx","0x".unpack("H*",'tneC'));
+ &jne (&label("noluck"));
+ &cmp ("edx","0x".unpack("H*",'Hrua'));
+ &jne (&label("noluck"));
+ &cmp ("ecx","0x".unpack("H*",'slua'));
+ &jne (&label("noluck"));
+ &mov ("eax",0xC0000000);
+ &cpuid ();
+ &mov ("edx","eax");
+ &xor ("eax","eax");
+ &cmp ("edx",0xC0000001);
+ &jb (&label("noluck"));
+ &mov ("eax",1);
+ &cpuid ();
+ &or ("eax",0x0f);
+ &xor ("ebx","ebx");
+ &and ("eax",0x0fff);
+ &cmp ("eax",0x06ff); # check for Nano
+ &sete ("bl");
+ &mov ("eax",0xC0000001);
+ &push ("ebx");
+ &cpuid ();
+ &pop ("ebx");
+ &mov ("eax","edx");
+ &shl ("ebx",4); # bit#4 denotes Nano
+ &and ("eax",0xffffffef);
+ &or ("eax","ebx")
+&set_label("noluck");
+ &pop ("ebx");
+ &ret ();
+&function_end_B("padlock_capability")
+
+&function_begin_B("padlock_key_bswap");
+ &mov ("edx",&wparam(0));
+ &mov ("ecx",&DWP(240,"edx"));
+&set_label("bswap_loop");
+ &mov ("eax",&DWP(0,"edx"));
+ &bswap ("eax");
+ &mov (&DWP(0,"edx"),"eax");
+ &lea ("edx",&DWP(4,"edx"));
+ &sub ("ecx",1);
+ &jnz (&label("bswap_loop"));
+ &ret ();
+&function_end_B("padlock_key_bswap");
+
+# This is heuristic key context tracing. At first one
+# believes that one should use atomic swap instructions,
+# but it's not actually necessary. Point is that if
+# padlock_saved_context was changed by another thread
+# after we've read it and before we compare it with ctx,
+# our key *shall* be reloaded upon thread context switch
+# and we are therefore set in either case...
+&static_label("padlock_saved_context");
+
+&function_begin_B("padlock_verify_context");
+ &mov ($ctx,&wparam(0));
+ &lea ("eax",&DWP("padlock_saved_context-".&label("verify_pic_point")));
+ &pushf ();
+ &call ("_padlock_verify_ctx");
+&set_label("verify_pic_point");
+ &lea ("esp",&DWP(4,"esp"));
+ &ret ();
+&function_end_B("padlock_verify_context");
+
+&function_begin_B("_padlock_verify_ctx");
+ &add ("eax",&DWP(0,"esp")); # &padlock_saved_context
+ &bt (&DWP(4,"esp"),30); # eflags
+ &jnc (&label("verified"));
+ &cmp ($ctx,&DWP(0,"eax"));
+ &je (&label("verified"));
+ &pushf ();
+ &popf ();
+&set_label("verified");
+ &mov (&DWP(0,"eax"),$ctx);
+ &ret ();
+&function_end_B("_padlock_verify_ctx");
+
+&function_begin_B("padlock_reload_key");
+ &pushf ();
+ &popf ();
+ &ret ();
+&function_end_B("padlock_reload_key");
+
+&function_begin_B("padlock_aes_block");
+ &push ("edi");
+ &push ("esi");
+ &push ("ebx");
+ &mov ($out,&wparam(0)); # must be 16-byte aligned
+ &mov ($inp,&wparam(1)); # must be 16-byte aligned
+ &mov ($ctx,&wparam(2));
+ &mov ($len,1);
+ &lea ("ebx",&DWP(32,$ctx)); # key
+ &lea ($ctx,&DWP(16,$ctx)); # control word
+ &data_byte(0xf3,0x0f,0xa7,0xc8); # rep xcryptecb
+ &pop ("ebx");
+ &pop ("esi");
+ &pop ("edi");
+ &ret ();
+&function_end_B("padlock_aes_block");
+
+sub generate_mode {
+my ($mode,$opcode) = @_;
+# int padlock_$mode_encrypt(void *out, const void *inp,
+# struct padlock_cipher_data *ctx, size_t len);
+&function_begin("padlock_${mode}_encrypt");
+ &mov ($out,&wparam(0));
+ &mov ($inp,&wparam(1));
+ &mov ($ctx,&wparam(2));
+ &mov ($len,&wparam(3));
+ &test ($ctx,15);
+ &jnz (&label("${mode}_abort"));
+ &test ($len,15);
+ &jnz (&label("${mode}_abort"));
+ &lea ("eax",&DWP("padlock_saved_context-".&label("${mode}_pic_point")));
+ &pushf ();
+ &cld ();
+ &call ("_padlock_verify_ctx");
+&set_label("${mode}_pic_point");
+ &lea ($ctx,&DWP(16,$ctx)); # control word
+ &xor ("eax","eax");
+ if ($mode eq "ctr16") {
+ &movdqa ("xmm0",&QWP(-16,$ctx));# load iv
+ } else {
+ &xor ("ebx","ebx");
+ &test (&DWP(0,$ctx),1<<5); # align bit in control word
+ &jnz (&label("${mode}_aligned"));
+ &test ($out,0x0f);
+ &setz ("al"); # !out_misaligned
+ &test ($inp,0x0f);
+ &setz ("bl"); # !inp_misaligned
+ &test ("eax","ebx");
+ &jnz (&label("${mode}_aligned"));
+ &neg ("eax");
+ }
+ &mov ($chunk,$PADLOCK_CHUNK);
+ &not ("eax"); # out_misaligned?-1:0
+ &lea ("ebp",&DWP(-24,"esp"));
+ &cmp ($len,$chunk);
+ &cmovc ($chunk,$len); # chunk=len>PADLOCK_CHUNK?PADLOCK_CHUNK:len
+ &and ("eax",$chunk); # out_misaligned?chunk:0
+ &mov ($chunk,$len);
+ &neg ("eax");
+ &and ($chunk,$PADLOCK_CHUNK-1); # chunk=len%PADLOCK_CHUNK
+ &lea ("esp",&DWP(0,"eax","ebp")); # alloca
+ &and ("esp",-16);
+ &jmp (&label("${mode}_loop"));
+
+&set_label("${mode}_loop",16);
+ &mov (&DWP(0,"ebp"),$out); # save parameters
+ &mov (&DWP(4,"ebp"),$inp);
+ &mov (&DWP(8,"ebp"),$len);
+ &mov ($len,$chunk);
+ &mov (&DWP(12,"ebp"),$chunk); # chunk
+ if ($mode eq "ctr16") {
+ &pextrw ("ecx","xmm0",7); # borrow $len
+ &mov ($inp,1);
+ &xor ($out,$out);
+ &xchg ("ch","cl");
+&set_label("${mode}_prepare");
+ &movdqa (&QWP(0,"esp",$out),"xmm0");
+ &lea ("eax",&DWP(0,"ecx",$inp));
+ &xchg ("ah","al");
+ &lea ($out,&DWP(16,$out));
+ &pinsrw ("xmm0","eax",7);
+ &lea ($inp,&DWP(1,$inp));
+ &cmp ($out,$chunk);
+ &jb (&label("${mode}_prepare"));
+
+ &lea ($inp,&DWP(0,"esp"));
+ &lea ($out,&DWP(0,"esp"));
+ &mov ($len,$chunk);
+ } else {
+ &test ($out,0x0f); # out_misaligned
+ &cmovnz ($out,"esp");
+ &test ($inp,0x0f); # inp_misaligned
+ &jz (&label("${mode}_inp_aligned"));
+ &shr ($len,2);
+ &data_byte(0xf3,0xa5); # rep movsl
+ &sub ($out,$chunk);
+ &mov ($len,$chunk);
+ &mov ($inp,$out);
+&set_label("${mode}_inp_aligned");
+ }
+ &lea ("eax",&DWP(-16,$ctx)); # ivp
+ &lea ("ebx",&DWP(16,$ctx)); # key
+ &shr ($len,4); # len/=AES_BLOCK_SIZE
+ &data_byte(0xf3,0x0f,0xa7,$opcode); # rep xcrypt*
+ if ($mode !~ /ecb|ctr/) {
+ &movdqa ("xmm0",&QWP(0,"eax"));
+ &movdqa (&DWP(-16,$ctx),"xmm0"); # copy [or refresh] iv
+ }
+ &mov ($out,&DWP(0,"ebp")); # restore parameters
+ &mov ($chunk,&DWP(12,"ebp"));
+ if ($mode eq "ctr16") {
+ &mov ($inp,&DWP(4,"ebp"));
+ &xor ($len,$len);
+&set_label("${mode}_xor");
+ &movdqu ("xmm1",&QWP(0,$inp,$len));
+ &lea ($len,&DWP(16,$len));
+ &pxor ("xmm1",&QWP(-16,"esp",$len));
+ &movdqu (&QWP(-16,$out,$len),"xmm1");
+ &cmp ($len,$chunk);
+ &jb (&label("${mode}_xor"));
+ } else {
+ &test ($out,0x0f);
+ &jz (&label("${mode}_out_aligned"));
+ &mov ($len,$chunk);
+ &shr ($len,2);
+ &lea ($inp,&DWP(0,"esp"));
+ &data_byte(0xf3,0xa5); # rep movsl
+ &sub ($out,$chunk);
+&set_label("${mode}_out_aligned");
+ &mov ($inp,&DWP(4,"ebp"));
+ }
+ &mov ($len,&DWP(8,"ebp"));
+ &add ($out,$chunk);
+ &add ($inp,$chunk);
+ &sub ($len,$chunk);
+ &mov ($chunk,$PADLOCK_CHUNK);
+ &jnz (&label("${mode}_loop"));
+ if ($mode eq "ctr16") {
+ &movdqa (&QWP(-16,$ctx),"xmm0"); # write out iv
+ &pxor ("xmm0","xmm0");
+ &pxor ("xmm1","xmm1");
+ } else {
+ &test ($out,0x0f); # out_misaligned
+ &jz (&label("${mode}_done"));
+ }
+ &mov ($len,"ebp");
+ &mov ($out,"esp");
+ &sub ($len,"esp");
+ &xor ("eax","eax");
+ &shr ($len,2);
+ &data_byte(0xf3,0xab); # rep stosl
+&set_label("${mode}_done");
+ &lea ("esp",&DWP(24,"ebp"));
+ if ($mode ne "ctr16") {
+ &jmp (&label("${mode}_exit"));
+
+&set_label("${mode}_aligned",16);
+ &lea ("eax",&DWP(-16,$ctx)); # ivp
+ &lea ("ebx",&DWP(16,$ctx)); # key
+ &shr ($len,4); # len/=AES_BLOCK_SIZE
+ &data_byte(0xf3,0x0f,0xa7,$opcode); # rep xcrypt*
+ if ($mode ne "ecb") {
+ &movdqa ("xmm0",&QWP(0,"eax"));
+ &movdqa (&DWP(-16,$ctx),"xmm0"); # copy [or refresh] iv
+ }
+&set_label("${mode}_exit"); }
+ &mov ("eax",1);
+ &lea ("esp",&DWP(4,"esp")); # popf
+&set_label("${mode}_abort");
+&function_end("padlock_${mode}_encrypt");
+}
+
+&generate_mode("ecb",0xc8);
+&generate_mode("cbc",0xd0);
+&generate_mode("cfb",0xe0);
+&generate_mode("ofb",0xe8);
+&generate_mode("ctr16",0xc8); # yes, it implements own ctr with ecb opcode,
+ # because hardware ctr was introduced later
+ # and even has errata on certain CPU stepping.
+ # own implementation *always* works...
+
+&function_begin_B("padlock_xstore");
+ &push ("edi");
+ &mov ("edi",&wparam(0));
+ &mov ("edx",&wparam(1));
+ &data_byte(0x0f,0xa7,0xc0); # xstore
+ &pop ("edi");
+ &ret ();
+&function_end_B("padlock_xstore");
+
+&function_begin_B("_win32_segv_handler");
+ &mov ("eax",1); # ExceptionContinueSearch
+ &mov ("edx",&wparam(0)); # *ExceptionRecord
+ &mov ("ecx",&wparam(2)); # *ContextRecord
+ &cmp (&DWP(0,"edx"),0xC0000005) # ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION
+ &jne (&label("ret"));
+ &add (&DWP(184,"ecx"),4); # skip over rep sha*
+ &mov ("eax",0); # ExceptionContinueExecution
+&set_label("ret");
+ &ret ();
+&function_end_B("_win32_segv_handler");
+&safeseh("_win32_segv_handler") if ($::win32);
+
+&function_begin_B("padlock_sha1_oneshot");
+ &push ("edi");
+ &push ("esi");
+ &xor ("eax","eax");
+ if ($::win32 or $::coff) {
+ &push (&::islabel("_win32_segv_handler"));
+ &data_byte(0x64,0xff,0x30); # push %fs:(%eax)
+ &data_byte(0x64,0x89,0x20); # mov %esp,%fs:(%eax)
+ }
+ &mov ("edi",&wparam(0));
+ &mov ("esi",&wparam(1));
+ &mov ("ecx",&wparam(2));
+ &data_byte(0xf3,0x0f,0xa6,0xc8); # rep xsha1
+ if ($::win32 or $::coff) {
+ &data_byte(0x64,0x8f,0x05,0,0,0,0); # pop %fs:0
+ &lea ("esp",&DWP(4,"esp"));
+ }
+ &pop ("esi");
+ &pop ("edi");
+ &ret ();
+&function_end_B("padlock_sha1_oneshot");
+
+&function_begin_B("padlock_sha1");
+ &push ("edi");
+ &push ("esi");
+ &mov ("eax",-1);
+ &mov ("edi",&wparam(0));
+ &mov ("esi",&wparam(1));
+ &mov ("ecx",&wparam(2));
+ &data_byte(0xf3,0x0f,0xa6,0xc8); # rep xsha1
+ &pop ("esi");
+ &pop ("edi");
+ &ret ();
+&function_end_B("padlock_sha1");
+
+&function_begin_B("padlock_sha256_oneshot");
+ &push ("edi");
+ &push ("esi");
+ &xor ("eax","eax");
+ if ($::win32 or $::coff) {
+ &push (&::islabel("_win32_segv_handler"));
+ &data_byte(0x64,0xff,0x30); # push %fs:(%eax)
+ &data_byte(0x64,0x89,0x20); # mov %esp,%fs:(%eax)
+ }
+ &mov ("edi",&wparam(0));
+ &mov ("esi",&wparam(1));
+ &mov ("ecx",&wparam(2));
+ &data_byte(0xf3,0x0f,0xa6,0xd0); # rep xsha256
+ if ($::win32 or $::coff) {
+ &data_byte(0x64,0x8f,0x05,0,0,0,0); # pop %fs:0
+ &lea ("esp",&DWP(4,"esp"));
+ }
+ &pop ("esi");
+ &pop ("edi");
+ &ret ();
+&function_end_B("padlock_sha256_oneshot");
+
+&function_begin_B("padlock_sha256");
+ &push ("edi");
+ &push ("esi");
+ &mov ("eax",-1);
+ &mov ("edi",&wparam(0));
+ &mov ("esi",&wparam(1));
+ &mov ("ecx",&wparam(2));
+ &data_byte(0xf3,0x0f,0xa6,0xd0); # rep xsha256
+ &pop ("esi");
+ &pop ("edi");
+ &ret ();
+&function_end_B("padlock_sha256");
+
+&asciz ("VIA Padlock x86 module, CRYPTOGAMS by <appro\@openssl.org>");
+&align (16);
+
+&dataseg();
+# Essentially this variable belongs in thread local storage.
+# Having this variable global on the other hand can only cause
+# few bogus key reloads [if any at all on single-CPU system],
+# so we accept the penalty...
+&set_label("padlock_saved_context",4);
+&data_word(0);
+
+&asm_finish();
diff --git a/engines/asm/e_padlock-x86_86.pl b/engines/asm/e_padlock-x86_86.pl
new file mode 100644
index 0000000000..56cabc0f15
--- /dev/null
+++ b/engines/asm/e_padlock-x86_86.pl
@@ -0,0 +1,327 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# September 2011
+#
+# Assembler helpers for Padlock engine.
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../crypto/perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$code=".text\n";
+
+$PADLOCK_CHUNK=512; # Must be a power of 2 larger than 16
+
+$ctx="%rdx";
+$out="%rdi";
+$inp="%rsi";
+$len="%rcx";
+$chunk="%rbx";
+
+($arg1,$arg2,$arg3,$arg4)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx"); # Unix order
+
+$code.=<<___;
+.globl padlock_capability
+.type padlock_capability,\@abi-omnipotent
+.align 16
+padlock_capability:
+ mov %rbx,%r8
+ xor %eax,%eax
+ cpuid
+ xor %eax,%eax
+ cmp \$`"0x".unpack("H*",'tneC')`,%ebx
+ jne .Lnoluck
+ cmp \$`"0x".unpack("H*",'Hrua')`,%edx
+ jne .Lnoluck
+ cmp \$`"0x".unpack("H*",'slua')`,%ecx
+ jne .Lnoluck
+ mov \$0xC0000000,%eax
+ cpuid
+ mov %eax,%edx
+ xor %eax,%eax
+ cmp \$0xC0000001,%edx
+ jb .Lnoluck
+ mov \$0xC0000001,%eax
+ cpuid
+ mov %edx,%eax
+ and \$0xffffffef,%eax
+ or \$0x10,%eax # set Nano bit#4
+.Lnoluck:
+ mov %r8,%rbx
+ ret
+.size padlock_capability,.-padlock_capability
+
+.globl padlock_key_bswap
+.type padlock_key_bswap,\@abi-omnipotent,0
+.align 16
+padlock_key_bswap:
+ mov 240($arg1),%edx
+.Lbswap_loop:
+ mov ($arg1),%eax
+ bswap %eax
+ mov %eax,($arg1)
+ lea 4($arg1),$arg1
+ sub \$1,%edx
+ jnz .Lbswap_loop
+ ret
+.size padlock_key_bswap,.-padlock_key_bswap
+
+.globl padlock_verify_context
+.type padlock_verify_context,\@abi-omnipotent
+.align 16
+padlock_verify_context:
+ mov $arg1,$ctx
+ pushf
+ lea .Lpadlock_saved_context(%rip),%rax
+ call _padlock_verify_ctx
+ lea 8(%rsp),%rsp
+ ret
+.size padlock_verify_context,.-padlock_verify_context
+
+.type _padlock_verify_ctx,\@abi-omnipotent
+.align 16
+_padlock_verify_ctx:
+ mov 8(%rsp),%r8
+ bt \$30,%r8
+ jnc .Lverified
+ cmp (%rax),$ctx
+ je .Lverified
+ pushf
+ popf
+.Lverified:
+ mov $ctx,(%rax)
+ ret
+.size _padlock_verify_ctx,.-_padlock_verify_ctx
+
+.globl padlock_reload_key
+.type padlock_reload_key,\@abi-omnipotent
+.align 16
+padlock_reload_key:
+ pushf
+ popf
+ ret
+.size padlock_reload_key,.-padlock_reload_key
+
+.globl padlock_aes_block
+.type padlock_aes_block,\@function,3
+.align 16
+padlock_aes_block:
+ mov %rbx,%r8
+ mov \$1,$len
+ lea 32($ctx),%rbx # key
+ lea 16($ctx),$ctx # control word
+ .byte 0xf3,0x0f,0xa7,0xc8 # rep xcryptecb
+ mov %r8,%rbx
+ ret
+.size padlock_aes_block,.-padlock_aes_block
+
+.globl padlock_xstore
+.type padlock_xstore,\@function,2
+.align 16
+padlock_xstore:
+ mov %esi,%edx
+ .byte 0x0f,0xa7,0xc0 # xstore
+ ret
+.size padlock_xstore,.-padlock_xstore
+
+.globl padlock_sha1_oneshot
+.type padlock_sha1_oneshot,\@function,3
+.align 16
+padlock_sha1_oneshot:
+ xor %rax,%rax
+ mov %rdx,%rcx
+ .byte 0xf3,0x0f,0xa6,0xc8 # rep xsha1
+ ret
+.size padlock_sha1_oneshot,.-padlock_sha1_oneshot
+
+.globl padlock_sha1
+.type padlock_sha1,\@function,3
+.align 16
+padlock_sha1:
+ mov \$-1,%rax
+ mov %rdx,%rcx
+ .byte 0xf3,0x0f,0xa6,0xc8 # rep xsha1
+ ret
+.size padlock_sha1,.-padlock_sha1
+
+.globl padlock_sha256_oneshot
+.type padlock_sha256_oneshot,\@function,3
+.align 16
+padlock_sha256_oneshot:
+ xor %rax,%rax
+ mov %rdx,%rcx
+ .byte 0xf3,0x0f,0xa6,0xd0 # rep xsha256
+ ret
+.size padlock_sha256_oneshot,.-padlock_sha256_oneshot
+
+.globl padlock_sha256
+.type padlock_sha256,\@function,3
+.align 16
+padlock_sha256:
+ mov \$-1,%rax
+ mov %rdx,%rcx
+ .byte 0xf3,0x0f,0xa6,0xd0 # rep xsha256
+ ret
+.size padlock_sha256,.-padlock_sha256
+___
+
+sub generate_mode {
+my ($mode,$opcode) = @_;
+# int padlock_$mode_encrypt(void *out, const void *inp,
+# struct padlock_cipher_data *ctx, size_t len);
+$code.=<<___;
+.globl padlock_${mode}_encrypt
+.type padlock_${mode}_encrypt,\@function,4
+.align 16
+padlock_${mode}_encrypt:
+ push %rbp
+ push %rbx
+
+ xor %eax,%eax
+ test \$15,$ctx
+ jnz .L${mode}_abort
+ test \$15,$len
+ jnz .L${mode}_abort
+ lea .Lpadlock_saved_context(%rip),%rax
+ pushf
+ cld
+ call _padlock_verify_ctx
+ lea 16($ctx),$ctx # control word
+ xor %eax,%eax
+ xor %ebx,%ebx
+ test \$`1<<5`,($ctx) # align bit in control word
+ test \$0x0f,$out
+ setz %al # !out_misaligned
+ test \$0x0f,$inp
+ setz %bl # !inp_misaligned
+ test %ebx,%eax
+ jnz .L${mode}_aligned
+ neg %rax
+ mov \$$PADLOCK_CHUNK,$chunk
+ not %rax # out_misaligned?-1:0
+ lea (%rsp),%rbp
+ cmp $chunk,$len
+ cmovc $len,$chunk # chunk=len>PADLOCK_CHUNK?PADLOCK_CHUNK:len
+ and $chunk,%rax # out_misaligned?chunk:0
+ mov $len,$chunk
+ neg %rax
+ and \$$PADLOCK_CHUNK-1,$chunk # chunk%=PADLOCK_CHUNK
+ lea (%rax,%rbp),%rsp
+ jmp .L${mode}_loop
+.align 16
+.L${mode}_loop:
+ mov $out,%r8 # save parameters
+ mov $inp,%r9
+ mov $len,%r10
+ mov $chunk,$len
+ mov $chunk,%r11
+ test \$0x0f,$out # out_misaligned
+ cmovnz %rsp,$out
+ test \$0x0f,$inp # inp_misaligned
+ jz .L${mode}_inp_aligned
+ shr \$3,$len
+ .byte 0xf3,0x48,0xa5 # rep movsq
+ sub $chunk,$out
+ mov $chunk,$len
+ mov $out,$inp
+.L${mode}_inp_aligned:
+ lea -16($ctx),%rax # ivp
+ lea 16($ctx),%rbx # key
+ shr \$4,$len
+ .byte 0xf3,0x0f,0xa7,$opcode # rep xcrypt*
+___
+$code.=<<___ if ($mode !~ /ecb|ctr/);
+ movdqa (%rax),%xmm0
+ movdqa %xmm0,-16($ctx) # copy [or refresh] iv
+___
+$code.=<<___;
+	mov	%r8,$out	# restore parameters
+ mov %r11,$chunk
+ test \$0x0f,$out
+ jz .L${mode}_out_aligned
+ mov $chunk,$len
+ shr \$3,$len
+ lea (%rsp),$inp
+ .byte 0xf3,0x48,0xa5 # rep movsq
+ sub $chunk,$out
+.L${mode}_out_aligned:
+ mov %r9,$inp
+ mov %r10,$len
+ add $chunk,$out
+ add $chunk,$inp
+ sub $chunk,$len
+ mov \$$PADLOCK_CHUNK,$chunk
+ jnz .L${mode}_loop
+
+ test \$0x0f,$out
+ jz .L${mode}_done
+
+ mov %rbp,$len
+ mov %rsp,$out
+ sub %rsp,$len
+ xor %rax,%rax
+ shr \$3,$len
+ .byte 0xf3,0x48,0xab # rep stosq
+.L${mode}_done:
+ lea (%rbp),%rsp
+ jmp .L${mode}_exit
+
+.align 16
+.L${mode}_aligned:
+ lea -16($ctx),%rax # ivp
+ lea 16($ctx),%rbx # key
+ shr \$4,$len # len/=AES_BLOCK_SIZE
+ .byte 0xf3,0x0f,0xa7,$opcode # rep xcrypt*
+___
+$code.=<<___ if ($mode !~ /ecb|ctr/);
+ movdqa (%rax),%xmm0
+ movdqa %xmm0,-16($ctx) # copy [or refresh] iv
+___
+$code.=<<___;
+.L${mode}_exit:
+ mov \$1,%eax
+ lea 8(%rsp),%rsp
+.L${mode}_abort:
+ pop %rbx
+ pop %rbp
+ ret
+.size padlock_${mode}_encrypt,.-padlock_${mode}_encrypt
+___
+}
+
+&generate_mode("ecb",0xc8);
+&generate_mode("cbc",0xd0);
+&generate_mode("cfb",0xe0);
+&generate_mode("ofb",0xe8);
+&generate_mode("ctr16",0xd8);
+
+$code.=<<___;
+.asciz "VIA Padlock x86_64 module, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+.data
+.align 8
+.Lpadlock_saved_context:
+ .quad 0
+___
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/engines/e_padlock.c b/engines/e_padlock.c
index 271253dada..7f78135165 100644
--- a/engines/e_padlock.c
+++ b/engines/e_padlock.c
@@ -95,17 +95,14 @@
/* VIA PadLock AES is available *ONLY* on some x86 CPUs.
Not only that it doesn't exist elsewhere, but it
- even can't be compiled on other platforms!
+ even can't be compiled on other platforms! */
- In addition, because of the heavy use of inline assembler,
- compiler choice is limited to GCC and Microsoft C. */
#undef COMPILE_HW_PADLOCK
-#if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM)
-# if (defined(__GNUC__) && __GNUC__>=2 && \
- (defined(__i386__) || defined(__i386) || \
- defined(__x86_64__) || defined(__x86_64)) \
- ) || \
- (defined(_MSC_VER) && defined(_M_IX86))
+#if !defined(I386_ONLY) && !defined(OPENSSL_NO_ASM)
+# if defined(__i386__) || defined(__i386) || \
+ defined(__x86_64__) || defined(__x86_64) || \
+ defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
+ defined(__INTEL__)
# define COMPILE_HW_PADLOCK
# ifdef OPENSSL_NO_DYNAMIC_ENGINE
static ENGINE *ENGINE_padlock (void);
@@ -130,19 +127,6 @@ void ENGINE_load_padlock (void)
#endif
#ifdef COMPILE_HW_PADLOCK
-/* We do these includes here to avoid header problems on platforms that
- do not have the VIA padlock anyway... */
-#include <stdlib.h>
-#ifdef _WIN32
-# include <malloc.h>
-# ifndef alloca
-# define alloca _alloca
-# endif
-#elif defined(__GNUC__)
-# ifndef alloca
-# define alloca(s) __builtin_alloca((s))
-# endif
-#endif
/* Function for ENGINE detection and control */
static int padlock_available(void);
@@ -163,9 +147,6 @@ static char padlock_name[100];
/* Available features */
static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
static int padlock_use_rng = 0; /* Random Number Generator */
-#ifndef OPENSSL_NO_AES
-static int padlock_aes_align_required = 1;
-#endif
/* ===== Engine "management" functions ===== */
@@ -284,202 +265,37 @@ struct padlock_cipher_data
} cword; /* Control word */
AES_KEY ks; /* Encryption key */
};
-
-/*
- * Essentially this variable belongs in thread local storage.
- * Having this variable global on the other hand can only cause
- * few bogus key reloads [if any at all on single-CPU system],
- * so we accept the penatly...
- */
-static volatile struct padlock_cipher_data *padlock_saved_context;
-#endif
-
-/*
- * =======================================================
- * Inline assembler section(s).
- * =======================================================
- * Order of arguments is chosen to facilitate Windows port
- * using __fastcall calling convention. If you wish to add
- * more routines, keep in mind that first __fastcall
- * argument is passed in %ecx and second - in %edx.
- * =======================================================
- */
-#if defined(__GNUC__) && __GNUC__>=2
-#if defined(__i386__) || defined(__i386)
-/*
- * As for excessive "push %ebx"/"pop %ebx" found all over.
- * When generating position-independent code GCC won't let
- * us use "b" in assembler templates nor even respect "ebx"
- * in "clobber description." Therefore the trouble...
- */
-
-/* Helper function - check if a CPUID instruction
- is available on this CPU */
-static int
-padlock_insn_cpuid_available(void)
-{
- int result = -1;
-
- /* We're checking if the bit #21 of EFLAGS
- can be toggled. If yes = CPUID is available. */
- asm volatile (
- "pushf\n"
- "popl %%eax\n"
- "xorl $0x200000, %%eax\n"
- "movl %%eax, %%ecx\n"
- "andl $0x200000, %%ecx\n"
- "pushl %%eax\n"
- "popf\n"
- "pushf\n"
- "popl %%eax\n"
- "andl $0x200000, %%eax\n"
- "xorl %%eax, %%ecx\n"
- "movl %%ecx, %0\n"
- : "=r" (result) : : "eax", "ecx");
-
- return (result == 0);
-}
-
-/* Load supported features of the CPU to see if
- the PadLock is available. */
-static int
-padlock_available(void)
-{
- char vendor_string[16];
- unsigned int eax, edx;
-
- /* First check if the CPUID instruction is available at all... */
- if (! padlock_insn_cpuid_available())
- return 0;
-
- /* Are we running on the Centaur (VIA) CPU? */
- eax = 0x00000000;
- vendor_string[12] = 0;
- asm volatile (
- "pushl %%ebx\n"
- "cpuid\n"
- "movl %%ebx,(%%edi)\n"
- "movl %%edx,4(%%edi)\n"
- "movl %%ecx,8(%%edi)\n"
- "popl %%ebx"
- : "+a"(eax) : "D"(vendor_string) : "ecx", "edx");
- if (strcmp(vendor_string, "CentaurHauls") != 0)
- return 0;
-
- /* Check for Centaur Extended Feature Flags presence */
- eax = 0xC0000000;
- asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
- : "+a"(eax) : : "ecx", "edx");
- if (eax < 0xC0000001)
- return 0;
-
- /* Read the Centaur Extended Feature Flags */
- eax = 0xC0000001;
- asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
- : "+a"(eax), "=d"(edx) : : "ecx");
-
- /* Fill up some flags */
- padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
- padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
-
- return padlock_use_ace + padlock_use_rng;
-}
-
-/* Force key reload from memory to the CPU microcode.
- Loading EFLAGS from the stack clears EFLAGS[30]
- which does the trick. */
-static inline void
-padlock_reload_key(void)
-{
- asm volatile ("pushfl; popfl");
-}
-
-#ifndef OPENSSL_NO_AES
-/*
- * This is heuristic key context tracing. At first one
- * believes that one should use atomic swap instructions,
- * but it's not actually necessary. Point is that if
- * padlock_saved_context was changed by another thread
- * after we've read it and before we compare it with cdata,
- * our key *shall* be reloaded upon thread context switch
- * and we are therefore set in either case...
- */
-static inline void
-padlock_verify_context(struct padlock_cipher_data *cdata)
-{
- asm volatile (
- "pushfl\n"
-" btl $30,(%%esp)\n"
-" jnc 1f\n"
-" cmpl %2,%1\n"
-" je 1f\n"
-" popfl\n"
-" subl $4,%%esp\n"
-"1: addl $4,%%esp\n"
-" movl %2,%0"
- :"+m"(padlock_saved_context)
- : "r"(padlock_saved_context), "r"(cdata) : "cc");
-}
-
-/* Template for padlock_xcrypt_* modes */
-/* BIG FAT WARNING:
- * The offsets used with 'leal' instructions
- * describe items of the 'padlock_cipher_data'
- * structure.
- */
-#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \
-static inline void *name(size_t cnt, \
- struct padlock_cipher_data *cdata, \
- void *out, const void *inp) \
-{ void *iv; \
- asm volatile ( "pushl %%ebx\n" \
- " leal 16(%0),%%edx\n" \
- " leal 32(%0),%%ebx\n" \
- rep_xcrypt "\n" \
- " popl %%ebx" \
- : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
- : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \
- : "edx", "cc", "memory"); \
- return iv; \
-}
#endif
-#elif defined(__x86_64__) || defined(__x86_64)
+/* Interface to assembler module */
+unsigned int padlock_capability();
+void padlock_key_bswap(AES_KEY *key);
+void padlock_verify_context(struct padlock_cipher_data *ctx);
+void padlock_reload_key();
+void padlock_aes_block(void *out, const void *inp,
+ struct padlock_cipher_data *ctx);
+int padlock_ecb_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_cbc_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_cfb_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_ofb_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_ctr32_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_xstore(void *out,int edx);
+void padlock_sha1_oneshot(void *ctx,const void *inp,size_t len);
+void padlock_sha1(void *ctx,const void *inp,size_t len);
+void padlock_sha256_oneshot(void *ctx,const void *inp,size_t len);
+void padlock_sha256(void *ctx,const void *inp,size_t len);
/* Load supported features of the CPU to see if
the PadLock is available. */
static int
padlock_available(void)
{
- char vendor_string[16];
- unsigned int eax, edx;
- size_t scratch;
-
- /* Are we running on the Centaur (VIA) CPU? */
- eax = 0x00000000;
- vendor_string[12] = 0;
- asm volatile (
- "movq %%rbx,%1\n"
- "cpuid\n"
- "movl %%ebx,(%2)\n"
- "movl %%edx,4(%2)\n"
- "movl %%ecx,8(%2)\n"
- "