author     Andy Polyakov <appro@openssl.org>    2015-04-02 10:17:42 +0200
committer  Andy Polyakov <appro@openssl.org>    2015-04-20 15:06:22 +0200
commit     313e6ec11fb8a7bda1676ce5804bee8755664141 (patch)
tree       48646a0e7df9d05ab25b537a201c26f1bfee0b0b /crypto/modes
parent     cc98b998b82c4cf4e83ccaf4c3cc4963d2f9eace (diff)
Add assembly support for 32-bit iOS.
Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Richard Levitte <levitte@openssl.org>
Diffstat (limited to 'crypto/modes')
-rw-r--r--  crypto/modes/asm/ghash-armv4.pl  37
1 file changed, 27 insertions(+), 10 deletions(-)
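The change hooks ghash-armv4.pl into the flavour/translator scheme already used by OpenSSL's other ARM perlasm modules: given a flavour argument, the script pipes its generated assembly through arm-xlate.pl, which adapts it for the target assembler; it also remaps two pre-UAL mnemonics under __APPLE__ and drops NEON alignment hints (both discussed below). A hedged usage sketch in Perl-comment form; the "ios32" flavour name is an assumption drawn from OpenSSL's perlasm schemes of the period, not something stated in this diff:

    # Usage sketch (illustrative; "ios32" is an assumed flavour name):
    #   perl ghash-armv4.pl ghash-armv4.S        -- raw GNU-as output, as before
    #   perl ghash-armv4.pl ios32 ghash-armv4.S  -- output piped through arm-xlate.pl
    #   perl ghash-armv4.pl void ghash-armv4.S   -- flavour "void" skips translation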
diff --git a/crypto/modes/asm/ghash-armv4.pl b/crypto/modes/asm/ghash-armv4.pl
index 44521f8984..7311ad2966 100644
--- a/crypto/modes/asm/ghash-armv4.pl
+++ b/crypto/modes/asm/ghash-armv4.pl
@@ -71,8 +71,20 @@
# *native* byte order on current platform. See gcm128.c for working
# example...
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
+$flavour = shift;
+if ($flavour=~/^\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+ die "can't locate arm-xlate.pl";
+
+ open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+ open STDOUT,">$output";
+}
$Xi="r0"; # argument block
$Htbl="r1";
@@ -126,6 +138,11 @@ $code=<<___;
.text
.code 32
+#ifdef __APPLE__
+#define ldrplb ldrbpl
+#define ldrneb ldrbne
+#endif
+
.type rem_4bit,%object
.align 5
rem_4bit:
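The __APPLE__ block papers over a syntax difference: in the old divided ARM syntax the condition code precedes the size suffix (ldr-pl-b, ldr-ne-b), while in the unified syntax (UAL) that Apple's clang-based assembler expects, the size suffix comes first (ldrb-pl, ldrb-ne). Since the generated .S file is run through the C preprocessor, the #defines rewrite the mnemonics only when building for Apple targets. An illustrative perlasm fragment, not taken from this patch:

    # Illustrative only: a conditional byte load spelled in divided
    # syntax; on Apple targets cpp rewrites it to the UAL spelling.
    my $code = <<___;
    #ifdef __APPLE__
    #define ldrneb ldrbne
    #endif

        cmp     r3,#0
        ldrneb  r12,[r2],#1     @ divided syntax; macro turns it into ldrbne
    ___
    print $code;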
@@ -373,9 +390,9 @@ $code.=<<___;
.type gcm_init_neon,%function
.align 4
gcm_init_neon:
- vld1.64 $IN#hi,[r1,:64]! @ load H
+ vld1.64 $IN#hi,[r1]! @ load H
vmov.i8 $t0,#0xe1
- vld1.64 $IN#lo,[r1,:64]
+ vld1.64 $IN#lo,[r1]
vshl.i64 $t0#hi,#57
vshr.u64 $t0#lo,#63 @ t0=0xc2....01
vdup.8 $t1,$IN#hi[7]
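This and the remaining hunks all make the same change: the ",:64" address-alignment hints are dropped from the NEON loads and stores of H and Xi. The hint promises the hardware a 64-bit-aligned address and faults at run time if the promise is broken. The commit does not state its motivation, but a plausible one is Apple's 32-bit ARM ABI, which (unlike standard AAPCS) aligns 64-bit types on 4-byte boundaries, so Xi and H may be only 4-byte aligned; the plain-addressed forms accept that. An illustrative fragment with assumed registers, not from the patch:

    # Illustrative only: hinted vs. unhinted NEON load.
    my $code = <<___;
        vld1.64 {d0},[r1,:64]!  @ asserts r1 is 8-byte aligned; faults otherwise
        vld1.64 {d0},[r1]!      @ no assertion; any alignment the ISA allows
    ___
    print $code;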
@@ -394,8 +411,8 @@ gcm_init_neon:
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
- vld1.64 $IN#hi,[$Xi,:64]! @ load Xi
- vld1.64 $IN#lo,[$Xi,:64]!
+ vld1.64 $IN#hi,[$Xi]! @ load Xi
+ vld1.64 $IN#lo,[$Xi]!
vmov.i64 $k48,#0x0000ffffffffffff
vldmia $Htbl,{$Hlo-$Hhi} @ load twisted H
vmov.i64 $k32,#0x00000000ffffffff
@@ -412,8 +429,8 @@ gcm_gmult_neon:
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
- vld1.64 $Xl#hi,[$Xi,:64]! @ load Xi
- vld1.64 $Xl#lo,[$Xi,:64]!
+ vld1.64 $Xl#hi,[$Xi]! @ load Xi
+ vld1.64 $Xl#lo,[$Xi]!
vmov.i64 $k48,#0x0000ffffffffffff
vldmia $Htbl,{$Hlo-$Hhi} @ load twisted H
vmov.i64 $k32,#0x00000000ffffffff
@@ -468,8 +485,8 @@ $code.=<<___;
vrev64.8 $Xl,$Xl
#endif
sub $Xi,#16
- vst1.64 $Xl#hi,[$Xi,:64]! @ write out Xi
- vst1.64 $Xl#lo,[$Xi,:64]
+ vst1.64 $Xl#hi,[$Xi]! @ write out Xi
+ vst1.64 $Xl#lo,[$Xi]
ret @ bx lr
.size gcm_ghash_neon,.-gcm_ghash_neon