summaryrefslogtreecommitdiffstats
path: root/crypto
diff options
context:
space:
mode:
author: Andy Polyakov <appro@openssl.org> 2016-08-24 17:13:09 +0200
committer: Andy Polyakov <appro@openssl.org> 2016-08-26 11:53:03 +0200
commit3953bf53dac62bf8d2e5ebeabc94df388576a8ce (patch)
tree504e181e6f231a8db692c560e486d65f1f4ba0e8 /crypto
parent09f0535681cfa32a137707b61ad94e06b5f2b476 (diff)
ec/asm/ecp_nistz256-x86_64.pl: /cmovb/cmovc/ as nasm doesn't recognize cmovb.
Reviewed-by: Richard Levitte <levitte@openssl.org>
Reviewed-by: Matt Caswell <matt@openssl.org>
(cherry picked from commit d3034d31e7c04b334dd245504dd4f56e513ca115)
Diffstat (limited to 'crypto')
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-x86_64.pl  80
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/crypto/ec/asm/ecp_nistz256-x86_64.pl b/crypto/ec/asm/ecp_nistz256-x86_64.pl
index 8b9da5edb3..7948bf71b5 100755
--- a/crypto/ec/asm/ecp_nistz256-x86_64.pl
+++ b/crypto/ec/asm/ecp_nistz256-x86_64.pl
@@ -149,12 +149,12 @@ ecp_nistz256_mul_by_2:
sbb 8*3($a_ptr), $a3
sbb \$0, $t4
- cmovb $t0, $a0
- cmovb $t1, $a1
+ cmovc $t0, $a0
+ cmovc $t1, $a1
mov $a0, 8*0($r_ptr)
- cmovb $t2, $a2
+ cmovc $t2, $a2
mov $a1, 8*1($r_ptr)
- cmovb $t3, $a3
+ cmovc $t3, $a3
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
@@ -253,10 +253,10 @@ ecp_nistz256_mul_by_3:
sbb .Lpoly+8*3(%rip), $a3
sbb \$0, $t4
- cmovb $t0, $a0
- cmovb $t1, $a1
- cmovb $t2, $a2
- cmovb $t3, $a3
+ cmovc $t0, $a0
+ cmovc $t1, $a1
+ cmovc $t2, $a2
+ cmovc $t3, $a3
xor $t4, $t4
add 8*0($a_ptr), $a0 # a0:a3+=a_ptr[0:3]
@@ -275,12 +275,12 @@ ecp_nistz256_mul_by_3:
sbb .Lpoly+8*3(%rip), $a3
sbb \$0, $t4
- cmovb $t0, $a0
- cmovb $t1, $a1
+ cmovc $t0, $a0
+ cmovc $t1, $a1
mov $a0, 8*0($r_ptr)
- cmovb $t2, $a2
+ cmovc $t2, $a2
mov $a1, 8*1($r_ptr)
- cmovb $t3, $a3
+ cmovc $t3, $a3
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
@@ -321,12 +321,12 @@ ecp_nistz256_add:
sbb 8*3($a_ptr), $a3
sbb \$0, $t4
- cmovb $t0, $a0
- cmovb $t1, $a1
+ cmovc $t0, $a0
+ cmovc $t1, $a1
mov $a0, 8*0($r_ptr)
- cmovb $t2, $a2
+ cmovc $t2, $a2
mov $a1, 8*1($r_ptr)
- cmovb $t3, $a3
+ cmovc $t3, $a3
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
@@ -1858,12 +1858,12 @@ __ecp_nistz256_add_toq:
sbb $poly3, $a3
sbb \$0, $t4
- cmovb $t0, $a0
- cmovb $t1, $a1
+ cmovc $t0, $a0
+ cmovc $t1, $a1
mov $a0, 8*0($r_ptr)
- cmovb $t2, $a2
+ cmovc $t2, $a2
mov $a1, 8*1($r_ptr)
- cmovb $t3, $a3
+ cmovc $t3, $a3
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
@@ -1948,12 +1948,12 @@ __ecp_nistz256_mul_by_2q:
sbb $poly3, $a3
sbb \$0, $t4
- cmovb $t0, $a0
- cmovb $t1, $a1
+ cmovc $t0, $a0
+ cmovc $t1, $a1
mov $a0, 8*0($r_ptr)
- cmovb $t2, $a2
+ cmovc $t2, $a2
mov $a1, 8*1($r_ptr)
- cmovb $t3, $a3
+ cmovc $t3, $a3
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
@@ -2424,13 +2424,13 @@ $code.=<<___;
sbb $poly3, $acc3
sbb \$0, $t4
- cmovb $t0, $acc0
+ cmovc $t0, $acc0
mov 8*0($a_ptr), $t0
- cmovb $t1, $acc1
+ cmovc $t1, $acc1
mov 8*1($a_ptr), $t1
- cmovb $t2, $acc2
+ cmovc $t2, $acc2
mov 8*2($a_ptr), $t2
- cmovb $t3, $acc3
+ cmovc $t3, $acc3
mov 8*3($a_ptr), $t3
call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
@@ -2728,13 +2728,13 @@ $code.=<<___;
sbb $poly3, $acc3
sbb \$0, $t4
- cmovb $t0, $acc0
+ cmovc $t0, $acc0
mov 8*0($a_ptr), $t0
- cmovb $t1, $acc1
+ cmovc $t1, $acc1
mov 8*1($a_ptr), $t1
- cmovb $t2, $acc2
+ cmovc $t2, $acc2
mov 8*2($a_ptr), $t2
- cmovb $t3, $acc3
+ cmovc $t3, $acc3
mov 8*3($a_ptr), $t3
call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
@@ -2888,12 +2888,12 @@ __ecp_nistz256_add_tox:
sbb $poly3, $a3
sbb \$0, $t4
- cmovb $t0, $a0
- cmovb $t1, $a1
+ cmovc $t0, $a0
+ cmovc $t1, $a1
mov $a0, 8*0($r_ptr)
- cmovb $t2, $a2
+ cmovc $t2, $a2
mov $a1, 8*1($r_ptr)
- cmovb $t3, $a3
+ cmovc $t3, $a3
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
@@ -2983,12 +2983,12 @@ __ecp_nistz256_mul_by_2x:
sbb $poly3, $a3
sbb \$0, $t4
- cmovb $t0, $a0
- cmovb $t1, $a1
+ cmovc $t0, $a0
+ cmovc $t1, $a1
mov $a0, 8*0($r_ptr)
- cmovb $t2, $a2
+ cmovc $t2, $a2
mov $a1, 8*1($r_ptr)
- cmovb $t3, $a3
+ cmovc $t3, $a3
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)