author    Andy Polyakov <appro@openssl.org>  2016-08-20 22:04:21 +0200
committer Matt Caswell <matt@openssl.org>    2016-08-24 10:44:48 +0100
commit    b62b2454fadfccaf5e055a1810d72174c2633b8f (patch)
tree      d71ad6dc4ecb36f7c14474fac059d1a112f7de1c /crypto/ec/ecp_nistz256.c
parent    9e421962e1cd58e302ebd8aca5d5a44198194243 (diff)
ec/asm/ecp_nistz256-x86_64.pl: addition to perform stricter reduction.
Addition was not preserving inputs' property of being fully reduced.

Thanks to Brian Smith for reporting this.

Reviewed-by: Rich Salz <rsalz@openssl.org>
Diffstat (limited to 'crypto/ec/ecp_nistz256.c')
 -rw-r--r--  crypto/ec/ecp_nistz256.c  |  31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)
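
What the fix enforces, in one sentence: ecp_nistz256_add computes res = a + b mod P, and before this patch the result of adding two fully reduced inputs could land in [P, 2^256), i.e. only partially reduced. Below is a minimal C sketch of the stricter behaviour, under stated assumptions: it stands in for the x86_64 assembly the commit actually changes, the name p256_add_strict is hypothetical, and a 4x64-bit limb layout plus a compiler providing unsigned __int128 (GCC/Clang) are assumed.

#include <stdint.h>

#define P256_LIMBS 4

/* The P-256 prime as four little-endian 64-bit limbs. */
static const uint64_t P256[P256_LIMBS] = {
    0xffffffffffffffffULL, 0x00000000ffffffffULL,
    0x0000000000000000ULL, 0xffffffff00000001ULL
};

/*
 * res = a + b mod P-256, with res strictly in [0, P) whenever both
 * inputs are (the sketch assumes a, b < P, as the real code may too).
 */
static void p256_add_strict(uint64_t res[P256_LIMBS],
                            const uint64_t a[P256_LIMBS],
                            const uint64_t b[P256_LIMBS])
{
    uint64_t sum[P256_LIMBS], diff[P256_LIMBS];
    uint64_t carry = 0, borrow = 0, mask;
    int i;

    /* sum = a + b, propagating the carry across the limbs */
    for (i = 0; i < P256_LIMBS; i++) {
        unsigned __int128 t = (unsigned __int128)a[i] + b[i] + carry;
        sum[i] = (uint64_t)t;
        carry = (uint64_t)(t >> 64);           /* 0 or 1 */
    }

    /* diff = sum - P, remembering whether the subtraction borrowed */
    for (i = 0; i < P256_LIMBS; i++) {
        unsigned __int128 t = (unsigned __int128)sum[i] - P256[i] - borrow;
        diff[i] = (uint64_t)t;
        borrow = (uint64_t)(t >> 64) & 1;      /* 1 iff this limb borrowed */
    }

    /*
     * a + b >= P exactly when the 256-bit addition carried out or the
     * trial subtraction did not borrow; turn that bit into an
     * all-ones/all-zeros mask so the selection is branch-free.
     */
    mask = 0 - (carry | (borrow ^ 1));
    for (i = 0; i < P256_LIMBS; i++)
        res[i] = (diff[i] & mask) | (sum[i] & ~mask);
}

The design point worth noting: the choice between sum and sum - P is made with a mask derived from the carry and borrow bits rather than with a branch, so the selection never depends on secret data; that constant-time discipline is what makes option a) in the comment added below workable.
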
diff --git a/crypto/ec/ecp_nistz256.c b/crypto/ec/ecp_nistz256.c
index d2fabe5349..564a8894b9 100644
--- a/crypto/ec/ecp_nistz256.c
+++ b/crypto/ec/ecp_nistz256.c
@@ -89,19 +89,36 @@ struct nistz256_pre_comp_st {
};
/* Functions implemented in assembly */
+/*
+ * Most of the functions below *preserve* the property of the inputs
+ * being fully reduced, i.e. in the [0, modulus) range: if the inputs
+ * are fully reduced, then so is the output. Note that the reverse is
+ * not true: given partially reduced inputs, the output can be either
+ * partially or fully reduced. "Most" refers to the fact that, given
+ * the calculation flow, one can tolerate the addition (the 1st
+ * function below) producing a partially reduced result *if* the
+ * multiplications by 2 and 3, which customarily use addition, fully
+ * reduce it. This effectively gives two options: a) addition produces
+ * a fully reduced result [as long as the inputs are, just like the
+ * remaining functions]; b) addition is allowed to produce a partially
+ * reduced result, but the multiplications by 2 and 3 perform an
+ * additional reduction step. The choice between the two can be
+ * platform-specific, but it was a) in all cases so far...
+ */
+/* Modular add: res = a+b mod P */
+void ecp_nistz256_add(BN_ULONG res[P256_LIMBS],
+ const BN_ULONG a[P256_LIMBS],
+ const BN_ULONG b[P256_LIMBS]);
/* Modular mul by 2: res = 2*a mod P */
void ecp_nistz256_mul_by_2(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS]);
-/* Modular div by 2: res = a/2 mod P */
-void ecp_nistz256_div_by_2(BN_ULONG res[P256_LIMBS],
- const BN_ULONG a[P256_LIMBS]);
/* Modular mul by 3: res = 3*a mod P */
void ecp_nistz256_mul_by_3(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS]);
-/* Modular add: res = a+b mod P */
-void ecp_nistz256_add(BN_ULONG res[P256_LIMBS],
- const BN_ULONG a[P256_LIMBS],
- const BN_ULONG b[P256_LIMBS]);
+
+/* Modular div by 2: res = a/2 mod P */
+void ecp_nistz256_div_by_2(BN_ULONG res[P256_LIMBS],
+ const BN_ULONG a[P256_LIMBS]);
/* Modular sub: res = a-b mod P */
void ecp_nistz256_sub(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS],