/* Copyright (c) 2014-2016 Cryptography Research, Inc.
* Released under the MIT License. See LICENSE.txt for license information.
*/
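
/* Field arithmetic for p = 2^448 - 2^224 - 1 (the "Goldilocks" prime).
 * Elements are held as 8 limbs of 56-bit place value stored in 64-bit words.
 * GF_HEADROOM is understood here to bound how much unreduced growth the limbs
 * may accumulate before gf_weak_reduce must be called (an interpretation; the
 * macro is not otherwise documented in this file).
 */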
#define GF_HEADROOM 60
#define FIELD_LITERAL(a,b,c,d,e,f,g,h) {{a,b,c,d,e,f,g,h}}
#define LIMB_PLACE_VALUE(i) 56
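
/* Add two field elements limb-wise, with no carry propagation and no
 * reduction.  The commented-out loop below is the scalar equivalent of the
 * vectorized form. */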
void gf_add_RAW (gf out, const gf a, const gf b) {
    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
        ((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] + ((const uint64xn_t*)b)[i];
    }
    /*
    unsigned int i;
    for (i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
        out->limb[i] = a->limb[i] + b->limb[i];
    }
    */
}
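
/* Subtract limb-wise, again with no carry propagation and no reduction.
 * Limbs may wrap modulo 2^64; callers are expected to follow up with gf_bias
 * (and a reduce) to restore nonnegative limb values. */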
void gf_sub_RAW (gf out, const gf a, const gf b) {
    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
        ((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] - ((const uint64xn_t*)b)[i];
    }
    /*
    unsigned int i;
    for (i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
        out->limb[i] = a->limb[i] - b->limb[i];
    }
    */
}
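
/* Add amt*p to a, where p = 2^448 - 2^224 - 1 in base 2^56: every limb gets
 * co1 = amt*(2^56 - 1) except limb 4 (the 2^224 position), which gets
 * co2 = amt*(2^56 - 2).  The value is unchanged mod p, but the added bias
 * keeps limbs from going negative around subtractions. */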
void gf_bias (gf a, int amt) {
    uint64_t co1 = ((1ull<<56)-1)*amt, co2 = co1-amt;
#if __AVX2__
    uint64x4_t lo = {co1,co1,co1,co1}, hi = {co2,co1,co1,co1};
    uint64x4_t *aa = (uint64x4_t*) a;
    aa[0] += lo;
    aa[1] += hi;
#elif __SSE2__
    uint64x2_t lo = {co1,co1}, hi = {co2,co1};
    uint64x2_t *aa = (uint64x2_t*) a;
    aa[0] += lo;
    aa[1] += lo;
    aa[2] += hi;
    aa[3] += lo;
#else
    for (unsigned int i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
        a->limb[i] += (i==4) ? co2 : co1;
    }
#endif
}
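
/* Propagate each limb's bits above 2^56 into the next limb.  The carry out
 * of limb 7 wraps into both limb 0 and limb 4, using
 * 2^448 == 2^224 + 1 (mod p).  Afterwards every limb fits in at most 57 bits. */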
void gf_weak_reduce (gf a) {
    /* PERF: use pshufb/palignr if anyone cares about speed of this */
    uint64_t mask = (1ull<<56) - 1;
    uint64_t tmp = a->limb[7] >> 56;
    a->limb[4] += tmp;
    for (unsigned int i=7; i>0; i--) {
        a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>56);
    }
    a->limb[0] = (a->limb[0] & mask) + tmp;
}
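
/* Illustrative composition of the raw primitives into a full subtraction
 * (the library's own gf_sub is defined elsewhere):
 *     gf_sub_RAW(d, a, b);    raw limb-wise difference, may wrap mod 2^64
 *     gf_bias(d, 2);          add 2*p so every limb is nonnegative
 *     gf_weak_reduce(d);      bring every limb back under 2^57
 */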