/*
 * Copyright (C) 2021 Denys Vlasenko
 *
 * Licensed under GPLv2, see file LICENSE in this source tree.
 */
#include "tls.h"

#define SP_DEBUG          0
#define FIXED_SECRET      0
#define FIXED_PEER_PUBKEY 0

#define ALLOW_ASM         1

#if SP_DEBUG
# define dbg(...) fprintf(stderr, __VA_ARGS__)
static void dump_hex(const char *fmt, const void *vp, int len)
{
	char hexbuf[32 * 1024 + 4];
	const uint8_t *p = vp;

	bin2hex(hexbuf, (void*)p, len)[0] = '\0';
	dbg(fmt, hexbuf);
}
#else
# define dbg(...) ((void)0)
# define dump_hex(...) ((void)0)
#endif

typedef uint32_t sp_digit;
typedef int32_t signed_sp_digit;

/* 64-bit optimizations:
 * if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff,
 * then loads and stores can be done in 64-bit chunks.
 *
 * A narrower case is when arch is also little-endian (such as x86_64),
 * then "LSW first", uint32[8] and uint64[4] representations are equivalent,
 * and arithmetic can be done in 64 bits too.
 */
#if defined(__GNUC__) && defined(__x86_64__)
# define UNALIGNED_LE_64BIT 1
#else
# define UNALIGNED_LE_64BIT 0
#endif
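/* Illustration of the equivalence: the 64-bit value 0x0000000100000002
 * stored LSW-first as uint32[2] is {0x00000002, 0x00000001}; read back
 * as one little-endian uint64 it is 0x0000000100000002 again,
 * so casting sp_digit[8] to uint64_t[4] reads the same number.
 */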

/* The code below is taken from parts of
 *  wolfssl-3.15.3/wolfcrypt/src/sp_c32.c
 * and heavily modified.
 */

typedef struct sp_point {
	sp_digit x[8]
#if ULONG_MAX > 0xffffffff
		/* Make sp_point[] arrays not be 64-bit misaligned */
		ALIGNED(8)
#endif
	;
	sp_digit y[8];
	sp_digit z[8];
	int infinity;
} sp_point;

/* The modulus (prime) of the curve P256. */
static const sp_digit p256_mod[8] ALIGNED(8) = {
	0xffffffff,0xffffffff,0xffffffff,0x00000000,
	0x00000000,0x00000000,0x00000001,0xffffffff,
};
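/* In closed form: p = 2^256 - 2^224 + 2^192 + 2^96 - 1 (words above are LSW first) */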

#define p256_mp_mod ((sp_digit)0x000001)
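/* p256_mp_mod is -1/p mod 2^32: the lowest word of p is 0xffffffff == -1
 * (mod 2^32), which is its own inverse, so -1/p == -(-1) == 1 there.
 */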

/* Normalize the values in each word to 32 bits - NOP */
#define sp_256_norm_8(a) ((void)0)

/* Write r as big endian to byte array.
 * Fixed length number of bytes written: 32
 *
 * r  A single precision integer.
 * a  Byte array.
 */
#if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff
static void sp_256_to_bin_8(const sp_digit* rr, uint8_t* a)
{
	int i;
	const uint64_t* r = (void*)rr;

	sp_256_norm_8(rr);

	r += 4;
	for (i = 0; i < 4; i++) {
		r--;
		move_to_unaligned64(a, SWAP_BE64(*r));
		a += 8;
	}
}
#else
static void sp_256_to_bin_8(const sp_digit* r, uint8_t* a)
{
	int i;

	sp_256_norm_8(r);

	r += 8;
	for (i = 0; i < 8; i++) {
		r--;
		move_to_unaligned32(a, SWAP_BE32(*r));
		a += 4;
	}
}
#endif
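/* Example: the number 1 (r[0] = 1, all other words 0) serializes
 * to 31 zero bytes followed by one 0x01 byte.
 */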

/* Read big endian unsigned byte array into r.
 * Fixed length number of bytes read: 32
 *
 * r  A single precision integer.
 * a  Byte array.
 */
#if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff
static void sp_256_from_bin_8(sp_digit* rr, const uint8_t* a)
{
	int i;
	uint64_t* r = (void*)rr;

	r += 4;
	for (i = 0; i < 4; i++) {
		uint64_t v;
		move_from_unaligned64(v, a);
		*--r = SWAP_BE64(v);
		a += 8;
	}
}
#else
static void sp_256_from_bin_8(sp_digit* r, const uint8_t* a)
{
	int i;

	r += 8;
	for (i = 0; i < 8; i++) {
		sp_digit v;
		move_from_unaligned32(v, a);
		*--r = SWAP_BE32(v);
		a += 4;
	}
}
#endif

#if SP_DEBUG
static void dump_256(const char *fmt, const sp_digit* r)
{
	uint8_t b32[32];
	sp_256_to_bin_8(r, b32);
	dump_hex(fmt, b32, 32);
}
static void dump_512(const char *fmt, const sp_digit* r)
{
	uint8_t b64[64];
	sp_256_to_bin_8(r, b64 + 32);
	sp_256_to_bin_8(r+8, b64);
	dump_hex(fmt, b64, 64);
}
#else
# define dump_256(...) ((void)0)
# define dump_512(...) ((void)0)
#endif

/* Convert a point of big-endian 32-byte x,y pair to type sp_point. */
static void sp_256_point_from_bin2x32(sp_point* p, const uint8_t *bin2x32)
{
	memset(p, 0, sizeof(*p));
	/*p->infinity = 0;*/
	sp_256_from_bin_8(p->x, bin2x32);
	sp_256_from_bin_8(p->y, bin2x32 + 32);
	p->z[0] = 1; /* p->z = 1 */
}

/* Compare a with b.
 *
 * return -ve, 0 or +ve if a is less than, equal to or greater than b
 * respectively.
 */
#if UNALIGNED_LE_64BIT
static signed_sp_digit sp_256_cmp_8(const sp_digit* aa, const sp_digit* bb)
{
	const uint64_t* a = (void*)aa;
	const uint64_t* b = (void*)bb;
	int i;
	for (i = 3; i >= 0; i--) {
		if (a[i] == b[i])
			continue;
		return (a[i] > b[i]) * 2 - 1;
	}
	return 0;
}
#else
static signed_sp_digit sp_256_cmp_8(const sp_digit* a, const sp_digit* b)
{
	int i;
	for (i = 7; i >= 0; i--) {
/*		signed_sp_digit r = a[i] - b[i];
 *		if (r != 0)
 *			return r;
 * does not work: think about a[i]=0, b[i]=0xffffffff
 */
		if (a[i] == b[i])
			continue;
		return (a[i] > b[i]) * 2 - 1;
	}
	return 0;
}
#endif

/* Compare two numbers to determine if they are equal.
 *
 * return 1 when equal and 0 otherwise.
 */
static int sp_256_cmp_equal_8(const sp_digit* a, const sp_digit* b)
{
	return sp_256_cmp_8(a, b) == 0;
}

/* Add b to a into r. (r = a + b). Return !0 on overflow */
static int sp_256_add_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
{
#if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
	sp_digit reg;
	asm volatile (
"\n		movl	(%0), %3"
"\n		addl	(%1), %3"
"\n		movl	%3, (%2)"
"\n"
"\n		movl	1*4(%0), %3"
"\n		adcl	1*4(%1), %3"
"\n		movl	%3, 1*4(%2)"
"\n"
"\n		movl	2*4(%0), %3"
"\n		adcl	2*4(%1), %3"
"\n		movl	%3, 2*4(%2)"
"\n"
"\n		movl	3*4(%0), %3"
"\n		adcl	3*4(%1), %3"
"\n		movl	%3, 3*4(%2)"
"\n"
"\n		movl	4*4(%0), %3"
"\n		adcl	4*4(%1), %3"
"\n		movl	%3, 4*4(%2)"
"\n"
"\n		movl	5*4(%0), %3"
"\n		adcl	5*4(%1), %3"
"\n		movl	%3, 5*4(%2)"
"\n"
"\n		movl	6*4(%0), %3"
"\n		adcl	6*4(%1), %3"
"\n		movl	%3, 6*4(%2)"
"\n"
"\n		movl	7*4(%0), %3"
"\n		adcl	7*4(%1), %3"
"\n		movl	%3, 7*4(%2)"
"\n"
"\n		sbbl	%3, %3"
"\n"
		: "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
		: "0" (a), "1" (b), "2" (r)
		: "memory"
	);
	return reg;
#elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
	uint64_t reg;
	asm volatile (
"\n		movq	(%0), %3"
"\n		addq	(%1), %3"
"\n		movq	%3, (%2)"
"\n"
"\n		movq	1*8(%0), %3"
"\n		adcq	1*8(%1), %3"
"\n		movq	%3, 1*8(%2)"
"\n"
"\n		movq	2*8(%0), %3"
"\n		adcq	2*8(%1), %3"
"\n		movq	%3, 2*8(%2)"
"\n"
"\n		movq	3*8(%0), %3"
"\n		adcq	3*8(%1), %3"
"\n		movq	%3, 3*8(%2)"
"\n"
"\n		sbbq	%3, %3"
"\n"
		: "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
		: "0" (a), "1" (b), "2" (r)
		: "memory"
	);
	return reg;
#else
	int i;
	sp_digit carry;

	carry = 0;
	for (i = 0; i < 8; i++) {
		sp_digit w, v;
		w = b[i] + carry;
		v = a[i];
		if (w != 0) {
			v = a[i] + w;
			carry = (v < a[i]);
			/* hope compiler detects above as "carry flag set" */
		}
		/* else: b + carry == 0, two cases:
		 * b:ffffffff, carry:1
		 * b:00000000, carry:0
		 * in either case, r[i] = a[i] and carry remains unchanged
		 */
		r[i] = v;
	}
	return carry;
#endif
}

/* Sub b from a into r. (r = a - b). Return !0 on underflow */
static int sp_256_sub_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
{
#if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
	sp_digit reg;
	asm volatile (
"\n		movl	(%0), %3"
"\n		subl	(%1), %3"
"\n		movl	%3, (%2)"
"\n"
"\n		movl	1*4(%0), %3"
"\n		sbbl	1*4(%1), %3"
"\n		movl	%3, 1*4(%2)"
"\n"
"\n		movl	2*4(%0), %3"
"\n		sbbl	2*4(%1), %3"
"\n		movl	%3, 2*4(%2)"
"\n"
"\n		movl	3*4(%0), %3"
"\n		sbbl	3*4(%1), %3"
"\n		movl	%3, 3*4(%2)"
"\n"
"\n		movl	4*4(%0), %3"
"\n		sbbl	4*4(%1), %3"
"\n		movl	%3, 4*4(%2)"
"\n"
"\n		movl	5*4(%0), %3"
"\n		sbbl	5*4(%1), %3"
"\n		movl	%3, 5*4(%2)"
"\n"
"\n		movl	6*4(%0), %3"
"\n		sbbl	6*4(%1), %3"
"\n		movl	%3, 6*4(%2)"
"\n"
"\n		movl	7*4(%0), %3"
"\n		sbbl	7*4(%1), %3"
"\n		movl	%3, 7*4(%2)"
"\n"
"\n		sbbl	%3, %3"
"\n"
		: "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
		: "0" (a), "1" (b), "2" (r)
		: "memory"
	);
	return reg;
#elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
	uint64_t reg;
	asm volatile (
"\n		movq	(%0), %3"
"\n		subq	(%1), %3"
"\n		movq	%3, (%2)"
"\n"
"\n		movq	1*8(%0), %3"
"\n		sbbq	1*8(%1), %3"
"\n		movq	%3, 1*8(%2)"
"\n"
"\n		movq	2*8(%0), %3"
"\n		sbbq	2*8(%1), %3"
"\n		movq	%3, 2*8(%2)"
"\n"
"\n		movq	3*8(%0), %3"
"\n		sbbq	3*8(%1), %3"
"\n		movq	%3, 3*8(%2)"
"\n"
"\n		sbbq	%3, %3"
"\n"
		: "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
		: "0" (a), "1" (b), "2" (r)
		: "memory"
	);
	return reg;
#else
	int i;
	sp_digit borrow;

	borrow = 0;
	for (i = 0; i < 8; i++) {
		sp_digit w, v;
		w = b[i] + borrow;
		v = a[i];
		if (w != 0) {
			v = a[i] - w;
			borrow = (v > a[i]);
			/* hope compiler detects above as "carry flag set" */
		}
		/* else: b + borrow == 0, two cases:
		 * b:ffffffff, borrow:1
		 * b:00000000, borrow:0
		 * in either case, r[i] = a[i] and borrow remains unchanged
		 */
		r[i] = v;
	}
	return borrow;
#endif
}

/* Sub p256_mod from r. (r = r - p256_mod). */
#if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
static void sp_256_sub_8_p256_mod(sp_digit* r)
{
//p256_mod[7..0] = ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff
	asm volatile (
"\n		subl	$0xffffffff, (%0)"
"\n		sbbl	$0xffffffff, 1*4(%0)"
"\n		sbbl	$0xffffffff, 2*4(%0)"
"\n		sbbl	$0, 3*4(%0)"
"\n		sbbl	$0, 4*4(%0)"
"\n		sbbl	$0, 5*4(%0)"
"\n		sbbl	$1, 6*4(%0)"
"\n		sbbl	$0xffffffff, 7*4(%0)"
"\n"
		: "=r" (r)
		: "0" (r)
		: "memory"
	);
}
#elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
static void sp_256_sub_8_p256_mod(sp_digit* r)
{
	uint64_t reg;
	uint64_t ooff;
//p256_mod[3..0] = ffffffff00000001 0000000000000000 00000000ffffffff ffffffffffffffff
	asm volatile (
"\n		addq	$1, (%0)"	// adding 1 is the same as subtracting ffffffffffffffff
"\n		cmc"			// only carry bit needs inverting
"\n"
"\n		sbbq	%1, 1*8(%0)"	// %1 holds 00000000ffffffff
"\n"
"\n		sbbq	$0, 2*8(%0)"
"\n"
"\n		movq	3*8(%0), %2"
"\n		sbbq	$0, %2"		// adding 00000000ffffffff (in %1)
"\n		addq	%1, %2"		// is the same as subtracting ffffffff00000001
"\n		movq	%2, 3*8(%0)"
"\n"
		: "=r" (r), "=r" (ooff), "=r" (reg)
		: "0" (r), "1" (0x00000000ffffffff)
		: "memory"
	);
}
#else
static void sp_256_sub_8_p256_mod(sp_digit* r)
{
	sp_256_sub_8(r, r, p256_mod);
}
#endif

/* Multiply a and b into r. (r = a * b)
 * r should be [16] array (512 bits), and must not coincide with a or b.
 */
static void sp_256to512_mul_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
{
#if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
	int k;
	uint32_t accl;
	uint32_t acch;

	acch = accl = 0;
	for (k = 0; k < 15; k++) {
		int i, j;
		uint32_t acc_hi;
		i = k - 7;
		if (i < 0)
			i = 0;
		j = k - i;
		acc_hi = 0;
		do {
////////////////////////
//			uint64_t m = ((uint64_t)a[i]) * b[j];
//			acc_hi:acch:accl += m;
			asm volatile (
			// a[i] is already loaded in %%eax
"\n			mull	%7"
"\n			addl	%%eax, %0"
"\n			adcl	%%edx, %1"
"\n			adcl	$0, %2"
			: "=rm" (accl), "=rm" (acch), "=rm" (acc_hi)
			: "0" (accl), "1" (acch), "2" (acc_hi), "a" (a[i]), "m" (b[j])
			: "cc", "dx"
			);
////////////////////////
			j--;
			i++;
		} while (i != 8 && i <= k);
		r[k] = accl;
		accl = acch;
		acch = acc_hi;
	}
	r[15] = accl;
#elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
	const uint64_t* aa = (const void*)a;
	const uint64_t* bb = (const void*)b;
	uint64_t* rr = (void*)r;
	int k;
	uint64_t accl;
	uint64_t acch;

	acch = accl = 0;
	for (k = 0; k < 7; k++) {
		int i, j;
		uint64_t acc_hi;
		i = k - 3;
		if (i < 0)
			i = 0;
		j = k - i;
		acc_hi = 0;
		do {
////////////////////////
//			uint128_t m = ((uint128_t)a[i]) * b[j];
//			acc_hi:acch:accl += m;
			asm volatile (
			// aa[i] is already loaded in %%rax
"\n			mulq	%7"
"\n			addq	%%rax, %0"
"\n			adcq	%%rdx, %1"
"\n			adcq	$0, %2"
			: "=rm" (accl), "=rm" (acch), "=rm" (acc_hi)
			: "0" (accl), "1" (acch), "2" (acc_hi), "a" (aa[i]), "m" (bb[j])
			: "cc", "dx"
			);
////////////////////////
			j--;
			i++;
		} while (i != 4 && i <= k);
		rr[k] = accl;
		accl = acch;
		acch = acc_hi;
	}
	rr[7] = accl;
#elif 0
	//TODO: arm assembly (untested)
	asm volatile (
"\n		mov	r5, #0"
"\n		mov	r6, #0"
"\n		mov	r7, #0"
"\n		mov	r8, #0"
"\n	1:"
"\n		subs	r3, r5, #28"
"\n		movcc	r3, #0"
"\n		sub	r4, r5, r3"
"\n	2:"
"\n		ldr	r14, [%[a], r3]"
"\n		ldr	r12, [%[b], r4]"
"\n		umull	r9, r10, r14, r12"
"\n		adds	r6, r6, r9"
"\n		adcs	r7, r7, r10"
"\n		adc	r8, r8, #0"
"\n		add	r3, r3, #4"
"\n		sub	r4, r4, #4"
"\n		cmp	r3, #32"
"\n		beq	3f"
"\n		cmp	r3, r5"
"\n		ble	2b"
"\n	3:"
"\n		str	r6, [%[r], r5]"
"\n		mov	r6, r7"
"\n		mov	r7, r8"
"\n		mov	r8, #0"
"\n		add	r5, r5, #4"
"\n		cmp	r5, #56"
"\n		ble	1b"
"\n		str	r6, [%[r], r5]"
		: [r] "r" (r), [a] "r" (a), [b] "r" (b)
		: "memory", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14"
	);
#else
	int i, j, k;
	uint64_t acc;

	acc = 0;
	for (k = 0; k < 15; k++) {
		uint32_t acc_hi;
		i = k - 7;
		if (i < 0)
			i = 0;
		j = k - i;
		acc_hi = 0;
		do {
			uint64_t m = ((uint64_t)a[i]) * b[j];
			acc += m;
			if (acc < m)
				acc_hi++;
			j--;
			i++;
		} while (i != 8 && i <= k);
		r[k] = acc;
		acc = (acc >> 32) | ((uint64_t)acc_hi << 32);
	}
	r[15] = acc;
#endif
}
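/* The loops above all use column-wise ("product scanning") multiplication:
 * column k accumulates every partial product a[i]*b[j] with i+j == k in a
 * three-word accumulator (acc_hi:acch:accl in the asm versions,
 * acc_hi:acc in the generic one), stores the low word as r[k], and shifts
 * the accumulator right by one word for the next column.
 */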

/* Shift number right one bit. Bottom bit is lost. */
#if UNALIGNED_LE_64BIT
static void sp_256_rshift1_8(sp_digit* rr, uint64_t carry)
{
	uint64_t *r = (void*)rr;
	int i;

	carry = (((uint64_t)!!carry) << 63);
	for (i = 3; i >= 0; i--) {
		uint64_t c = r[i] << 63;
		r[i] = (r[i] >> 1) | carry;
		carry = c;
	}
}
#else
static void sp_256_rshift1_8(sp_digit* r, sp_digit carry)
{
	int i;

	carry = (((sp_digit)!!carry) << 31);
	for (i = 7; i >= 0; i--) {
		sp_digit c = r[i] << 31;
		r[i] = (r[i] >> 1) | carry;
		carry = c;
	}
}
#endif

/* Divide the number by 2 mod the modulus (prime). (r = (r / 2) % m) */
static void sp_256_div2_8(sp_digit* r /*, const sp_digit* m*/)
{
	const sp_digit* m = p256_mod;

	int carry = 0;
	if (r[0] & 1)
		carry = sp_256_add_8(r, r, m);
	sp_256_norm_8(r);
	sp_256_rshift1_8(r, carry);
}
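/* Why this works: p is odd, so if r is odd, r + p is even and
 * (r + p)/2 == r * 2^-1 (mod p). The addition may carry out of
 * 256 bits; that carry becomes the bit shifted in from the top.
 */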

/* Add two Montgomery form numbers (r = (a + b) % m) */
static void sp_256_mont_add_8(sp_digit* r, const sp_digit* a, const sp_digit* b
		/*, const sp_digit* m*/)
{
//	const sp_digit* m = p256_mod;

	int carry = sp_256_add_8(r, a, b);
	sp_256_norm_8(r);
	if (carry) {
		sp_256_sub_8_p256_mod(r);
		sp_256_norm_8(r);
	}
}

/* Subtract two Montgomery form numbers (r = (a - b) % m) */
static void sp_256_mont_sub_8(sp_digit* r, const sp_digit* a, const sp_digit* b
		/*, const sp_digit* m*/)
{
	const sp_digit* m = p256_mod;

	int borrow;
	borrow = sp_256_sub_8(r, a, b);
	sp_256_norm_8(r);
	if (borrow) {
		sp_256_add_8(r, r, m);
		sp_256_norm_8(r);
	}
}

/* Double a Montgomery form number (r = (a + a) % m) */
static void sp_256_mont_dbl_8(sp_digit* r, const sp_digit* a /*, const sp_digit* m*/)
{
//	const sp_digit* m = p256_mod;

	int carry = sp_256_add_8(r, a, a);
	sp_256_norm_8(r);
	if (carry)
		sp_256_sub_8_p256_mod(r);
	sp_256_norm_8(r);
}

/* Triple a Montgomery form number (r = (a + a + a) % m) */
static void sp_256_mont_tpl_8(sp_digit* r, const sp_digit* a /*, const sp_digit* m*/)
{
//	const sp_digit* m = p256_mod;

	int carry = sp_256_add_8(r, a, a);
	sp_256_norm_8(r);
	if (carry) {
		sp_256_sub_8_p256_mod(r);
		sp_256_norm_8(r);
	}
	carry = sp_256_add_8(r, r, a);
	sp_256_norm_8(r);
	if (carry) {
		sp_256_sub_8_p256_mod(r);
		sp_256_norm_8(r);
	}
}

/* Shift the result in the high 256 bits down to the bottom. */
static void sp_512to256_mont_shift_8(sp_digit* r, sp_digit* a)
{
	memcpy(r, a + 8, sizeof(*r) * 8);
}

#if UNALIGNED_LE_64BIT
/* 64-bit little-endian optimized version.
 * See generic 32-bit version below for explanation.
 * The benefit of this version is: even though r[3] calculation is atrocious,
 * we call sp_256_mul_add_4() four times, not 8.
 * Measured run time improvement of curve_P256_compute_pubkey_and_premaster()
 * call on x86-64: from ~1500us to ~900us. Code size +32 bytes.
 */
static int sp_256_mul_add_4(uint64_t *r /*, const uint64_t* a, uint64_t b*/)
{
	uint64_t b = r[0];

# if 0
	const uint64_t* a = (const void*)p256_mod;
//a[3..0] = ffffffff00000001 0000000000000000 00000000ffffffff ffffffffffffffff
	uint128_t t;
	int i;
	t = 0;
	for (i = 0; i < 4; i++) {
		uint32_t t_hi;
		uint128_t m = ((uint128_t)b * a[i]) + r[i];
		t += m;
		t_hi = (t < m);
		r[i] = (uint64_t)t;
		t = (t >> 64) | ((uint128_t)t_hi << 64);
	}
	r[4] += (uint64_t)t;
	return (r[4] < (uint64_t)t); /* 1 if addition overflowed */
# else
	// Unroll, then optimize the above loop:
		//uint32_t t_hi;
		//uint128_t m;
		uint64_t t64, t64u;

		//m = ((uint128_t)b * a[0]) + r[0];
		//  Since b is r[0] and a[0] is ffffffffffffffff, the above optimizes to:
		//  m = r[0] * ffffffffffffffff + r[0] = ((r[0] << 64) - r[0]) + r[0] = r[0] << 64;
		//t += m;
		//  t = r[0] << 64 = b << 64;
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[0] = (uint64_t)t;
//		r[0] = 0;
//the store can be eliminated since caller won't look at lower 256 bits of the result
		//t = (t >> 64) | ((uint128_t)t_hi << 64);
		//  t = b;

		//m = ((uint128_t)b * a[1]) + r[1];
		//  Since a[1] is 00000000ffffffff, the above optimizes to:
		//  m = b * ffffffff + r[1] = (b * 100000000 - b) + r[1] = (b << 32) - b + r[1];
		//t += m;
		//  t = b + (b << 32) - b + r[1] = (b << 32) + r[1];
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[1] = (uint64_t)t;
		r[1] += (b << 32);
		//t = (t >> 64) | ((uint128_t)t_hi << 64);
		t64 = (r[1] < (b << 32));
		t64 += (b >> 32);

		//m = ((uint128_t)b * a[2]) + r[2];
		//  Since a[2] is 0000000000000000, the above optimizes to:
		//  m = b * 0 + r[2] = r[2];
		//t += m;
		//  t = t64 + r[2];
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[2] = (uint64_t)t;
		r[2] += t64;
		//t = (t >> 64) | ((uint128_t)t_hi << 64);
		t64 = (r[2] < t64);

		//m = ((uint128_t)b * a[3]) + r[3];
		//  Since a[3] is ffffffff00000001, the above optimizes to:
		//  m = b * ffffffff00000001 + r[3];
		//  m = b +  b*ffffffff00000000 + r[3]
		//  m = b + (b*ffffffff << 32) + r[3]
		//  m = b + (((b<<32) - b) << 32) + r[3]
		//t += m;
		//  t = t64 + (uint128_t)b + ((((uint128_t)b << 32) - b) << 32) + r[3];
		t64 += b;
		t64u = (t64 < b);
		t64 += r[3];
		t64u += (t64 < r[3]);
		{ // add ((((uint128_t)b << 32) - b) << 32):
			uint64_t lo, hi;
			//lo = ((b << 32) - b) << 32
			//hi = (((uint128_t)b << 32) - b) >> 32
			//but without uint128_t:
			hi = (b << 32) - b; /* make lower 32 bits of "hi", part 1 */
			b = (b >> 32) - (/*borrowed above?*/(b << 32) < b); /* upper 32 bits of "hi" are in b */
			lo = hi << 32;      /* (use "hi" value to calculate "lo",... */
			t64 += lo;          /* ...consume... */
			t64u += (t64 < lo); /* ..."lo") */
			hi >>= 32;          /* make lower 32 bits of "hi", part 2 */
			hi |= (b << 32);    /* combine lower and upper 32 bits */
			t64u += hi;         /* consume "hi" */
		}
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[3] = (uint64_t)t;
		r[3] = t64;
		//t = (t >> 64) | ((uint128_t)t_hi << 64);
		//  t = t64u;

	r[4] += t64u;
	return (r[4] < t64u); /* 1 if addition overflowed */
# endif
}

static void sp_512to256_mont_reduce_8(sp_digit* r, sp_digit* aa/*, const sp_digit* m, sp_digit mp*/)
{
//	const sp_digit* m = p256_mod;
	int i;
	uint64_t *a = (void*)aa;

	sp_digit carry = 0;
	for (i = 0; i < 4; i++) {
//		mu = a[i];
		if (sp_256_mul_add_4(a+i /*, m, mu*/)) {
			int j = i + 4;
 inc_next_word:
			if (++j > 7) { /* a[8] array has no more words? */
				carry++;
				continue;
			}
			if (++a[j] == 0) /* did this overflow too? */
				goto inc_next_word;
		}
	}
	sp_512to256_mont_shift_8(r, aa);
	if (carry != 0)
		sp_256_sub_8_p256_mod(r);
	sp_256_norm_8(r);
}

#else /* Generic 32-bit version */

/* Mul a by scalar b and add into r. (r += a * b)
 * a = p256_mod
 * b = r[0]
 */
static int sp_256_mul_add_8(sp_digit* r /*, const sp_digit* a, sp_digit b*/)
{
	sp_digit b = r[0];
	uint64_t t;

# if 0
	const sp_digit* a = p256_mod;
//a[7..0] = ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff
	int i;
	t = 0;
	for (i = 0; i < 8; i++) {
		uint32_t t_hi;
		uint64_t m = ((uint64_t)b * a[i]) + r[i];
		t += m;
		t_hi = (t < m);
		r[i] = (sp_digit)t;
		t = (t >> 32) | ((uint64_t)t_hi << 32);
	}
	r[8] += (sp_digit)t;
	return (r[8] < (sp_digit)t); /* 1 if addition overflowed */
# else
	// Unroll, then optimize the above loop:
		//uint32_t t_hi;
		uint64_t m;
		uint32_t t32;

		//m = ((uint64_t)b * a[0]) + r[0];
		//  Since b is r[0] and a[0] is ffffffff, the above optimizes to:
		//  m = r[0] * ffffffff + r[0] = (r[0] * 100000000 - r[0]) + r[0] = r[0] << 32;
		//t += m;
		//  t = r[0] << 32 = b << 32;
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[0] = (sp_digit)t;
//		r[0] = 0;
//the store can be eliminated since caller won't look at lower 256 bits of the result
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
		//  t = b;

		//m = ((uint64_t)b * a[1]) + r[1];
		//  Since a[1] is ffffffff, the above optimizes to:
		//  m = b * ffffffff + r[1] = (b * 100000000 - b) + r[1] = (b << 32) - b + r[1];
		//t += m;
		//  t = b + (b << 32) - b + r[1] = (b << 32) + r[1];
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[1] = (sp_digit)t;
		//  r[1] = r[1];
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
		//  t = b;

		//m = ((uint64_t)b * a[2]) + r[2];
		//  Since a[2] is ffffffff, the above optimizes to:
		//  m = b * ffffffff + r[2] = (b * 100000000 - b) + r[2] = (b << 32) - b + r[2];
		//t += m;
		//  t = b + (b << 32) - b + r[2] = (b << 32) + r[2]
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[2] = (sp_digit)t;
		//  r[2] = r[2];
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
		//  t = b;

		//m = ((uint64_t)b * a[3]) + r[3];
		//  Since a[3] is 00000000, the above optimizes to:
		//  m = b * 0 + r[3] = r[3];
		//t += m;
		//  t = b + r[3];
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[3] = (sp_digit)t;
		r[3] = r[3] + b;
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
		t32 = (r[3] < b); // 0 or 1

		//m = ((uint64_t)b * a[4]) + r[4];
		//  Since a[4] is 00000000, the above optimizes to:
		//  m = b * 0 + r[4] = r[4];
		//t += m;
		//  t = t32 + r[4];
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[4] = (sp_digit)t;
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
		if (t32 != 0) {
			r[4]++;
			t32 = (r[4] == 0); // 0 or 1

		//m = ((uint64_t)b * a[5]) + r[5];
		//  Since a[5] is 00000000, the above optimizes to:
		//  m = b * 0 + r[5] = r[5];
		//t += m;
		//  t = t32 + r[5]; (t32 is 0 or 1)
		//t_hi = (t < m);
		//  t_hi = 0;
		//r[5] = (sp_digit)t;
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
			if (t32 != 0) {
				r[5]++;
				t32 = (r[5] == 0); // 0 or 1
			}
		}

		//m = ((uint64_t)b * a[6]) + r[6];
		//  Since a[6] is 00000001, the above optimizes to:
		//  m = (uint64_t)b + r[6]; // 33 bits at most
		//t += m;
		t = t32 + (uint64_t)b + r[6];
		//t_hi = (t < m);
		//  t_hi = 0;
		r[6] = (sp_digit)t;
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
		t = (t >> 32);

		//m = ((uint64_t)b * a[7]) + r[7];
		//  Since a[7] is ffffffff, the above optimizes to:
		//  m = b * ffffffff + r[7] = (b * 100000000 - b) + r[7]
		m = ((uint64_t)b << 32) - b + r[7];
		t += m;
		//t_hi = (t < m);
		//  t_hi in fact is always 0 here (256bit * 32bit can't have more than 32 bits of overflow)
		r[7] = (sp_digit)t;
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
		t = (t >> 32);

	r[8] += (sp_digit)t;
	return (r[8] < (sp_digit)t); /* 1 if addition overflowed */
# endif
}

/* Reduce the number back to 256 bits using Montgomery reduction.
 * Note: the result is NOT guaranteed to be less than p256_mod!
 * (it is only guaranteed to fit into 256 bits).
 *
 * r   Result.
 * a   Double-wide number to reduce. Clobbered.
 * m   The single precision number representing the modulus.
 * mp  The digit representing the negative inverse of m mod 2^n.
 *
 * Montgomery reduction on multiprecision integers:
 * Montgomery reduction requires products modulo R.
 * When R is a power of B [in our case R=2^256, B=2^32], there is a variant
 * of Montgomery reduction which requires products only of machine word sized
 * integers. T is stored as a little-endian word array a[0..n]. The algorithm
 * reduces it one word at a time. First an appropriate multiple of modulus
 * is added to make T divisible by B. [In our case, it is p256_mp_mod * a[0].]
 * Then a multiple of modulus is added to make T divisible by B^2.
 * [In our case, it is (p256_mp_mod * a[1]) << 32.]
 * And so on. Eventually T is divisible by R, and after division by R
 * the algorithm is in the same place as the usual Montgomery reduction.
 *
 * TODO: Can conditionally use 64-bit (if 64-bit little-endian arch) logic?
 */
static void sp_512to256_mont_reduce_8(sp_digit* r, sp_digit* a/*, const sp_digit* m, sp_digit mp*/)
{
//	const sp_digit* m = p256_mod;
	sp_digit mp = p256_mp_mod;

	int i;
//	sp_digit mu;

	if (mp != 1) {
		sp_digit word16th = 0;
		for (i = 0; i < 8; i++) {
//			mu = (sp_digit)(a[i] * mp);
			if (sp_256_mul_add_8(a+i /*, m, mu*/)) {
				int j = i + 8;
 inc_next_word0:
				if (++j > 15) { /* a[16] array has no more words? */
					word16th++;
					continue;
				}
				if (++a[j] == 0) /* did this overflow too? */
					goto inc_next_word0;
			}
		}
		sp_512to256_mont_shift_8(r, a);
		if (word16th != 0)
			sp_256_sub_8_p256_mod(r);
		sp_256_norm_8(r);
	}
	else { /* Same code for explicit mp == 1 (which is always the case for P256) */
		sp_digit word16th = 0;
		for (i = 0; i < 8; i++) {
//			mu = a[i];
			if (sp_256_mul_add_8(a+i /*, m, mu*/)) {
				int j = i + 8;
 inc_next_word:
				if (++j > 15) { /* a[16] array has no more words? */
					word16th++;
					continue;
				}
				if (++a[j] == 0) /* did this overflow too? */
					goto inc_next_word;
			}
		}
		sp_512to256_mont_shift_8(r, a);
		if (word16th != 0)
			sp_256_sub_8_p256_mod(r);
		sp_256_norm_8(r);
	}
}
#endif
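/* Worked example of one reduction step (B = 2^32, mp = 1):
 * p == -1 (mod B), so adding a[i]*p at word position i adds
 * a[i]*(-1) == -a[i] to word i, zeroing it mod B while pushing carries
 * up. After 8 such steps the low 256 bits of T are all zero, and the
 * high 256 bits hold T/R mod p (up to one conditional final subtraction).
 * The UNALIGNED_LE_64BIT variant works the same way with B = 2^64,
 * where p == -1 (mod 2^64) as well.
 */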

/* Multiply two Montgomery form numbers mod the modulus (prime).
 * (r = a * b mod m)
 *
 * r   Result of multiplication.
 * a   First number to multiply in Montgomery form.
 * b   Second number to multiply in Montgomery form.
 * m   Modulus (prime).
 * mp  Montgomery multiplier.
 */
static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const sp_digit* b
		/*, const sp_digit* m, sp_digit mp*/)
{
	//const sp_digit* m = p256_mod;
	//sp_digit mp = p256_mp_mod;
	sp_digit t[2 * 8];
	sp_256to512_mul_8(t, a, b);
	sp_512to256_mont_reduce_8(r, t /*, m, mp*/);
}

/* Square the Montgomery form number. (r = a * a mod m)
 *
 * r   Result of squaring.
 * a   Number to square in Montgomery form.
 * m   Modulus (prime).
 * mp  Montgomery multiplier.
 */
static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a
		/*, const sp_digit* m, sp_digit mp*/)
{
	//const sp_digit* m = p256_mod;
	//sp_digit mp = p256_mp_mod;
	sp_256_mont_mul_8(r, a, a /*, m, mp*/);
}

static NOINLINE void sp_256_mont_mul_and_reduce_8(sp_digit* r,
		const sp_digit* a, const sp_digit* b
		/*, const sp_digit* m, sp_digit mp*/)
{
	sp_digit rr[2 * 8];

	sp_256_mont_mul_8(rr, a, b /*, p256_mod, p256_mp_mod*/);
	memset(rr + 8, 0, sizeof(rr) / 2);
	sp_512to256_mont_reduce_8(r, rr /*, p256_mod, p256_mp_mod*/);
}

/* Invert the number, in Montgomery form, modulo the modulus (prime) of the
 * P256 curve. (r = 1 / a mod m)
 *
 * r   Inverse result. Must not coincide with a.
 * a   Number to invert.
 */
static void sp_256_mont_inv_8(sp_digit* r, sp_digit* a)
{
	int i;

	memcpy(r, a, sizeof(sp_digit) * 8);
	for (i = 254; i >= 0; i--) {
		sp_256_mont_sqr_8(r, r /*, p256_mod, p256_mp_mod*/);
/* p256_mod - 2:
 * ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff - 2
 * Bit pattern:
 * 2    2         2         2         2         2         2         1...1
 * 5    5         4         3         2         1         0         9...0         9...1
 * 543210987654321098765432109876543210987654321098765432109876543210...09876543210...09876543210
 * 111111111111111111111111111111110000000000000000000000000000000100...00000111111...11111111101
 */
		/*if (p256_mod_minus_2[i / 32] & ((sp_digit)1 << (i % 32)))*/
		if (i >= 224 || i == 192 || (i <= 95 && i != 1))
			sp_256_mont_mul_8(r, r, a /*, p256_mod, p256_mp_mod*/);
	}
}
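/* The loop computes r = a^(p256_mod - 2) by left-to-right
 * square-and-multiply; by Fermat's little theorem (p is prime) this
 * equals 1/a mod p. Montgomery form is preserved throughout, since
 * multiplying two Montgomery-form values yields a Montgomery-form value.
 */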

/* Multiply a number by Montgomery normalizer mod modulus (prime).
 *
 * r  The resulting Montgomery form number.
 * a  The number to convert.
 */
static void sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a)
{
	int64_t t[8];
	int32_t o;

#define A(n) ((uint64_t)a[n])
	/*  1  1  0 -1 -1 -1 -1  0 */
	t[0] = 0 + A(0) + A(1) - A(3) - A(4) - A(5) - A(6);
	/*  0  1  1  0 -1 -1 -1 -1 */
	t[1] = 0 + A(1) + A(2) - A(4) - A(5) - A(6) - A(7);
	/*  0  0  1  1  0 -1 -1 -1 */
	t[2] = 0 + A(2) + A(3) - A(5) - A(6) - A(7);
	/* -1 -1  0  2  2  1  0 -1 */
	t[3] = 0 - A(0) - A(1) + 2 * A(3) + 2 * A(4) + A(5) - A(7);
	/*  0 -1 -1  0  2  2  1  0 */
	t[4] = 0 - A(1) - A(2) + 2 * A(4) + 2 * A(5) + A(6);
	/*  0  0 -1 -1  0  2  2  1 */
	t[5] = 0 - A(2) - A(3) + 2 * A(5) + 2 * A(6) + A(7);
	/* -1 -1  0  0  0  1  3  2 */
	t[6] = 0 - A(0) - A(1) + A(5) + 3 * A(6) + 2 * A(7);
	/*  1  0 -1 -1 -1 -1  0  3 */
	t[7] = 0 + A(0) - A(2) - A(3) - A(4) - A(5) + 3 * A(7);
#undef A

	t[1] += t[0] >> 32; t[0] &= 0xffffffff;
	t[2] += t[1] >> 32; t[1] &= 0xffffffff;
	t[3] += t[2] >> 32; t[2] &= 0xffffffff;
	t[4] += t[3] >> 32; t[3] &= 0xffffffff;
	t[5] += t[4] >> 32; t[4] &= 0xffffffff;
	t[6] += t[5] >> 32; t[5] &= 0xffffffff;
	t[7] += t[6] >> 32; t[6] &= 0xffffffff;
	o     = t[7] >> 32; //t[7] &= 0xffffffff;
	t[0] += o;
	t[3] -= o;
	t[6] -= o;
	t[7] += o;
	r[0] = (sp_digit)t[0];
	t[1] += t[0] >> 32;
	r[1] = (sp_digit)t[1];
	t[2] += t[1] >> 32;
	r[2] = (sp_digit)t[2];
	t[3] += t[2] >> 32;
	r[3] = (sp_digit)t[3];
	t[4] += t[3] >> 32;
	r[4] = (sp_digit)t[4];
	t[5] += t[4] >> 32;
	r[5] = (sp_digit)t[5];
	t[6] += t[5] >> 32;
	r[6] = (sp_digit)t[6];
//	t[7] += t[6] >> 32;
//	r[7] = (sp_digit)t[7];
	r[7] = (sp_digit)t[7] + (sp_digit)(t[6] >> 32);
}
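/* The signed coefficient rows above match the standard NIST P-256
 * fast-reduction identities applied to a * 2^256: each 2^(32*k) mod p
 * for k = 8..15 is rewritten as a small signed combination of 2^(32*j),
 * j = 0..7. The result is a converted to Montgomery form
 * (a*R mod p, with R = 2^256).
 */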

/* Map the Montgomery form projective co-ordinate point to an affine point.
 *
 * r  Resulting affine co-ordinate point.
 * p  Montgomery form projective co-ordinate point.
 */
static void sp_256_map_8(sp_point* r, sp_point* p)
{
	sp_digit t1[8];
	sp_digit t2[8];

	sp_256_mont_inv_8(t1, p->z);

	sp_256_mont_sqr_8(t2, t1 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t1, t2, t1 /*, p256_mod, p256_mp_mod*/);

	/* x /= z^2 */
	sp_256_mont_mul_and_reduce_8(r->x, p->x, t2 /*, p256_mod, p256_mp_mod*/);
	/* Reduce x to less than modulus */
	if (sp_256_cmp_8(r->x, p256_mod) >= 0)
		sp_256_sub_8_p256_mod(r->x);
	sp_256_norm_8(r->x);

	/* y /= z^3 */
	sp_256_mont_mul_and_reduce_8(r->y, p->y, t1 /*, p256_mod, p256_mp_mod*/);
	/* Reduce y to less than modulus */
	if (sp_256_cmp_8(r->y, p256_mod) >= 0)
		sp_256_sub_8_p256_mod(r->y);
	sp_256_norm_8(r->y);

	memset(r->z, 0, sizeof(r->z));
	r->z[0] = 1;
}

/* Double the Montgomery form projective point p.
 *
 * r  Result of doubling point.
 * p  Point to double.
 */
static void sp_256_proj_point_dbl_8(sp_point* r, sp_point* p)
{
	sp_digit t1[8];
	sp_digit t2[8];

	/* Put point to double into result */
	if (r != p)
		*r = *p; /* struct copy */

	if (r->infinity)
		return;

	/* T1 = Z * Z */
	sp_256_mont_sqr_8(t1, r->z /*, p256_mod, p256_mp_mod*/);
	/* Z = Y * Z */
	sp_256_mont_mul_8(r->z, r->y, r->z /*, p256_mod, p256_mp_mod*/);
	/* Z = 2Z */
	sp_256_mont_dbl_8(r->z, r->z /*, p256_mod*/);
	/* T2 = X - T1 */
	sp_256_mont_sub_8(t2, r->x, t1 /*, p256_mod*/);
	/* T1 = X + T1 */
	sp_256_mont_add_8(t1, r->x, t1 /*, p256_mod*/);
	/* T2 = T1 * T2 */
	sp_256_mont_mul_8(t2, t1, t2 /*, p256_mod, p256_mp_mod*/);
	/* T1 = 3T2 */
	sp_256_mont_tpl_8(t1, t2 /*, p256_mod*/);
	/* Y = 2Y */
	sp_256_mont_dbl_8(r->y, r->y /*, p256_mod*/);
	/* Y = Y * Y */
	sp_256_mont_sqr_8(r->y, r->y /*, p256_mod, p256_mp_mod*/);
	/* T2 = Y * Y */
	sp_256_mont_sqr_8(t2, r->y /*, p256_mod, p256_mp_mod*/);
	/* T2 = T2/2 */
	sp_256_div2_8(t2 /*, p256_mod*/);
	/* Y = Y * X */
	sp_256_mont_mul_8(r->y, r->y, r->x /*, p256_mod, p256_mp_mod*/);
	/* X = T1 * T1 */
	sp_256_mont_mul_8(r->x, t1, t1 /*, p256_mod, p256_mp_mod*/);
	/* X = X - Y */
	sp_256_mont_sub_8(r->x, r->x, r->y /*, p256_mod*/);
	/* X = X - Y */
	sp_256_mont_sub_8(r->x, r->x, r->y /*, p256_mod*/);
	/* Y = Y - X */
	sp_256_mont_sub_8(r->y, r->y, r->x /*, p256_mod*/);
	/* Y = Y * T1 */
	sp_256_mont_mul_8(r->y, r->y, t1 /*, p256_mod, p256_mp_mod*/);
	/* Y = Y - T2 */
	sp_256_mont_sub_8(r->y, r->y, t2 /*, p256_mod*/);
	dump_512("y2 %s\n", r->y);
}
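/* The steps above are the usual Jacobian-coordinate doubling formulas
 * (x = X/Z^2, y = Y/Z^3): working projectively avoids the modular
 * inversion an affine doubling would need; only sp_256_map_8()
 * performs one inversion at the very end.
 */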

/* Add two Montgomery form projective points.
 *
 * r  Result of addition.
 * p  First point to add.
 * q  Second point to add.
 */
static NOINLINE void sp_256_proj_point_add_8(sp_point* r, sp_point* p, sp_point* q)
{
	sp_digit t1[8];
	sp_digit t2[8];
	sp_digit t3[8];
	sp_digit t4[8];
	sp_digit t5[8];

	/* Ensure only the first point is the same as the result. */
	if (q == r) {
		sp_point* a = p;
		p = q;
		q = a;
	}

	/* Check double */
	sp_256_sub_8(t1, p256_mod, q->y);
	sp_256_norm_8(t1);
	if (sp_256_cmp_equal_8(p->x, q->x)
	 && sp_256_cmp_equal_8(p->z, q->z)
	 && (sp_256_cmp_equal_8(p->y, q->y) || sp_256_cmp_equal_8(p->y, t1))
	) {
		sp_256_proj_point_dbl_8(r, p);
		return;
	}

	if (p->infinity || q->infinity) {
		*r = p->infinity ? *q : *p; /* struct copy */
		return;
	}

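	/* Note: from here on p == r (our only caller passes either r == p
	 * or r == q, and the q == r case was swapped above), so r->x/y/z
	 * below refer to p's coordinates.
	 */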
	/* U1 = X1*Z2^2 */
	sp_256_mont_sqr_8(t1, q->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t3, t1, q->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t1, t1, r->x /*, p256_mod, p256_mp_mod*/);
	/* U2 = X2*Z1^2 */
	sp_256_mont_sqr_8(t2, r->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t4, t2, r->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t2, t2, q->x /*, p256_mod, p256_mp_mod*/);
	/* S1 = Y1*Z2^3 */
	sp_256_mont_mul_8(t3, t3, r->y /*, p256_mod, p256_mp_mod*/);
	/* S2 = Y2*Z1^3 */
	sp_256_mont_mul_8(t4, t4, q->y /*, p256_mod, p256_mp_mod*/);
	/* H = U2 - U1 */
	sp_256_mont_sub_8(t2, t2, t1 /*, p256_mod*/);
	/* R = S2 - S1 */
	sp_256_mont_sub_8(t4, t4, t3 /*, p256_mod*/);
	/* Z3 = H*Z1*Z2 */
	sp_256_mont_mul_8(r->z, r->z, q->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(r->z, r->z, t2 /*, p256_mod, p256_mp_mod*/);
	/* X3 = R^2 - H^3 - 2*U1*H^2 */
	sp_256_mont_sqr_8(r->x, t4 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_sqr_8(t5, t2 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(r->y, t1, t5 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t5, t5, t2 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_sub_8(r->x, r->x, t5 /*, p256_mod*/);
	sp_256_mont_dbl_8(t1, r->y /*, p256_mod*/);
	sp_256_mont_sub_8(r->x, r->x, t1 /*, p256_mod*/);
	/* Y3 = R*(U1*H^2 - X3) - S1*H^3 */
	sp_256_mont_sub_8(r->y, r->y, r->x /*, p256_mod*/);
	sp_256_mont_mul_8(r->y, r->y, t4 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t5, t5, t3 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_sub_8(r->y, r->y, t5 /*, p256_mod*/);
}

/* Multiply the point by the scalar and return the result.
 * If map is true then convert result to affine co-ordinates.
 *
 * r     Resulting point.
 * g     Point to multiply.
 * k     Scalar to multiply by.
 * map   Indicates whether to convert result to affine.
 */
static void sp_256_ecc_mulmod_8(sp_point* r, const sp_point* g, const sp_digit* k /*, int map*/)
{
	enum { map = 1 }; /* we always convert result to affine coordinates */
	sp_point t[3];
	sp_digit n = n; /* for compiler */
	int c, y;

	memset(t, 0, sizeof(t));

	/* t[0] = {0, 0, 1} * norm */
	t[0].infinity = 1;
	/* t[1] = {g->x, g->y, g->z} * norm */
	sp_256_mod_mul_norm_8(t[1].x, g->x);
	sp_256_mod_mul_norm_8(t[1].y, g->y);
	sp_256_mod_mul_norm_8(t[1].z, g->z);

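	/* The loop below is a Montgomery ladder: t[1] == t[0] + g is kept
	 * as an invariant. For bit y, t[y^1] receives the sum t[0]+t[1]
	 * and t[y] is doubled, so an add and a double are performed for
	 * every bit regardless of its value.
	 */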
	/* For every bit, starting from most significant... */
	k += 7;
	c = 256;
	for (;;) {
		if ((c & 0x1f) == 0) {
			if (c == 0)
				break;
			n = *k--;
		}

		y = (n >> 31);
		dbg("y:%d t[%d] = t[0]+t[1]\n", y, y^1);
		sp_256_proj_point_add_8(&t[y^1], &t[0], &t[1]);
		dump_512("t[0].x %s\n", t[0].x);
		dump_512("t[0].y %s\n", t[0].y);
		dump_512("t[0].z %s\n", t[0].z);
		dump_512("t[1].x %s\n", t[1].x);
		dump_512("t[1].y %s\n", t[1].y);
		dump_512("t[1].z %s\n", t[1].z);
		dbg("t[2] = t[%d]\n", y);
		t[2] = t[y]; /* struct copy */
		dbg("t[2] *= 2\n");
		sp_256_proj_point_dbl_8(&t[2], &t[2]);
		dump_512("t[2].x %s\n", t[2].x);
		dump_512("t[2].y %s\n", t[2].y);
		dump_512("t[2].z %s\n", t[2].z);
		t[y] = t[2]; /* struct copy */

		n <<= 1;
		c--;
	}

	if (map)
		sp_256_map_8(r, &t[0]);
	else
		*r = t[0]; /* struct copy */

	memset(t, 0, sizeof(t)); //paranoia
}

/* Multiply the base point of P256 by the scalar and return the result.
 * If map is true then convert result to affine co-ordinates.
 *
 * r     Resulting point.
 * k     Scalar to multiply by.
 * map   Indicates whether to convert result to affine.
 */
static void sp_256_ecc_mulmod_base_8(sp_point* r, sp_digit* k /*, int map*/)
{
	/* Since this function is called only once, save space:
	 * don't have "static const sp_point p256_base = {...}",
	 * it would have more zeros than data.
	 */
	static const uint8_t p256_base_bin[] = {
		/* x (big-endian) */
		0x6b,0x17,0xd1,0xf2,0xe1,0x2c,0x42,0x47,0xf8,0xbc,0xe6,0xe5,0x63,0xa4,0x40,0xf2,0x77,0x03,0x7d,0x81,0x2d,0xeb,0x33,0xa0,0xf4,0xa1,0x39,0x45,0xd8,0x98,0xc2,0x96,
		/* y */
		0x4f,0xe3,0x42,0xe2,0xfe,0x1a,0x7f,0x9b,0x8e,0xe7,0xeb,0x4a,0x7c,0x0f,0x9e,0x16,0x2b,0xce,0x33,0x57,0x6b,0x31,0x5e,0xce,0xcb,0xb6,0x40,0x68,0x37,0xbf,0x51,0xf5,
		/* z will be set to 1, infinity flag to "false" */
	};
	sp_point p256_base;

	sp_256_point_from_bin2x32(&p256_base, p256_base_bin);

	sp_256_ecc_mulmod_8(r, &p256_base, k /*, map*/);
}

/* Multiply the point by the scalar and serialize the X ordinate.
 * The number is 0 padded to maximum size on output.
 *
 * priv    Scalar to multiply the point by.
 * pub2x32 Point to multiply.
 * out32   Buffer to hold X ordinate.
 */
static void sp_ecc_secret_gen_256(const sp_digit priv[8], const uint8_t *pub2x32, uint8_t* out32)
{
	sp_point point[1];

#if FIXED_PEER_PUBKEY
	memset((void*)pub2x32, 0x55, 64);
#endif
	dump_hex("peerkey %s\n", pub2x32, 32); /* in TLS, this is peer's public key */
	dump_hex("        %s\n", pub2x32 + 32, 32);

	sp_256_point_from_bin2x32(point, pub2x32);
	dump_512("point->x %s\n", point->x);
	dump_512("point->y %s\n", point->y);

	sp_256_ecc_mulmod_8(point, point, priv);

	sp_256_to_bin_8(point->x, out32);
	dump_hex("out32: %s\n", out32, 32);
}

/* Generates a random scalar in [1..order-1] range. */
static void sp_256_ecc_gen_k_8(sp_digit k[8])
{
	/* Since 32-bit words are "dense", no need to use
	 * sp_256_from_bin_8(k, buf) to convert random stream
	 * to sp_digit array - just store random bits there directly.
	 */
	tls_get_random(k, 8 * sizeof(k[0]));
#if FIXED_SECRET
	memset(k, 0x77, 8 * sizeof(k[0]));
#endif

// If scalar is too large, try again (pseudo-code)
//	if (k >= 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551 - 1) // order of P256
//		goto pick_another_random;
//	k++; // ensure non-zero
	/* Simpler alternative, at the cost of not choosing some valid
	 * random values, and slightly non-uniform distribution */
	if (k[0] == 0)
		k[0] = 1;
	if (k[7] >= 0xffffffff)
		k[7] = 0xfffffffe;
}

/* Makes a random EC key pair. */
static void sp_ecc_make_key_256(sp_digit privkey[8], uint8_t *pubkey)
{
	sp_point point[1];

	sp_256_ecc_gen_k_8(privkey);
	dump_256("privkey %s\n", privkey);
	sp_256_ecc_mulmod_base_8(point, privkey);
	dump_512("point->x %s\n", point->x);
	dump_512("point->y %s\n", point->y);
	sp_256_to_bin_8(point->x, pubkey);
	sp_256_to_bin_8(point->y, pubkey + 32);

	memset(point, 0, sizeof(point)); //paranoia
}

void FAST_FUNC curve_P256_compute_pubkey_and_premaster(
		uint8_t *pubkey2x32, uint8_t *premaster32,
		const uint8_t *peerkey2x32)
{
	sp_digit privkey[8];

	dump_hex("peerkey2x32: %s\n", peerkey2x32, 64);
	sp_ecc_make_key_256(privkey, pubkey2x32);
	dump_hex("pubkey: %s\n", pubkey2x32, 32);
	dump_hex("        %s\n", pubkey2x32 + 32, 32);

	/* Combine our privkey and peer's public key to generate premaster */
	sp_ecc_secret_gen_256(privkey, /*x,y:*/peerkey2x32, premaster32);
	dump_hex("premaster: %s\n", premaster32, 32);
}