/**
 * \file bn_mul.h
 *
 * \brief Multi-precision integer library
 *
 *  Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
 *  SPDX-License-Identifier: Apache-2.0
 *
 *  Licensed under the Apache License, Version 2.0 (the "License"); you may
 *  not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *  This file is part of mbed TLS (https://tls.mbed.org)
 */
/*
 *      Multiply source vector [s] with b, add result
 *       to destination vector [d] and set carry c.
 *
 *      Currently supports:
 *
 *         . IA-32 (386+)         . AMD64 / EM64T
 *         . IA-32 (SSE2)         . Motorola 68000
 *         . PowerPC, 32-bit      . MicroBlaze
 *         . PowerPC, 64-bit      . TriCore
 *         . SPARC v8             . ARM v3+
 *         . C, longlong          . C, generic
 */
38 #ifndef MBEDTLS_BN_MUL_H
\r
39 #define MBEDTLS_BN_MUL_H
\r
41 #if !defined(MBEDTLS_CONFIG_FILE)
\r
44 #include MBEDTLS_CONFIG_FILE
\r
49 #if defined(MBEDTLS_HAVE_ASM)
\r
55 /* armcc5 --gnu defines __GNUC__ but doesn't support GNU's extended asm */
\r
56 #if defined(__GNUC__) && \
\r
57 ( !defined(__ARMCC_VERSION) || __ARMCC_VERSION >= 6000000 )
\r
60 * Disable use of the i386 assembly code below if option -O0, to disable all
\r
61 * compiler optimisations, is passed, detected with __OPTIMIZE__
\r
62 * This is done as the number of registers used in the assembly code doesn't
\r
63 * work with the -O0 option.
\r
65 #if defined(__i386__) && defined(__OPTIMIZE__)
\r
67 #define MULADDC_INIT \
\r
69 "movl %%ebx, %0 \n\t" \
\r
70 "movl %5, %%esi \n\t" \
\r
71 "movl %6, %%edi \n\t" \
\r
72 "movl %7, %%ecx \n\t" \
\r
73 "movl %8, %%ebx \n\t"
\r
75 #define MULADDC_CORE \
\r
78 "addl %%ecx, %%eax \n\t" \
\r
79 "adcl $0, %%edx \n\t" \
\r
80 "addl (%%edi), %%eax \n\t" \
\r
81 "adcl $0, %%edx \n\t" \
\r
82 "movl %%edx, %%ecx \n\t" \
\r
85 #if defined(MBEDTLS_HAVE_SSE2)
\r
87 #define MULADDC_HUIT \
\r
88 "movd %%ecx, %%mm1 \n\t" \
\r
89 "movd %%ebx, %%mm0 \n\t" \
\r
90 "movd (%%edi), %%mm3 \n\t" \
\r
91 "paddq %%mm3, %%mm1 \n\t" \
\r
92 "movd (%%esi), %%mm2 \n\t" \
\r
93 "pmuludq %%mm0, %%mm2 \n\t" \
\r
94 "movd 4(%%esi), %%mm4 \n\t" \
\r
95 "pmuludq %%mm0, %%mm4 \n\t" \
\r
96 "movd 8(%%esi), %%mm6 \n\t" \
\r
97 "pmuludq %%mm0, %%mm6 \n\t" \
\r
98 "movd 12(%%esi), %%mm7 \n\t" \
\r
99 "pmuludq %%mm0, %%mm7 \n\t" \
\r
100 "paddq %%mm2, %%mm1 \n\t" \
\r
101 "movd 4(%%edi), %%mm3 \n\t" \
\r
102 "paddq %%mm4, %%mm3 \n\t" \
\r
103 "movd 8(%%edi), %%mm5 \n\t" \
\r
104 "paddq %%mm6, %%mm5 \n\t" \
\r
105 "movd 12(%%edi), %%mm4 \n\t" \
\r
106 "paddq %%mm4, %%mm7 \n\t" \
\r
107 "movd %%mm1, (%%edi) \n\t" \
\r
108 "movd 16(%%esi), %%mm2 \n\t" \
\r
109 "pmuludq %%mm0, %%mm2 \n\t" \
\r
110 "psrlq $32, %%mm1 \n\t" \
\r
111 "movd 20(%%esi), %%mm4 \n\t" \
\r
112 "pmuludq %%mm0, %%mm4 \n\t" \
\r
113 "paddq %%mm3, %%mm1 \n\t" \
\r
114 "movd 24(%%esi), %%mm6 \n\t" \
\r
115 "pmuludq %%mm0, %%mm6 \n\t" \
\r
116 "movd %%mm1, 4(%%edi) \n\t" \
\r
117 "psrlq $32, %%mm1 \n\t" \
\r
118 "movd 28(%%esi), %%mm3 \n\t" \
\r
119 "pmuludq %%mm0, %%mm3 \n\t" \
\r
120 "paddq %%mm5, %%mm1 \n\t" \
\r
121 "movd 16(%%edi), %%mm5 \n\t" \
\r
122 "paddq %%mm5, %%mm2 \n\t" \
\r
123 "movd %%mm1, 8(%%edi) \n\t" \
\r
124 "psrlq $32, %%mm1 \n\t" \
\r
125 "paddq %%mm7, %%mm1 \n\t" \
\r
126 "movd 20(%%edi), %%mm5 \n\t" \
\r
127 "paddq %%mm5, %%mm4 \n\t" \
\r
128 "movd %%mm1, 12(%%edi) \n\t" \
\r
129 "psrlq $32, %%mm1 \n\t" \
\r
130 "paddq %%mm2, %%mm1 \n\t" \
\r
131 "movd 24(%%edi), %%mm5 \n\t" \
\r
132 "paddq %%mm5, %%mm6 \n\t" \
\r
133 "movd %%mm1, 16(%%edi) \n\t" \
\r
134 "psrlq $32, %%mm1 \n\t" \
\r
135 "paddq %%mm4, %%mm1 \n\t" \
\r
136 "movd 28(%%edi), %%mm5 \n\t" \
\r
137 "paddq %%mm5, %%mm3 \n\t" \
\r
138 "movd %%mm1, 20(%%edi) \n\t" \
\r
139 "psrlq $32, %%mm1 \n\t" \
\r
140 "paddq %%mm6, %%mm1 \n\t" \
\r
141 "movd %%mm1, 24(%%edi) \n\t" \
\r
142 "psrlq $32, %%mm1 \n\t" \
\r
143 "paddq %%mm3, %%mm1 \n\t" \
\r
144 "movd %%mm1, 28(%%edi) \n\t" \
\r
145 "addl $32, %%edi \n\t" \
\r
146 "addl $32, %%esi \n\t" \
\r
147 "psrlq $32, %%mm1 \n\t" \
\r
148 "movd %%mm1, %%ecx \n\t"
\r
150 #define MULADDC_STOP \
\r
152 "movl %4, %%ebx \n\t" \
\r
153 "movl %%ecx, %1 \n\t" \
\r
154 "movl %%edi, %2 \n\t" \
\r
155 "movl %%esi, %3 \n\t" \
\r
156 : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
\r
157 : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
\r
158 : "eax", "ebx", "ecx", "edx", "esi", "edi" \
\r
163 #define MULADDC_STOP \
\r
164 "movl %4, %%ebx \n\t" \
\r
165 "movl %%ecx, %1 \n\t" \
\r
166 "movl %%edi, %2 \n\t" \
\r
167 "movl %%esi, %3 \n\t" \
\r
168 : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
\r
169 : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
\r
170 : "eax", "ebx", "ecx", "edx", "esi", "edi" \
\r
#if defined(__amd64__) || defined (__x86_64__)

/* AMD64: s/d/c are tied to %rsi/%rdi/%rcx via constraints; %r8 stays zero
 * so "movq %%r8, %%rcx" resets the carry register before the adc chain. */
#define MULADDC_INIT        \
    asm(                    \
        "xorq   %%r8, %%r8\n"

/* One limb: *d++ += c + *s++ * b; new carry = high 64 bits + add carries. */
#define MULADDC_CORE                        \
        "movq   (%%rsi), %%rax\n"           \
        "mulq   %%rbx\n"                    \
        "addq   $8, %%rsi\n"                \
        "addq   %%rcx, %%rax\n"             \
        "movq   %%r8, %%rcx\n"              \
        "adcq   $0, %%rdx\n"                \
        "nop    \n"                         \
        "addq   %%rax, (%%rdi)\n"           \
        "adcq   %%rdx, %%rcx\n"             \
        "addq   $8, %%rdi\n"

#define MULADDC_STOP                        \
        : "+c" (c), "+D" (d), "+S" (s)      \
        : "b" (b)                           \
        : "rax", "rdx", "r8"                \
    );

#endif /* AMD64 */
#if defined(__mc68020__) || defined(__mcpu32__)

/* m68k: a2 = s, a3 = d, d3 = carry, d2 = b, d0 = constant 0 for addx. */
#define MULADDC_INIT                \
    asm(                            \
        "movl   %3, %%a2    \n\t"   \
        "movl   %4, %%a3    \n\t"   \
        "movl   %5, %%d3    \n\t"   \
        "movl   %6, %%d2    \n\t"   \
        "moveq  #0, %%d0    \n\t"

/* One limb: 32x32->64 mulul, add carry and *d, propagate with addx. */
#define MULADDC_CORE                    \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d4:%%d1 \n\t"   \
        "addl   %%d3, %%d1      \n\t"   \
        "addxl  %%d0, %%d4      \n\t"   \
        "moveq  #0,   %%d3      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "addxl  %%d4, %%d3      \n\t"

#define MULADDC_STOP                    \
        "movl   %%d3, %0        \n\t"   \
        "movl   %%a3, %1        \n\t"   \
        "movl   %%a2, %2        \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "d0", "d1", "d2", "d3", "d4", "a2", "a3"  \
    );

/* Eight limbs, alternating d3/d4 as the high-half/carry register. */
#define MULADDC_HUIT                    \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d4:%%d1 \n\t"   \
        "addxl  %%d3, %%d1      \n\t"   \
        "addxl  %%d0, %%d4      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d3:%%d1 \n\t"   \
        "addxl  %%d4, %%d1      \n\t"   \
        "addxl  %%d0, %%d3      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d4:%%d1 \n\t"   \
        "addxl  %%d3, %%d1      \n\t"   \
        "addxl  %%d0, %%d4      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d3:%%d1 \n\t"   \
        "addxl  %%d4, %%d1      \n\t"   \
        "addxl  %%d0, %%d3      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d4:%%d1 \n\t"   \
        "addxl  %%d3, %%d1      \n\t"   \
        "addxl  %%d0, %%d4      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d3:%%d1 \n\t"   \
        "addxl  %%d4, %%d1      \n\t"   \
        "addxl  %%d0, %%d3      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d4:%%d1 \n\t"   \
        "addxl  %%d3, %%d1      \n\t"   \
        "addxl  %%d0, %%d4      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "movel  %%a2@+, %%d1    \n\t"   \
        "mulul  %%d2, %%d3:%%d1 \n\t"   \
        "addxl  %%d4, %%d1      \n\t"   \
        "addxl  %%d0, %%d3      \n\t"   \
        "addl   %%d1, %%a3@+    \n\t"   \
        "addxl  %%d0, %%d3      \n\t"

#endif /* MC68000 */
#if defined(__powerpc64__) || defined(__ppc64__)

#if defined(__MACH__) && defined(__APPLE__)

/* PPC64, Apple assembler syntax (no %% register prefix).
 * r3 = s, r4 = d, r5 = carry, r6 = b; addic clears the carry bit. */
#define MULADDC_INIT                \
    asm(                            \
        "ld     r3, %3      \n\t"   \
        "ld     r4, %4      \n\t"   \
        "ld     r5, %5      \n\t"   \
        "ld     r6, %6      \n\t"   \
        "addi   r3, r3, -8  \n\t"   \
        "addi   r4, r4, -8  \n\t"   \
        "addic  r5, r5, 0   \n\t"

#define MULADDC_CORE                \
        "ldu    r7, 8(r3)   \n\t"   \
        "mulld  r8, r7, r6  \n\t"   \
        "mulhdu r9, r7, r6  \n\t"   \
        "adde   r8, r8, r5  \n\t"   \
        "ld     r7, 8(r4)   \n\t"   \
        "addze  r5, r9      \n\t"   \
        "addc   r8, r8, r7  \n\t"   \
        "stdu   r8, 8(r4)   \n\t"

#define MULADDC_STOP                \
        "addze  r5, r5      \n\t"   \
        "addi   r4, r4, 8   \n\t"   \
        "addi   r3, r3, 8   \n\t"   \
        "std    r5, %0      \n\t"   \
        "std    r4, %1      \n\t"   \
        "std    r3, %2      \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9"  \
    );

#else /* __MACH__ && __APPLE__ */

#define MULADDC_INIT                    \
    asm(                                \
        "ld     %%r3, %3        \n\t"   \
        "ld     %%r4, %4        \n\t"   \
        "ld     %%r5, %5        \n\t"   \
        "ld     %%r6, %6        \n\t"   \
        "addi   %%r3, %%r3, -8  \n\t"   \
        "addi   %%r4, %%r4, -8  \n\t"   \
        "addic  %%r5, %%r5, 0   \n\t"

#define MULADDC_CORE                        \
        "ldu    %%r7, 8(%%r3)       \n\t"   \
        "mulld  %%r8, %%r7, %%r6    \n\t"   \
        "mulhdu %%r9, %%r7, %%r6    \n\t"   \
        "adde   %%r8, %%r8, %%r5    \n\t"   \
        "ld     %%r7, 8(%%r4)       \n\t"   \
        "addze  %%r5, %%r9          \n\t"   \
        "addc   %%r8, %%r8, %%r7    \n\t"   \
        "stdu   %%r8, 8(%%r4)       \n\t"

#define MULADDC_STOP                    \
        "addze  %%r5, %%r5      \n\t"   \
        "addi   %%r4, %%r4, 8   \n\t"   \
        "addi   %%r3, %%r3, 8   \n\t"   \
        "std    %%r5, %0        \n\t"   \
        "std    %%r4, %1        \n\t"   \
        "std    %%r3, %2        \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9"  \
    );

#endif /* __MACH__ && __APPLE__ */

#elif defined(__powerpc__) || defined(__ppc__) /* end PPC64/begin PPC32 */

#if defined(__MACH__) && defined(__APPLE__)

/* PPC32, Apple assembler syntax: same scheme with 4-byte limbs. */
#define MULADDC_INIT                \
    asm(                            \
        "lwz    r3, %3      \n\t"   \
        "lwz    r4, %4      \n\t"   \
        "lwz    r5, %5      \n\t"   \
        "lwz    r6, %6      \n\t"   \
        "addi   r3, r3, -4  \n\t"   \
        "addi   r4, r4, -4  \n\t"   \
        "addic  r5, r5, 0   \n\t"

#define MULADDC_CORE                \
        "lwzu   r7, 4(r3)   \n\t"   \
        "mullw  r8, r7, r6  \n\t"   \
        "mulhwu r9, r7, r6  \n\t"   \
        "adde   r8, r8, r5  \n\t"   \
        "lwz    r7, 4(r4)   \n\t"   \
        "addze  r5, r9      \n\t"   \
        "addc   r8, r8, r7  \n\t"   \
        "stwu   r8, 4(r4)   \n\t"

#define MULADDC_STOP                \
        "addze  r5, r5      \n\t"   \
        "addi   r4, r4, 4   \n\t"   \
        "addi   r3, r3, 4   \n\t"   \
        "stw    r5, %0      \n\t"   \
        "stw    r4, %1      \n\t"   \
        "stw    r3, %2      \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9"  \
    );

#else /* __MACH__ && __APPLE__ */

#define MULADDC_INIT                    \
    asm(                                \
        "lwz    %%r3, %3        \n\t"   \
        "lwz    %%r4, %4        \n\t"   \
        "lwz    %%r5, %5        \n\t"   \
        "lwz    %%r6, %6        \n\t"   \
        "addi   %%r3, %%r3, -4  \n\t"   \
        "addi   %%r4, %%r4, -4  \n\t"   \
        "addic  %%r5, %%r5, 0   \n\t"

#define MULADDC_CORE                        \
        "lwzu   %%r7, 4(%%r3)       \n\t"   \
        "mullw  %%r8, %%r7, %%r6    \n\t"   \
        "mulhwu %%r9, %%r7, %%r6    \n\t"   \
        "adde   %%r8, %%r8, %%r5    \n\t"   \
        "lwz    %%r7, 4(%%r4)       \n\t"   \
        "addze  %%r5, %%r9          \n\t"   \
        "addc   %%r8, %%r8, %%r7    \n\t"   \
        "stwu   %%r8, 4(%%r4)       \n\t"

#define MULADDC_STOP                    \
        "addze  %%r5, %%r5      \n\t"   \
        "addi   %%r4, %%r4, 4   \n\t"   \
        "addi   %%r3, %%r3, 4   \n\t"   \
        "stw    %%r5, %0        \n\t"   \
        "stw    %%r4, %1        \n\t"   \
        "stw    %%r3, %2        \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9"  \
    );

#endif /* __MACH__ && __APPLE__ */

#endif /* PPC32 */
/*
 * The Sparc(64) assembly is reported to be broken.
 * Disable it for now, until we're able to fix it.
 */
#if 0 && defined(__sparc__)
#if defined(__sparc64__)

/* SPARC v9: o0 = s, o1 = d, o2 = carry, o3 = b; the 32-bit umul result's
 * high half is read back from %y. */
#define MULADDC_INIT                            \
    asm(                                        \
                "ldx     %3, %%o0       \n\t"   \
                "ldx     %4, %%o1       \n\t"   \
                "ld      %5, %%o2       \n\t"   \
                "ld      %6, %%o3       \n\t"

#define MULADDC_CORE                                    \
                "ld      [%%o0], %%o4           \n\t"   \
                "inc     4, %%o0                \n\t"   \
                "ld      [%%o1], %%o5           \n\t"   \
                "umul    %%o3, %%o4, %%o4       \n\t"   \
                "addcc   %%o4, %%o2, %%o4       \n\t"   \
                "rd      %%y, %%g1              \n\t"   \
                "addx    %%g1, 0, %%g1          \n\t"   \
                "addcc   %%o4, %%o5, %%o4       \n\t"   \
                "st      %%o4, [%%o1]           \n\t"   \
                "addx    %%g1, 0, %%o2          \n\t"   \
                "inc     4, %%o1                \n\t"

#define MULADDC_STOP                            \
                "st      %%o2, %0       \n\t"   \
                "stx     %%o1, %1       \n\t"   \
                "stx     %%o0, %2       \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "g1", "o0", "o1", "o2", "o3", "o4",   \
          "o5"                                  \
        );

#else /* __sparc64__ */

/* SPARC v8: identical code, 32-bit pointer loads/stores. */
#define MULADDC_INIT                            \
    asm(                                        \
                "ld      %3, %%o0       \n\t"   \
                "ld      %4, %%o1       \n\t"   \
                "ld      %5, %%o2       \n\t"   \
                "ld      %6, %%o3       \n\t"

#define MULADDC_CORE                                    \
                "ld      [%%o0], %%o4           \n\t"   \
                "inc     4, %%o0                \n\t"   \
                "ld      [%%o1], %%o5           \n\t"   \
                "umul    %%o3, %%o4, %%o4       \n\t"   \
                "addcc   %%o4, %%o2, %%o4       \n\t"   \
                "rd      %%y, %%g1              \n\t"   \
                "addx    %%g1, 0, %%g1          \n\t"   \
                "addcc   %%o4, %%o5, %%o4       \n\t"   \
                "st      %%o4, [%%o1]           \n\t"   \
                "addx    %%g1, 0, %%o2          \n\t"   \
                "inc     4, %%o1                \n\t"

#define MULADDC_STOP                            \
                "st      %%o2, %0       \n\t"   \
                "st      %%o1, %1       \n\t"   \
                "st      %%o0, %2       \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "g1", "o0", "o1", "o2", "o3", "o4",   \
          "o5"                                  \
        );

#endif /* __sparc64__ */
#endif /* __sparc__ */
#if defined(__microblaze__) || defined(microblaze)

/* MicroBlaze has no 32x32->64 multiply: b is split into 16-bit halves
 * (r7 = low, r6 = high) and the product is assembled from four 16x16
 * partial products. r3 = s, r4 = d, r5 = carry. */
#define MULADDC_INIT                    \
    asm(                                \
        "lwi   r3,   %3         \n\t"   \
        "lwi   r4,   %4         \n\t"   \
        "lwi   r5,   %5         \n\t"   \
        "lwi   r6,   %6         \n\t"   \
        "andi  r7,   r6, 0xffff \n\t"   \
        "bsrli r6,   r6, 16     \n\t"

#define MULADDC_CORE                    \
        "lhui  r8,   r3,   0    \n\t"   \
        "addi  r3,   r3,   2    \n\t"   \
        "lhui  r9,   r3,   0    \n\t"   \
        "addi  r3,   r3,   2    \n\t"   \
        "mul   r10,  r9,  r6    \n\t"   \
        "mul   r11,  r8,  r7    \n\t"   \
        "mul   r12,  r9,  r7    \n\t"   \
        "mul   r13,  r8,  r6    \n\t"   \
        "bsrli r8,   r10, 16    \n\t"   \
        "bsrli r9,   r11, 16    \n\t"   \
        "add   r13,  r13, r8    \n\t"   \
        "add   r13,  r13, r9    \n\t"   \
        "bslli r10,  r10, 16    \n\t"   \
        "bslli r11,  r11, 16    \n\t"   \
        "add   r12,  r12, r10   \n\t"   \
        "addc  r13,  r13, r0    \n\t"   \
        "add   r12,  r12, r11   \n\t"   \
        "addc  r13,  r13, r0    \n\t"   \
        "lwi   r10,  r4,   0    \n\t"   \
        "add   r12,  r12, r10   \n\t"   \
        "addc  r13,  r13, r0    \n\t"   \
        "add   r12,  r12, r5    \n\t"   \
        "addc  r5,   r13, r0    \n\t"   \
        "swi   r12,  r4,   0    \n\t"   \
        "addi  r4,   r4,   4    \n\t"

#define MULADDC_STOP                    \
        "swi   r5,   %0         \n\t"   \
        "swi   r4,   %1         \n\t"   \
        "swi   r3,   %2         \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8",       \
          "r9", "r10", "r11", "r12", "r13"          \
    );

#endif /* MicroBlaze */
#if defined(__tricore__)

/* TriCore: a2 = s, a3 = d, d4 = carry, d1 = b; madd.u writes the full
 * 64-bit product+accumulator into the register pair %e2 (d2:d3). */
#define MULADDC_INIT                            \
    asm(                                        \
        "ld.a   %%a2, %3                \n\t"   \
        "ld.a   %%a3, %4                \n\t"   \
        "ld.w   %%d4, %5                \n\t"   \
        "ld.w   %%d1, %6                \n\t"   \
        "xor    %%d5, %%d5              \n\t"

#define MULADDC_CORE                            \
        "ld.w   %%d0,   [%%a2+]         \n\t"   \
        "madd.u %%e2, %%e4, %%d0, %%d1  \n\t"   \
        "ld.w   %%d0,   [%%a3]          \n\t"   \
        "addx   %%d2,   %%d2,   %%d0    \n\t"   \
        "addc   %%d3,   %%d3,   0       \n\t"   \
        "mov    %%d4,   %%d3            \n\t"   \
        "st.w  [%%a3+], %%d2            \n\t"

#define MULADDC_STOP                            \
        "st.w   %0, %%d4                \n\t"   \
        "st.a   %1, %%a3                \n\t"   \
        "st.a   %2, %%a2                \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "d0", "d1", "e2", "d4", "a2", "a3"    \
    );

#endif /* TriCore */
/*
 * Note, gcc -O0 by default uses r7 for the frame pointer, so it complains about
 * our use of r7 below, unless -fomit-frame-pointer is passed.
 *
 * On the other hand, -fomit-frame-pointer is implied by any -Ox options with
 * x !=0, which we can detect using __OPTIMIZE__ (which is also defined by
 * clang and armcc5 under the same conditions).
 *
 * So, only use the optimized assembly below for optimized build, which avoids
 * the build error and is pretty reasonable anyway.
 */
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
#define MULADDC_CANNOT_USE_R7
#endif

#if defined(__arm__) && !defined(MULADDC_CANNOT_USE_R7)

#if defined(__thumb__) && !defined(__thumb2__)

/* Thumb-1 has no long multiply: b is split into 16-bit halves kept in the
 * high registers r8 (low half) and r9 (high half). r0 = s, r1 = d, r2 = c. */
#define MULADDC_INIT                                    \
    asm(                                                \
            "ldr    r0, %3                      \n\t"   \
            "ldr    r1, %4                      \n\t"   \
            "ldr    r2, %5                      \n\t"   \
            "ldr    r3, %6                      \n\t"   \
            "lsr    r7, r3, #16                 \n\t"   \
            "mov    r9, r7                      \n\t"   \
            "lsl    r7, r3, #16                 \n\t"   \
            "lsr    r7, r7, #16                 \n\t"   \
            "mov    r8, r7                      \n\t"

/* One limb via four 16x16 partial products, carries folded through r2. */
#define MULADDC_CORE                                    \
            "ldmia  r0!, {r6}                   \n\t"   \
            "lsr    r7, r6, #16                 \n\t"   \
            "lsl    r6, r6, #16                 \n\t"   \
            "lsr    r6, r6, #16                 \n\t"   \
            "mov    r4, r8                      \n\t"   \
            "mul    r4, r6                      \n\t"   \
            "mov    r3, r9                      \n\t"   \
            "mul    r6, r3                      \n\t"   \
            "mov    r5, r9                      \n\t"   \
            "mul    r5, r7                      \n\t"   \
            "mov    r3, r8                      \n\t"   \
            "mul    r7, r3                      \n\t"   \
            "lsr    r3, r6, #16                 \n\t"   \
            "add    r5, r5, r3                  \n\t"   \
            "lsr    r3, r7, #16                 \n\t"   \
            "add    r5, r5, r3                  \n\t"   \
            "add    r4, r4, r2                  \n\t"   \
            "mov    r2, #0                      \n\t"   \
            "adc    r5, r2                      \n\t"   \
            "lsl    r3, r6, #16                 \n\t"   \
            "add    r4, r4, r3                  \n\t"   \
            "adc    r5, r2                      \n\t"   \
            "lsl    r3, r7, #16                 \n\t"   \
            "add    r4, r4, r3                  \n\t"   \
            "adc    r5, r2                      \n\t"   \
            "ldr    r3, [r1]                    \n\t"   \
            "add    r4, r4, r3                  \n\t"   \
            "adc    r2, r5                      \n\t"   \
            "stmia  r1!, {r4}                   \n\t"

#define MULADDC_STOP                                    \
            "str    r2, %0                      \n\t"   \
            "str    r1, %1                      \n\t"   \
            "str    r0, %2                      \n\t"   \
            : "=m" (c),  "=m" (d), "=m" (s)             \
            : "m" (s), "m" (d), "m" (c), "m" (b)        \
            : "r0", "r1", "r2", "r3", "r4", "r5",       \
              "r6", "r7", "r8", "r9", "cc"              \
        );

#elif defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)

/* DSP extension: umaal does the whole step (multiply + двух accumulate)
 * -- one instruction per limb. */
#define MULADDC_INIT                            \
    asm(

#define MULADDC_CORE                            \
            "ldr    r0, [%0], #4        \n\t"   \
            "ldr    r1, [%1]            \n\t"   \
            "umaal  r1, %2, %3, r0      \n\t"   \
            "str    r1, [%1], #4        \n\t"

#define MULADDC_STOP                                    \
            : "=r" (s),  "=r" (d), "=r" (c)             \
            : "r" (b), "0" (s), "1" (d), "2" (c)        \
            : "r0", "r1", "memory"                      \
        );

#else

/* ARMv3+ with long multiply: umlal accumulates the 64-bit product. */
#define MULADDC_INIT                                    \
    asm(                                                \
            "ldr    r0, %3                      \n\t"   \
            "ldr    r1, %4                      \n\t"   \
            "ldr    r2, %5                      \n\t"   \
            "ldr    r3, %6                      \n\t"

#define MULADDC_CORE                                    \
            "ldr    r4, [r0], #4                \n\t"   \
            "mov    r5, #0                      \n\t"   \
            "ldr    r6, [r1]                    \n\t"   \
            "umlal  r2, r5, r3, r4              \n\t"   \
            "adds   r7, r6, r2                  \n\t"   \
            "adc    r2, r5, #0                  \n\t"   \
            "str    r7, [r1], #4                \n\t"

#define MULADDC_STOP                                    \
            "str    r2, %0                      \n\t"   \
            "str    r1, %1                      \n\t"   \
            "str    r0, %2                      \n\t"   \
            : "=m" (c),  "=m" (d), "=m" (s)             \
            : "m" (s), "m" (d), "m" (c), "m" (b)        \
            : "r0", "r1", "r2", "r3", "r4", "r5",       \
              "r6", "r7", "cc"                          \
        );

#endif /* Thumb */

#endif /* ARMv3 */
#if defined(__alpha__)

/* Alpha: $1 = s, $2 = d, $3 = carry, $4 = b; no carry flag, so carries are
 * recovered with cmpult after each add. */
#define MULADDC_INIT                \
    asm(                            \
        "ldq    $1, %3      \n\t"   \
        "ldq    $2, %4      \n\t"   \
        "ldq    $3, %5      \n\t"   \
        "ldq    $4, %6      \n\t"

#define MULADDC_CORE                    \
        "ldq    $6,  0($1)      \n\t"   \
        "addq   $1,  8, $1      \n\t"   \
        "mulq   $6, $4, $7      \n\t"   \
        "umulh  $6, $4, $6      \n\t"   \
        "addq   $7, $3, $7      \n\t"   \
        "cmpult $7, $3, $3      \n\t"   \
        "ldq    $5,  0($2)      \n\t"   \
        "addq   $7, $5, $7      \n\t"   \
        "cmpult $7, $5, $5      \n\t"   \
        "stq    $7,  0($2)      \n\t"   \
        "addq   $2,  8, $2      \n\t"   \
        "addq   $6, $3, $3      \n\t"   \
        "addq   $5, $3, $3      \n\t"

#define MULADDC_STOP                \
        "stq    $3, %0      \n\t"   \
        "stq    $2, %1      \n\t"   \
        "stq    $1, %2      \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "$1", "$2", "$3", "$4", "$5", "$6", "$7"  \
    );

#endif /* Alpha */
728 #if defined(__mips__) && !defined(__mips64)
\r
730 #define MULADDC_INIT \
\r
732 "lw $10, %3 \n\t" \
\r
733 "lw $11, %4 \n\t" \
\r
734 "lw $12, %5 \n\t" \
\r
737 #define MULADDC_CORE \
\r
738 "lw $14, 0($10) \n\t" \
\r
739 "multu $13, $14 \n\t" \
\r
740 "addi $10, $10, 4 \n\t" \
\r
743 "addu $14, $12, $14 \n\t" \
\r
744 "lw $15, 0($11) \n\t" \
\r
745 "sltu $12, $14, $12 \n\t" \
\r
746 "addu $15, $14, $15 \n\t" \
\r
747 "sltu $14, $15, $14 \n\t" \
\r
748 "addu $12, $12, $9 \n\t" \
\r
749 "sw $15, 0($11) \n\t" \
\r
750 "addu $12, $12, $14 \n\t" \
\r
751 "addi $11, $11, 4 \n\t"
\r
753 #define MULADDC_STOP \
\r
754 "sw $12, %0 \n\t" \
\r
755 "sw $11, %1 \n\t" \
\r
756 "sw $10, %2 \n\t" \
\r
757 : "=m" (c), "=m" (d), "=m" (s) \
\r
758 : "m" (s), "m" (d), "m" (c), "m" (b) \
\r
759 : "$9", "$10", "$11", "$12", "$13", "$14", "$15", "lo", "hi" \
\r
765 #if (defined(_MSC_VER) && defined(_M_IX86)) || defined(__WATCOMC__)
\r
767 #define MULADDC_INIT \
\r
773 #define MULADDC_CORE \
\r
776 __asm add eax, ecx \
\r
778 __asm add eax, [edi] \
\r
780 __asm mov ecx, edx \
\r
783 #if defined(MBEDTLS_HAVE_SSE2)
\r
785 #define EMIT __asm _emit
\r
787 #define MULADDC_HUIT \
\r
788 EMIT 0x0F EMIT 0x6E EMIT 0xC9 \
\r
789 EMIT 0x0F EMIT 0x6E EMIT 0xC3 \
\r
790 EMIT 0x0F EMIT 0x6E EMIT 0x1F \
\r
791 EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
\r
792 EMIT 0x0F EMIT 0x6E EMIT 0x16 \
\r
793 EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
\r
794 EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x04 \
\r
795 EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
\r
796 EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x08 \
\r
797 EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
\r
798 EMIT 0x0F EMIT 0x6E EMIT 0x7E EMIT 0x0C \
\r
799 EMIT 0x0F EMIT 0xF4 EMIT 0xF8 \
\r
800 EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
\r
801 EMIT 0x0F EMIT 0x6E EMIT 0x5F EMIT 0x04 \
\r
802 EMIT 0x0F EMIT 0xD4 EMIT 0xDC \
\r
803 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x08 \
\r
804 EMIT 0x0F EMIT 0xD4 EMIT 0xEE \
\r
805 EMIT 0x0F EMIT 0x6E EMIT 0x67 EMIT 0x0C \
\r
806 EMIT 0x0F EMIT 0xD4 EMIT 0xFC \
\r
807 EMIT 0x0F EMIT 0x7E EMIT 0x0F \
\r
808 EMIT 0x0F EMIT 0x6E EMIT 0x56 EMIT 0x10 \
\r
809 EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
\r
810 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
\r
811 EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x14 \
\r
812 EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
\r
813 EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
\r
814 EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x18 \
\r
815 EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
\r
816 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x04 \
\r
817 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
\r
818 EMIT 0x0F EMIT 0x6E EMIT 0x5E EMIT 0x1C \
\r
819 EMIT 0x0F EMIT 0xF4 EMIT 0xD8 \
\r
820 EMIT 0x0F EMIT 0xD4 EMIT 0xCD \
\r
821 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x10 \
\r
822 EMIT 0x0F EMIT 0xD4 EMIT 0xD5 \
\r
823 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x08 \
\r
824 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
\r
825 EMIT 0x0F EMIT 0xD4 EMIT 0xCF \
\r
826 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x14 \
\r
827 EMIT 0x0F EMIT 0xD4 EMIT 0xE5 \
\r
828 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x0C \
\r
829 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
\r
830 EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
\r
831 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x18 \
\r
832 EMIT 0x0F EMIT 0xD4 EMIT 0xF5 \
\r
833 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x10 \
\r
834 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
\r
835 EMIT 0x0F EMIT 0xD4 EMIT 0xCC \
\r
836 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x1C \
\r
837 EMIT 0x0F EMIT 0xD4 EMIT 0xDD \
\r
838 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x14 \
\r
839 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
\r
840 EMIT 0x0F EMIT 0xD4 EMIT 0xCE \
\r
841 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x18 \
\r
842 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
\r
843 EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
\r
844 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x1C \
\r
845 EMIT 0x83 EMIT 0xC7 EMIT 0x20 \
\r
846 EMIT 0x83 EMIT 0xC6 EMIT 0x20 \
\r
847 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
\r
848 EMIT 0x0F EMIT 0x7E EMIT 0xC9
\r
850 #define MULADDC_STOP \
\r
851 EMIT 0x0F EMIT 0x77 \
\r
858 #define MULADDC_STOP \
\r
866 #endif /* MBEDTLS_HAVE_ASM */
\r
868 #if !defined(MULADDC_CORE)
\r
869 #if defined(MBEDTLS_HAVE_UDBL)
\r
871 #define MULADDC_INIT \
\r
873 mbedtls_t_udbl r; \
\r
874 mbedtls_mpi_uint r0, r1;
\r
876 #define MULADDC_CORE \
\r
877 r = *(s++) * (mbedtls_t_udbl) b; \
\r
878 r0 = (mbedtls_mpi_uint) r; \
\r
879 r1 = (mbedtls_mpi_uint)( r >> biL ); \
\r
880 r0 += c; r1 += (r0 < c); \
\r
881 r0 += *d; r1 += (r0 < *d); \
\r
882 c = r1; *(d++) = r0;
\r
884 #define MULADDC_STOP \
\r
888 #define MULADDC_INIT \
\r
890 mbedtls_mpi_uint s0, s1, b0, b1; \
\r
891 mbedtls_mpi_uint r0, r1, rx, ry; \
\r
892 b0 = ( b << biH ) >> biH; \
\r
895 #define MULADDC_CORE \
\r
896 s0 = ( *s << biH ) >> biH; \
\r
897 s1 = ( *s >> biH ); s++; \
\r
898 rx = s0 * b1; r0 = s0 * b0; \
\r
899 ry = s1 * b0; r1 = s1 * b1; \
\r
900 r1 += ( rx >> biH ); \
\r
901 r1 += ( ry >> biH ); \
\r
902 rx <<= biH; ry <<= biH; \
\r
903 r0 += rx; r1 += (r0 < rx); \
\r
904 r0 += ry; r1 += (r0 < ry); \
\r
905 r0 += c; r1 += (r0 < c); \
\r
906 r0 += *d; r1 += (r0 < *d); \
\r
907 c = r1; *(d++) = r0;
\r
909 #define MULADDC_STOP \
\r
912 #endif /* C (generic) */
\r
913 #endif /* C (longlong) */
\r
915 #endif /* bn_mul.h */
\r