/home/dko/projects/mobilec/tags/MobileC-v1.10.2/MobileC-v1.10.2/src/security/xyssl-0.9/include/xyssl/bn_mul.h

Go to the documentation of this file.
00001 
00004 /*
00005  *      Multiply source vector [s] with b, add result
00006  *       to destination vector [d] and set carry c.
00007  *
00008  *      Currently supports:
00009  *
00010  *         . IA-32 (386+)         . AMD64 / EM64T
00011  *         . IA-32 (SSE2)         . Motorola 68000
00012  *         . PowerPC, 32-bit      . MicroBlaze
00013  *         . PowerPC, 64-bit      . TriCore
00014  *         . SPARC v8             . ARM v3+
00015  *         . Alpha                . MIPS32
00016  *         . C, longlong          . C, generic
00017  */
00018 #ifndef XYSSL_BN_MUL_H
00019 #define XYSSL_BN_MUL_H
00020 
00021 #include "xyssl/config.h"
00022 
00023 #if defined(XYSSL_HAVE_ASM)
00024 
00025 #if defined(__GNUC__)
00026 #if defined(__i386__)
/*
 * IA-32 (386+) implementation.
 * Register roles: esi = s (source), edi = d (destination),
 * ecx = c (carry), ebx = b (multiplier).  The caller's ebx -- the PIC
 * base register under -fPIC -- is saved in t by MULADDC_INIT and
 * restored by MULADDC_STOP, which is why "ebx" is absent from the
 * clobber list there.
 *
 * NOTE(review): each instruction is a separate asm() statement with no
 * operand constraints tying the registers between statements;
 * correctness relies on the compiler leaving esi/edi/ecx/ebx untouched
 * across the MULADDC_CORE sequence.  Verify against the target
 * compiler/version before reuse.
 */
00027 
/* Save ebx in t, then load s, d, c, b into esi, edi, ecx, ebx. */
00028 #define MULADDC_INIT                            \
00029     asm( "movl   %%ebx, %0      " : "=m" (t));  \
00030     asm( "movl   %0, %%esi      " :: "m" (s));  \
00031     asm( "movl   %0, %%edi      " :: "m" (d));  \
00032     asm( "movl   %0, %%ecx      " :: "m" (c));  \
00033     asm( "movl   %0, %%ebx      " :: "m" (b));
00034 
/*
 * One limb: edx:eax = (*s++) * b; add carry c and *d with carry
 * propagation into edx; store the low word to *d++ (stosl advances
 * edi); the new carry ends up in ecx.
 */
00035 #define MULADDC_CORE                            \
00036     asm( "lodsl                 " );            \
00037     asm( "mull   %ebx           " );            \
00038     asm( "addl   %ecx,   %eax   " );            \
00039     asm( "adcl   $0,     %edx   " );            \
00040     asm( "addl   (%edi), %eax   " );            \
00041     asm( "adcl   $0,     %edx   " );            \
00042     asm( "movl   %edx,   %ecx   " );            \
00043     asm( "stosl                 " );
00044 
/* Restore ebx from t, write back c, d, s; declare clobbers. */
00045 #define MULADDC_STOP                            \
00046     asm( "movl   %0, %%ebx      " :: "m" (t));  \
00047     asm( "movl   %%ecx, %0      " : "=m" (c));  \
00048     asm( "movl   %%edi, %0      " : "=m" (d));  \
00049     asm( "movl   %%esi, %0      " : "=m" (s) :: \
00050     "eax", "ecx", "edx", "esi", "edi" );
00051 
00052 #if defined(XYSSL_HAVE_SSE2)
00053 
/*
 * SSE2 fast path: process eight limbs per invocation.  mm0 holds b;
 * pmuludq computes 32x32->64 products, the running carry is kept in
 * mm1 and shifted right 32 bits between limbs; esi/edi are advanced
 * by 32 bytes at the end and the final carry is moved back to ecx so
 * the plain MULADDC_CORE/STOP can follow.
 */
00054 #define MULADDC_HUIT                            \
00055     asm( "movd     %ecx,     %mm1     " );      \
00056     asm( "movd     %ebx,     %mm0     " );      \
00057     asm( "movd     (%edi),   %mm3     " );      \
00058     asm( "paddq    %mm3,     %mm1     " );      \
00059     asm( "movd     (%esi),   %mm2     " );      \
00060     asm( "pmuludq  %mm0,     %mm2     " );      \
00061     asm( "movd     4(%esi),  %mm4     " );      \
00062     asm( "pmuludq  %mm0,     %mm4     " );      \
00063     asm( "movd     8(%esi),  %mm6     " );      \
00064     asm( "pmuludq  %mm0,     %mm6     " );      \
00065     asm( "movd     12(%esi), %mm7     " );      \
00066     asm( "pmuludq  %mm0,     %mm7     " );      \
00067     asm( "paddq    %mm2,     %mm1     " );      \
00068     asm( "movd     4(%edi),  %mm3     " );      \
00069     asm( "paddq    %mm4,     %mm3     " );      \
00070     asm( "movd     8(%edi),  %mm5     " );      \
00071     asm( "paddq    %mm6,     %mm5     " );      \
00072     asm( "movd     12(%edi), %mm4     " );      \
00073     asm( "paddq    %mm4,     %mm7     " );      \
00074     asm( "movd     %mm1,     (%edi)   " );      \
00075     asm( "movd     16(%esi), %mm2     " );      \
00076     asm( "pmuludq  %mm0,     %mm2     " );      \
00077     asm( "psrlq    $32,      %mm1     " );      \
00078     asm( "movd     20(%esi), %mm4     " );      \
00079     asm( "pmuludq  %mm0,     %mm4     " );      \
00080     asm( "paddq    %mm3,     %mm1     " );      \
00081     asm( "movd     24(%esi), %mm6     " );      \
00082     asm( "pmuludq  %mm0,     %mm6     " );      \
00083     asm( "movd     %mm1,     4(%edi)  " );      \
00084     asm( "psrlq    $32,      %mm1     " );      \
00085     asm( "movd     28(%esi), %mm3     " );      \
00086     asm( "pmuludq  %mm0,     %mm3     " );      \
00087     asm( "paddq    %mm5,     %mm1     " );      \
00088     asm( "movd     16(%edi), %mm5     " );      \
00089     asm( "paddq    %mm5,     %mm2     " );      \
00090     asm( "movd     %mm1,     8(%edi)  " );      \
00091     asm( "psrlq    $32,      %mm1     " );      \
00092     asm( "paddq    %mm7,     %mm1     " );      \
00093     asm( "movd     20(%edi), %mm5     " );      \
00094     asm( "paddq    %mm5,     %mm4     " );      \
00095     asm( "movd     %mm1,     12(%edi) " );      \
00096     asm( "psrlq    $32,      %mm1     " );      \
00097     asm( "paddq    %mm2,     %mm1     " );      \
00098     asm( "movd     24(%edi), %mm5     " );      \
00099     asm( "paddq    %mm5,     %mm6     " );      \
00100     asm( "movd     %mm1,     16(%edi) " );      \
00101     asm( "psrlq    $32,      %mm1     " );      \
00102     asm( "paddq    %mm4,     %mm1     " );      \
00103     asm( "movd     28(%edi), %mm5     " );      \
00104     asm( "paddq    %mm5,     %mm3     " );      \
00105     asm( "movd     %mm1,     20(%edi) " );      \
00106     asm( "psrlq    $32,      %mm1     " );      \
00107     asm( "paddq    %mm6,     %mm1     " );      \
00108     asm( "movd     %mm1,     24(%edi) " );      \
00109     asm( "psrlq    $32,      %mm1     " );      \
00110     asm( "paddq    %mm3,     %mm1     " );      \
00111     asm( "movd     %mm1,     28(%edi) " );      \
00112     asm( "addl     $32,      %edi     " );      \
00113     asm( "addl     $32,      %esi     " );      \
00114     asm( "psrlq    $32,      %mm1     " );      \
00115     asm( "movd     %mm1,     %ecx     " );
00116 
00117 #endif /* SSE2 */
00118 #endif /* i386 */
00119 
00120 #if defined(__amd64__) || defined (__x86_64__)
00121 
/*
 * AMD64 / EM64T implementation (64-bit limbs).
 * Register roles: rsi = s, rdi = d, rcx = c, rbx = b; r8 is zeroed
 * once and used as a constant 0 source in the core loop.
 *
 * NOTE(review): like the i386 variant, this is a sequence of
 * independent asm() statements and relies on the compiler not
 * reusing rsi/rdi/rcx/rbx/r8 between them -- fragile with modern GCC.
 */
00122 #define MULADDC_INIT                            \
00123     asm( "movq   %0, %%rsi      " :: "m" (s));  \
00124     asm( "movq   %0, %%rdi      " :: "m" (d));  \
00125     asm( "movq   %0, %%rcx      " :: "m" (c));  \
00126     asm( "movq   %0, %%rbx      " :: "m" (b));  \
00127     asm( "xorq   %r8, %r8       " );
00128 
/*
 * One limb: rdx:rax = (*s++) * b; add carry c, then add rax into *d++
 * in memory; the new carry (rdx plus carry-out) lands in rcx.
 */
00129 #define MULADDC_CORE                            \
00130     asm( "movq  (%rsi),%rax     " );            \
00131     asm( "mulq   %rbx           " );            \
00132     asm( "addq   $8,   %rsi     " );            \
00133     asm( "addq   %rcx, %rax     " );            \
00134     asm( "movq   %r8,  %rcx     " );            \
00135     asm( "adcq   $0,   %rdx     " );            \
00136     asm( "nop                   " );            \
00137     asm( "addq   %rax, (%rdi)   " );            \
00138     asm( "adcq   %rdx, %rcx     " );            \
00139     asm( "addq   $8,   %rdi     " );
00140 
/* Write back c, d, s; declare clobbers (rbx included here). */
00141 #define MULADDC_STOP                            \
00142     asm( "movq   %%rcx, %0      " : "=m" (c));  \
00143     asm( "movq   %%rdi, %0      " : "=m" (d));  \
00144     asm( "movq   %%rsi, %0      " : "=m" (s) :: \
00145     "rax", "rcx", "rdx", "rbx", "rsi", "rdi", "r8" );
00146 
00147 #endif /* AMD64 */
00148 
00149 #if defined(__mc68020__) || defined(__mcpu32__)
00150 
/*
 * Motorola 68020+/CPU32 implementation.
 * Register roles: a2 = s, a3 = d, d3 = c, d2 = b; d0 is kept at 0 so
 * addxl %d0,<reg> acts as "add the X (extend) flag only".
 * mulul produces a full 32x32->64 product in a d-register pair.
 */
00151 #define MULADDC_INIT                            \
00152     asm( "movl   %0, %%a2       " :: "m" (s));  \
00153     asm( "movl   %0, %%a3       " :: "m" (d));  \
00154     asm( "movl   %0, %%d3       " :: "m" (c));  \
00155     asm( "movl   %0, %%d2       " :: "m" (b));  \
00156     asm( "moveq  #0, %d0        " );
00157 
/*
 * One limb: d4:d1 = (*s++) * b; add carry d3 and *d (a3@+ updates d
 * in memory and post-increments); the new carry accumulates in d3.
 */
00158 #define MULADDC_CORE                            \
00159     asm( "movel  %a2@+, %d1     " );            \
00160     asm( "mulul  %d2, %d4:%d1   " );            \
00161     asm( "addl   %d3, %d1       " );            \
00162     asm( "addxl  %d0, %d4       " );            \
00163     asm( "moveq  #0,  %d3       " );            \
00164     asm( "addl   %d1, %a3@+     " );            \
00165     asm( "addxl  %d4, %d3       " );
00166 
/* Write back c, d, s; declare clobbers. */
00167 #define MULADDC_STOP                            \
00168     asm( "movl   %%d3, %0       " : "=m" (c));  \
00169     asm( "movl   %%a3, %0       " : "=m" (d));  \
00170     asm( "movl   %%a2, %0       " : "=m" (s) :: \
00171     "d0", "d1", "d2", "d3", "d4", "a2", "a3" );
00172 
/*
 * Eight limbs unrolled; the carry word alternates between d3 and d4
 * from one limb to the next to avoid the moveq reset each iteration.
 * NOTE(review): the first addxl here (vs. addl in MULADDC_CORE)
 * folds in the X flag left by the previous limb -- confirm the
 * intended flag state on entry before modifying.
 */
00173 #define MULADDC_HUIT                            \
00174     asm( "movel  %a2@+, %d1     " );            \
00175     asm( "mulul  %d2, %d4:%d1   " );            \
00176     asm( "addxl  %d3, %d1       " );            \
00177     asm( "addxl  %d0, %d4       " );            \
00178     asm( "addl   %d1, %a3@+     " );            \
00179     asm( "movel  %a2@+, %d1     " );            \
00180     asm( "mulul  %d2, %d3:%d1   " );            \
00181     asm( "addxl  %d4, %d1       " );            \
00182     asm( "addxl  %d0, %d3       " );            \
00183     asm( "addl   %d1, %a3@+     " );            \
00184     asm( "movel  %a2@+, %d1     " );            \
00185     asm( "mulul  %d2, %d4:%d1   " );            \
00186     asm( "addxl  %d3, %d1       " );            \
00187     asm( "addxl  %d0, %d4       " );            \
00188     asm( "addl   %d1, %a3@+     " );            \
00189     asm( "movel  %a2@+, %d1     " );            \
00190     asm( "mulul  %d2, %d3:%d1   " );            \
00191     asm( "addxl  %d4, %d1       " );            \
00192     asm( "addxl  %d0, %d3       " );            \
00193     asm( "addl   %d1, %a3@+     " );            \
00194     asm( "movel  %a2@+, %d1     " );            \
00195     asm( "mulul  %d2, %d4:%d1   " );            \
00196     asm( "addxl  %d3, %d1       " );            \
00197     asm( "addxl  %d0, %d4       " );            \
00198     asm( "addl   %d1, %a3@+     " );            \
00199     asm( "movel  %a2@+, %d1     " );            \
00200     asm( "mulul  %d2, %d3:%d1   " );            \
00201     asm( "addxl  %d4, %d1       " );            \
00202     asm( "addxl  %d0, %d3       " );            \
00203     asm( "addl   %d1, %a3@+     " );            \
00204     asm( "movel  %a2@+, %d1     " );            \
00205     asm( "mulul  %d2, %d4:%d1   " );            \
00206     asm( "addxl  %d3, %d1       " );            \
00207     asm( "addxl  %d0, %d4       " );            \
00208     asm( "addl   %d1, %a3@+     " );            \
00209     asm( "movel  %a2@+, %d1     " );            \
00210     asm( "mulul  %d2, %d3:%d1   " );            \
00211     asm( "addxl  %d4, %d1       " );            \
00212     asm( "addxl  %d0, %d3       " );            \
00213     asm( "addl   %d1, %a3@+     " );            \
00214     asm( "addxl  %d0, %d3       " );
00215 
00216 #endif /* MC68000 */
00217 
00218 #if defined(__powerpc__)   || defined(__ppc__)
00219 #if defined(__powerpc64__) || defined(__ppc64__)
00220 
/*
 * PowerPC implementations.  Four variants: 64-bit vs 32-bit limbs,
 * each in Apple/Darwin assembler syntax (bare register names) and
 * generic GNU syntax (%rN).  Common scheme for all four:
 *   r3 = s, r4 = d, r5 = c (carry), r6 = b.
 * INIT pre-decrements r3/r4 by one limb so the update-form loads and
 * stores (ldu/stdu or lwzu/stwu) can pre-increment each iteration,
 * and "addic r5, r5, 0" clears the CA (carry) bit.
 * CORE: r8 = low(*s * b), r9 = high(*s * b); adde/addze fold the
 * running carry through CA; result is stored back to *d.
 * STOP: addze picks up the final CA, the pointers are advanced past
 * the last limb, then c, d, s are written back.
 */
00221 #if defined(__MACH__) && defined(__APPLE__)
00222 
00223 #define MULADDC_INIT                            \
00224     asm( "ld     r3, %0         " :: "m" (s));  \
00225     asm( "ld     r4, %0         " :: "m" (d));  \
00226     asm( "ld     r5, %0         " :: "m" (c));  \
00227     asm( "ld     r6, %0         " :: "m" (b));  \
00228     asm( "addi   r3, r3, -8     " );            \
00229     asm( "addi   r4, r4, -8     " );            \
00230     asm( "addic  r5, r5,  0     " );
00231 
00232 #define MULADDC_CORE                            \
00233     asm( "ldu    r7, 8(r3)      " );            \
00234     asm( "mulld  r8, r7, r6     " );            \
00235     asm( "mulhdu r9, r7, r6     " );            \
00236     asm( "adde   r8, r8, r5     " );            \
00237     asm( "ld     r7, 8(r4)      " );            \
00238     asm( "addze  r5, r9         " );            \
00239     asm( "addc   r8, r8, r7     " );            \
00240     asm( "stdu   r8, 8(r4)      " );
00241 
00242 #define MULADDC_STOP                            \
00243     asm( "addze  r5, r5         " );            \
00244     asm( "addi   r4, r4, 8      " );            \
00245     asm( "addi   r3, r3, 8      " );            \
00246     asm( "std    r5, %0         " : "=m" (c));  \
00247     asm( "std    r4, %0         " : "=m" (d));  \
00248     asm( "std    r3, %0         " : "=m" (s) :: \
00249     "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
00250 
/* Same 64-bit algorithm, GNU assembler register syntax (%rN). */
00251 #else
00252 
00253 #define MULADDC_INIT                            \
00254     asm( "ld     %%r3, %0       " :: "m" (s));  \
00255     asm( "ld     %%r4, %0       " :: "m" (d));  \
00256     asm( "ld     %%r5, %0       " :: "m" (c));  \
00257     asm( "ld     %%r6, %0       " :: "m" (b));  \
00258     asm( "addi   %r3, %r3, -8   " );            \
00259     asm( "addi   %r4, %r4, -8   " );            \
00260     asm( "addic  %r5, %r5,  0   " );
00261 
00262 #define MULADDC_CORE                            \
00263     asm( "ldu    %r7, 8(%r3)    " );            \
00264     asm( "mulld  %r8, %r7, %r6  " );            \
00265     asm( "mulhdu %r9, %r7, %r6  " );            \
00266     asm( "adde   %r8, %r8, %r5  " );            \
00267     asm( "ld     %r7, 8(%r4)    " );            \
00268     asm( "addze  %r5, %r9       " );            \
00269     asm( "addc   %r8, %r8, %r7  " );            \
00270     asm( "stdu   %r8, 8(%r4)    " );
00271 
00272 #define MULADDC_STOP                            \
00273     asm( "addze  %r5, %r5       " );            \
00274     asm( "addi   %r4, %r4, 8    " );            \
00275     asm( "addi   %r3, %r3, 8    " );            \
00276     asm( "std    %%r5, %0       " : "=m" (c));  \
00277     asm( "std    %%r4, %0       " : "=m" (d));  \
00278     asm( "std    %%r3, %0       " : "=m" (s) :: \
00279     "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
00280 
00281 #endif
00282 
00283 #else /* PPC32 */
00284 
/* 32-bit limbs: lwz/stwu and mullw/mulhwu replace the 64-bit forms. */
00285 #if defined(__MACH__) && defined(__APPLE__)
00286 
00287 #define MULADDC_INIT                            \
00288     asm( "lwz    r3, %0         " :: "m" (s));  \
00289     asm( "lwz    r4, %0         " :: "m" (d));  \
00290     asm( "lwz    r5, %0         " :: "m" (c));  \
00291     asm( "lwz    r6, %0         " :: "m" (b));  \
00292     asm( "addi   r3, r3, -4     " );            \
00293     asm( "addi   r4, r4, -4     " );            \
00294     asm( "addic  r5, r5,  0     " );
00295 
00296 #define MULADDC_CORE                            \
00297     asm( "lwzu   r7, 4(r3)      " );            \
00298     asm( "mullw  r8, r7, r6     " );            \
00299     asm( "mulhwu r9, r7, r6     " );            \
00300     asm( "adde   r8, r8, r5     " );            \
00301     asm( "lwz    r7, 4(r4)      " );            \
00302     asm( "addze  r5, r9         " );            \
00303     asm( "addc   r8, r8, r7     " );            \
00304     asm( "stwu   r8, 4(r4)      " );
00305 
00306 #define MULADDC_STOP                            \
00307     asm( "addze  r5, r5         " );            \
00308     asm( "addi   r4, r4, 4      " );            \
00309     asm( "addi   r3, r3, 4      " );            \
00310     asm( "stw    r5, %0         " : "=m" (c));  \
00311     asm( "stw    r4, %0         " : "=m" (d));  \
00312     asm( "stw    r3, %0         " : "=m" (s) :: \
00313     "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
00314 
/* Same 32-bit algorithm, GNU assembler register syntax (%rN). */
00315 #else
00316 
00317 #define MULADDC_INIT                            \
00318     asm( "lwz    %%r3, %0       " :: "m" (s));  \
00319     asm( "lwz    %%r4, %0       " :: "m" (d));  \
00320     asm( "lwz    %%r5, %0       " :: "m" (c));  \
00321     asm( "lwz    %%r6, %0       " :: "m" (b));  \
00322     asm( "addi   %r3, %r3, -4   " );            \
00323     asm( "addi   %r4, %r4, -4   " );            \
00324     asm( "addic  %r5, %r5,  0   " );
00325 
00326 #define MULADDC_CORE                            \
00327     asm( "lwzu   %r7, 4(%r3)    " );            \
00328     asm( "mullw  %r8, %r7, %r6  " );            \
00329     asm( "mulhwu %r9, %r7, %r6  " );            \
00330     asm( "adde   %r8, %r8, %r5  " );            \
00331     asm( "lwz    %r7, 4(%r4)    " );            \
00332     asm( "addze  %r5, %r9       " );            \
00333     asm( "addc   %r8, %r8, %r7  " );            \
00334     asm( "stwu   %r8, 4(%r4)    " );
00335 
00336 #define MULADDC_STOP                            \
00337     asm( "addze  %r5, %r5       " );            \
00338     asm( "addi   %r4, %r4, 4    " );            \
00339     asm( "addi   %r3, %r3, 4    " );            \
00340     asm( "stw    %%r5, %0       " : "=m" (c));  \
00341     asm( "stw    %%r4, %0       " : "=m" (d));  \
00342     asm( "stw    %%r3, %0       " : "=m" (s) :: \
00343     "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
00344 
00345 #endif
00346 
00347 #endif /* PPC32 */
00348 #endif /* PPC64 */
00349 
00350 #if defined(__sparc__)
00351 
/*
 * SPARC v8 implementation.
 * Register roles: o0 = s, o1 = d, o2 = c, o3 = b.
 * umul leaves the high 32 bits of the product in the %y register,
 * read back with rd; addcc/addx propagate the carry through g1.
 */
00352 #define MULADDC_INIT                            \
00353     asm( "ld     %0, %%o0       " :: "m" (s));  \
00354     asm( "ld     %0, %%o1       " :: "m" (d));  \
00355     asm( "ld     %0, %%o2       " :: "m" (c));  \
00356     asm( "ld     %0, %%o3       " :: "m" (b));
00357 
/*
 * One limb: o4 = (*s++) * b (low), %y = high; add carry o2 and *d,
 * store to *d++, and fold both carry-outs plus %y into the new carry.
 */
00358 #define MULADDC_CORE                            \
00359     asm( "ld    [%o0], %o4      " );            \
00360     asm( "inc      4,  %o0      " );            \
00361     asm( "ld    [%o1], %o5      " );            \
00362     asm( "umul   %o3,  %o4, %o4 " );            \
00363     asm( "addcc  %o4,  %o2, %o4 " );            \
00364     asm( "rd      %y,  %g1      " );            \
00365     asm( "addx   %g1,    0, %g1 " );            \
00366     asm( "addcc  %o4,  %o5, %o4 " );            \
00367     asm( "st     %o4, [%o1]     " );            \
00368     asm( "addx   %g1,    0, %o2 " );            \
00369     asm( "inc      4,  %o1      " );
00370 
/* Write back c, d, s; declare clobbers. */
00371 #define MULADDC_STOP                            \
00372     asm( "st     %%o2, %0       " : "=m" (c));  \
00373     asm( "st     %%o1, %0       " : "=m" (d));  \
00374     asm( "st     %%o0, %0       " : "=m" (s) :: \
00375     "g1", "o0", "o1", "o2", "o3", "o4", "o5" );
00376 
00377 #endif /* SPARCv8 */
00378 
00379 #if defined(__microblaze__) || defined(microblaze)
00380 
/*
 * MicroBlaze implementation.  The mul instruction yields only the low
 * 32 bits, so the 32x32->64 product is built schoolbook-style from
 * 16-bit halves.  r3 = s, r4 = d, r5 = c; b is split in INIT into
 * r7 = low 16 bits, r6 = high 16 bits.  r0 is the hardwired zero
 * register, so "addc rX, rX, r0" adds just the carry flag.
 */
00381 #define MULADDC_INIT                            \
00382     asm( "lwi   r3,   %0        " :: "m" (s));  \
00383     asm( "lwi   r4,   %0        " :: "m" (d));  \
00384     asm( "lwi   r5,   %0        " :: "m" (c));  \
00385     asm( "lwi   r6,   %0        " :: "m" (b));  \
00386     asm( "andi  r7,   r6, 0xffff" );            \
00387     asm( "bsrli r6,   r6, 16    " );
00388 
/*
 * One limb: load *s as two 16-bit halves (r8 low, r9 high), form the
 * four partial products, recombine into r12 (low) / r13 (high) with
 * carry fixups, then add *d and the running carry r5; store to *d++.
 * NOTE(review): reading the limb halfword-by-halfword at offsets 0
 * and 2 bakes in a byte-order assumption -- confirm the target
 * endianness before reuse.
 */
00389 #define MULADDC_CORE                            \
00390     asm( "lhui  r8,   r3,   0   " );            \
00391     asm( "addi  r3,   r3,   2   " );            \
00392     asm( "lhui  r9,   r3,   0   " );            \
00393     asm( "addi  r3,   r3,   2   " );            \
00394     asm( "mul   r10,  r9,  r6   " );            \
00395     asm( "mul   r11,  r8,  r7   " );            \
00396     asm( "mul   r12,  r9,  r7   " );            \
00397     asm( "mul   r13,  r8,  r6   " );            \
00398     asm( "bsrli  r8, r10,  16   " );            \
00399     asm( "bsrli  r9, r11,  16   " );            \
00400     asm( "add   r13, r13,  r8   " );            \
00401     asm( "add   r13, r13,  r9   " );            \
00402     asm( "bslli r10, r10,  16   " );            \
00403     asm( "bslli r11, r11,  16   " );            \
00404     asm( "add   r12, r12, r10   " );            \
00405     asm( "addc  r13, r13,  r0   " );            \
00406     asm( "add   r12, r12, r11   " );            \
00407     asm( "addc  r13, r13,  r0   " );            \
00408     asm( "lwi   r10,  r4,   0   " );            \
00409     asm( "add   r12, r12, r10   " );            \
00410     asm( "addc  r13, r13,  r0   " );            \
00411     asm( "add   r12, r12,  r5   " );            \
00412     asm( "addc   r5, r13,  r0   " );            \
00413     asm( "swi   r12,  r4,   0   " );            \
00414     asm( "addi   r4,  r4,   4   " );
00415 
/* Write back c, d, s; declare clobbers. */
00416 #define MULADDC_STOP                            \
00417     asm( "swi   r5,   %0        " : "=m" (c));  \
00418     asm( "swi   r4,   %0        " : "=m" (d));  \
00419     asm( "swi   r3,   %0        " : "=m" (s) :: \
00420      "r3", "r4" , "r5" , "r6" , "r7" , "r8" ,   \
00421      "r9", "r10", "r11", "r12", "r13" );
00422 
00423 #endif /* MicroBlaze */
00424 
00425 #if defined(__tricore__)
00426 
/*
 * Infineon TriCore implementation.
 * Register roles: a2 = s, a3 = d, d4 = c, d1 = b; d5 is zeroed so the
 * e4 register pair (d4:d5) holds the carry widened to 64 bits.
 */
00427 #define MULADDC_INIT                            \
00428     asm( "ld.a   %%a2, %0       " :: "m" (s));  \
00429     asm( "ld.a   %%a3, %0       " :: "m" (d));  \
00430     asm( "ld.w   %%d4, %0       " :: "m" (c));  \
00431     asm( "ld.w   %%d1, %0       " :: "m" (b));  \
00432     asm( "xor    %d5, %d5       " );
00433 
/*
 * One limb: e2 (d2:d3) = carry(e4) + (*s++) * b via madd.u; add *d
 * with carry into the pair, store the low word to *d++, and keep the
 * high word as the new carry in d4.
 */
00434 #define MULADDC_CORE                            \
00435     asm( "ld.w   %d0,   [%a2+]      " );        \
00436     asm( "madd.u %e2, %e4, %d0, %d1 " );        \
00437     asm( "ld.w   %d0,   [%a3]       " );        \
00438     asm( "addx   %d2,    %d2,  %d0  " );        \
00439     asm( "addc   %d3,    %d3,    0  " );        \
00440     asm( "mov    %d4,    %d3        " );        \
00441     asm( "st.w  [%a3+],  %d2        " );
00442 
/* Write back c, d, s; declare clobbers. */
00443 #define MULADDC_STOP                            \
00444     asm( "st.w   %0, %%d4       " : "=m" (c));  \
00445     asm( "st.a   %0, %%a3       " : "=m" (d));  \
00446     asm( "st.a   %0, %%a2       " : "=m" (s) :: \
00447     "d0", "d1", "e2", "d4", "a2", "a3" );
00448 
00449 #endif /* TriCore */
00450 
00451 #if defined(__arm__)
00452 
/*
 * ARM (v3+) implementation.
 * Register roles: r0 = s, r1 = d, r2 = c, r3 = b.
 */
00453 #define MULADDC_INIT                            \
00454     asm( "ldr    r0, %0         " :: "m" (s));  \
00455     asm( "ldr    r1, %0         " :: "m" (d));  \
00456     asm( "ldr    r2, %0         " :: "m" (c));  \
00457     asm( "ldr    r3, %0         " :: "m" (b));
00458 
/*
 * One limb: umlal gives r5:r2 = r3 * r4 + r2 (carry folded into the
 * multiply-accumulate); then add *d, store to *d++ (post-indexed
 * addressing advances r0/r1), and leave the new carry in r2.
 */
00459 #define MULADDC_CORE                            \
00460     asm( "ldr    r4, [r0], #4   " );            \
00461     asm( "mov    r5, #0         " );            \
00462     asm( "ldr    r6, [r1]       " );            \
00463     asm( "umlal  r2, r5, r3, r4 " );            \
00464     asm( "adds   r7, r6, r2     " );            \
00465     asm( "adc    r2, r5, #0     " );            \
00466     asm( "str    r7, [r1], #4   " );
00467 
/* Write back c, d, s; declare clobbers. */
00468 #define MULADDC_STOP                            \
00469     asm( "str    r2, %0         " : "=m" (c));  \
00470     asm( "str    r1, %0         " : "=m" (d));  \
00471     asm( "str    r0, %0         " : "=m" (s) :: \
00472     "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7" );
00473 
00474 #endif /* ARMv3 */
00475 
00476 #if defined(__alpha__)
00477 
/*
 * Alpha implementation (64-bit limbs).
 * Register roles: $1 = s, $2 = d, $3 = c, $4 = b.
 * Alpha has no carry flag: mulq/umulh give the low/high product
 * halves and cmpult (compare-unsigned-less-than) detects carry-out
 * after each addition.
 */
00478 #define MULADDC_INIT                            \
00479     asm( "ldq    $1, %0         " :: "m" (s));  \
00480     asm( "ldq    $2, %0         " :: "m" (d));  \
00481     asm( "ldq    $3, %0         " :: "m" (c));  \
00482     asm( "ldq    $4, %0         " :: "m" (b));
00483 
/*
 * One limb: $7 = low((*s++) * b), $6 = high; add carry $3 and *d,
 * store to *d++; the new carry in $3 is high + both carry-outs.
 */
00484 #define MULADDC_CORE                            \
00485     asm( "ldq    $6,  0($1)     " );            \
00486     asm( "addq   $1,  8, $1     " );            \
00487     asm( "mulq   $6, $4, $7     " );            \
00488     asm( "umulh  $6, $4, $6     " );            \
00489     asm( "addq   $7, $3, $7     " );            \
00490     asm( "cmpult $7, $3, $3     " );            \
00491     asm( "ldq    $5,  0($2)     " );            \
00492     asm( "addq   $7, $5, $7     " );            \
00493     asm( "cmpult $7, $5, $5     " );            \
00494     asm( "stq    $7,  0($2)     " );            \
00495     asm( "addq   $2,  8, $2     " );            \
00496     asm( "addq   $6, $3, $3     " );            \
00497     asm( "addq   $5, $3, $3     " );
00498 
/* Write back c, d, s; declare clobbers. */
00499 #define MULADDC_STOP                            \
00500     asm( "stq    $3, %0         " : "=m" (c));  \
00501     asm( "stq    $2, %0         " : "=m" (d));  \
00502     asm( "stq    $1, %0         " : "=m" (s) :: \
00503     "$1", "$2", "$3", "$4", "$5", "$6", "$7" );
00504 
00505 #endif /* Alpha */
00506 
00507 #if defined(__mips__)
00508 
/*
 * MIPS32 implementation.
 * Register roles: $10 = s, $11 = d, $12 = c, $13 = b.
 * multu places the 64-bit product in HI/LO (read via mfhi/mflo);
 * carry-out of each addition is detected with sltu, as MIPS has no
 * flags register.
 */
00509 #define MULADDC_INIT                            \
00510     asm( "lw     $10, %0        " :: "m" (s));  \
00511     asm( "lw     $11, %0        " :: "m" (d));  \
00512     asm( "lw     $12, %0        " :: "m" (c));  \
00513     asm( "lw     $13, %0        " :: "m" (b));
00514 
/*
 * One limb: $14 = low((*s++) * b), $9 = high; add carry $12 and *d,
 * store to *d++, and accumulate high + both sltu carry bits into the
 * new carry $12.
 */
00515 #define MULADDC_CORE                            \
00516     asm( "lw     $14, 0($10)    " );            \
00517     asm( "multu  $13, $14       " );            \
00518     asm( "addi   $10, $10, 4    " );            \
00519     asm( "mflo   $14            " );            \
00520     asm( "mfhi   $9             " );            \
00521     asm( "addu   $14, $12, $14  " );            \
00522     asm( "lw     $15, 0($11)    " );            \
00523     asm( "sltu   $12, $14, $12  " );            \
00524     asm( "addu   $15, $14, $15  " );            \
00525     asm( "sltu   $14, $15, $14  " );            \
00526     asm( "addu   $12, $12, $9   " );            \
00527     asm( "sw     $15, 0($11)    " );            \
00528     asm( "addu   $12, $12, $14  " );            \
00529     asm( "addi   $11, $11, 4    " );
00530 
/* Write back c, d, s; declare clobbers. */
00531 #define MULADDC_STOP                            \
00532     asm( "sw     $12, %0        " : "=m" (c));  \
00533     asm( "sw     $11, %0        " : "=m" (d));  \
00534     asm( "sw     $10, %0        " : "=m" (s) :: \
00535     "$9", "$10", "$11", "$12", "$13", "$14", "$15" );
00536 
00537 #endif /* MIPS */
00538 #endif /* GNUC */
00539 
00540 #if (defined(_MSC_VER) && defined(_M_IX86)) || defined(__WATCOMC__)
00541 
/*
 * MSVC / Watcom IA-32 inline-assembly variant.  Same algorithm and
 * register assignment as the GCC i386 path: esi = s, edi = d,
 * ecx = c, ebx = b.  MSVC's __asm blocks track register usage, so no
 * explicit clobber list or ebx save/restore appears here.
 */
00542 #define MULADDC_INIT                            \
00543     __asm   mov     esi, s                      \
00544     __asm   mov     edi, d                      \
00545     __asm   mov     ecx, c                      \
00546     __asm   mov     ebx, b
00547 
/*
 * One limb: edx:eax = (*s++) * b; add carry c and *d; store low word
 * to *d++; new carry in ecx.
 */
00548 #define MULADDC_CORE                            \
00549     __asm   lodsd                               \
00550     __asm   mul     ebx                         \
00551     __asm   add     eax, ecx                    \
00552     __asm   adc     edx, 0                      \
00553     __asm   add     eax, [edi]                  \
00554     __asm   adc     edx, 0                      \
00555     __asm   mov     ecx, edx                    \
00556     __asm   stosd
00557 
/* Write the updated carry and pointers back to c, d, s. */
00558 #define MULADDC_STOP                            \
00559     __asm   mov     c, ecx                      \
00560     __asm   mov     d, edi                      \
00561     __asm   mov     s, esi                      \
00562 
00563 #if defined(XYSSL_HAVE_SSE2)
00564 
00565 #define EMIT __asm _emit
00566 
/*
 * SSE2 eight-limb path encoded as raw opcode bytes via _emit --
 * presumably because the inline assembler of these compilers predates
 * the SSE2 mnemonics.  The byte sequence mirrors the GCC SSE2
 * MULADDC_HUIT above (movd/pmuludq/paddq/psrlq chain, carry in mm1,
 * esi/edi advanced by 32).  NOTE(review): any change here must be
 * verified instruction-by-instruction against a disassembler.
 */
00567 #define MULADDC_HUIT                            \
00568     EMIT 0x0F  EMIT 0x6E  EMIT 0xC9             \
00569     EMIT 0x0F  EMIT 0x6E  EMIT 0xC3             \
00570     EMIT 0x0F  EMIT 0x6E  EMIT 0x1F             \
00571     EMIT 0x0F  EMIT 0xD4  EMIT 0xCB             \
00572     EMIT 0x0F  EMIT 0x6E  EMIT 0x16             \
00573     EMIT 0x0F  EMIT 0xF4  EMIT 0xD0             \
00574     EMIT 0x0F  EMIT 0x6E  EMIT 0x66  EMIT 0x04  \
00575     EMIT 0x0F  EMIT 0xF4  EMIT 0xE0             \
00576     EMIT 0x0F  EMIT 0x6E  EMIT 0x76  EMIT 0x08  \
00577     EMIT 0x0F  EMIT 0xF4  EMIT 0xF0             \
00578     EMIT 0x0F  EMIT 0x6E  EMIT 0x7E  EMIT 0x0C  \
00579     EMIT 0x0F  EMIT 0xF4  EMIT 0xF8             \
00580     EMIT 0x0F  EMIT 0xD4  EMIT 0xCA             \
00581     EMIT 0x0F  EMIT 0x6E  EMIT 0x5F  EMIT 0x04  \
00582     EMIT 0x0F  EMIT 0xD4  EMIT 0xDC             \
00583     EMIT 0x0F  EMIT 0x6E  EMIT 0x6F  EMIT 0x08  \
00584     EMIT 0x0F  EMIT 0xD4  EMIT 0xEE             \
00585     EMIT 0x0F  EMIT 0x6E  EMIT 0x67  EMIT 0x0C  \
00586     EMIT 0x0F  EMIT 0xD4  EMIT 0xFC             \
00587     EMIT 0x0F  EMIT 0x7E  EMIT 0x0F             \
00588     EMIT 0x0F  EMIT 0x6E  EMIT 0x56  EMIT 0x10  \
00589     EMIT 0x0F  EMIT 0xF4  EMIT 0xD0             \
00590     EMIT 0x0F  EMIT 0x73  EMIT 0xD1  EMIT 0x20  \
00591     EMIT 0x0F  EMIT 0x6E  EMIT 0x66  EMIT 0x14  \
00592     EMIT 0x0F  EMIT 0xF4  EMIT 0xE0             \
00593     EMIT 0x0F  EMIT 0xD4  EMIT 0xCB             \
00594     EMIT 0x0F  EMIT 0x6E  EMIT 0x76  EMIT 0x18  \
00595     EMIT 0x0F  EMIT 0xF4  EMIT 0xF0             \
00596     EMIT 0x0F  EMIT 0x7E  EMIT 0x4F  EMIT 0x04  \
00597     EMIT 0x0F  EMIT 0x73  EMIT 0xD1  EMIT 0x20  \
00598     EMIT 0x0F  EMIT 0x6E  EMIT 0x5E  EMIT 0x1C  \
00599     EMIT 0x0F  EMIT 0xF4  EMIT 0xD8             \
00600     EMIT 0x0F  EMIT 0xD4  EMIT 0xCD             \
00601     EMIT 0x0F  EMIT 0x6E  EMIT 0x6F  EMIT 0x10  \
00602     EMIT 0x0F  EMIT 0xD4  EMIT 0xD5             \
00603     EMIT 0x0F  EMIT 0x7E  EMIT 0x4F  EMIT 0x08  \
00604     EMIT 0x0F  EMIT 0x73  EMIT 0xD1  EMIT 0x20  \
00605     EMIT 0x0F  EMIT 0xD4  EMIT 0xCF             \
00606     EMIT 0x0F  EMIT 0x6E  EMIT 0x6F  EMIT 0x14  \
00607     EMIT 0x0F  EMIT 0xD4  EMIT 0xE5             \
00608     EMIT 0x0F  EMIT 0x7E  EMIT 0x4F  EMIT 0x0C  \
00609     EMIT 0x0F  EMIT 0x73  EMIT 0xD1  EMIT 0x20  \
00610     EMIT 0x0F  EMIT 0xD4  EMIT 0xCA             \
00611     EMIT 0x0F  EMIT 0x6E  EMIT 0x6F  EMIT 0x18  \
00612     EMIT 0x0F  EMIT 0xD4  EMIT 0xF5             \
00613     EMIT 0x0F  EMIT 0x7E  EMIT 0x4F  EMIT 0x10  \
00614     EMIT 0x0F  EMIT 0x73  EMIT 0xD1  EMIT 0x20  \
00615     EMIT 0x0F  EMIT 0xD4  EMIT 0xCC             \
00616     EMIT 0x0F  EMIT 0x6E  EMIT 0x6F  EMIT 0x1C  \
00617     EMIT 0x0F  EMIT 0xD4  EMIT 0xDD             \
00618     EMIT 0x0F  EMIT 0x7E  EMIT 0x4F  EMIT 0x14  \
00619     EMIT 0x0F  EMIT 0x73  EMIT 0xD1  EMIT 0x20  \
00620     EMIT 0x0F  EMIT 0xD4  EMIT 0xCE             \
00621     EMIT 0x0F  EMIT 0x7E  EMIT 0x4F  EMIT 0x18  \
00622     EMIT 0x0F  EMIT 0x73  EMIT 0xD1  EMIT 0x20  \
00623     EMIT 0x0F  EMIT 0xD4  EMIT 0xCB             \
00624     EMIT 0x0F  EMIT 0x7E  EMIT 0x4F  EMIT 0x1C  \
00625     EMIT 0x83  EMIT 0xC7  EMIT 0x20             \
00626     EMIT 0x83  EMIT 0xC6  EMIT 0x20             \
00627     EMIT 0x0F  EMIT 0x73  EMIT 0xD1  EMIT 0x20  \
00628     EMIT 0x0F  EMIT 0x7E  EMIT 0xC9
00629 
00630 #endif /* SSE2 */
00631 #endif /* MSVC */
00632 
00633 #endif /* XYSSL_HAVE_ASM */
00634 
00635 #if !defined(MULADDC_CORE)
00636 #if defined(XYSSL_HAVE_LONGLONG)
00637 
/*
 * Portable C fallback, double-width variant: uses a type t_dbl twice
 * as wide as the limb type t_int to hold the full product.
 * t_int, t_dbl, biL (bits per limb) and biH (half of biL) are defined
 * elsewhere in the library -- presumably the bignum configuration
 * headers; TODO confirm.  MULADDC_INIT opens a scope for the
 * temporaries that MULADDC_STOP closes.
 */
00638 #define MULADDC_INIT                    \
00639 {                                       \
00640     t_dbl r;                            \
00641     t_int r0, r1;
00642 
/*
 * One limb: r = (*s++) * b in double width, split into low r0 /
 * high r1; add c and *d, detecting carry-out by unsigned wraparound
 * (r0 < c after the add -- assumes t_int is unsigned, TODO confirm);
 * store r0 to *d++ and keep r1 as the new carry.
 */
00643 #define MULADDC_CORE                    \
00644     r   = *(s++) * (t_dbl) b;           \
00645     r0  = r;                            \
00646     r1  = r >> biL;                     \
00647     r0 += c;  r1 += (r0 <  c);          \
00648     r0 += *d; r1 += (r0 < *d);          \
00649     c = r1; *(d++) = r0;
00650 
/* Close the scope opened by MULADDC_INIT. */
00651 #define MULADDC_STOP                    \
00652 }
00653 
00654 #else
/*
 * Generic C fallback with no double-width type: schoolbook multiply
 * using biH-bit (half-limb) pieces.  b is split once in INIT into
 * b0 (low half) and b1 (high half).
 */
00655 #define MULADDC_INIT                    \
00656 {                                       \
00657     t_int s0, s1, b0, b1;               \
00658     t_int r0, r1, rx, ry;               \
00659     b0 = ( b << biH ) >> biH;           \
00660     b1 = ( b >> biH );
00661 
/*
 * One limb: split *s++ into s0/s1, form the four half products
 * (r0 = s0*b0 low, r1 = s1*b1 high, rx/ry cross terms), fold the
 * cross terms' high halves into r1 and their low halves into r0 with
 * wraparound carry tests, then add c and *d as in the double-width
 * variant; store r0 to *d++ and keep r1 as the new carry.
 */
00662 #define MULADDC_CORE                    \
00663     s0 = ( *s << biH ) >> biH;          \
00664     s1 = ( *s >> biH ); s++;            \
00665     rx = s0 * b1; r0 = s0 * b0;         \
00666     ry = s1 * b0; r1 = s1 * b1;         \
00667     r1 += ( rx >> biH );                \
00668     r1 += ( ry >> biH );                \
00669     rx <<= biH; ry <<= biH;             \
00670     r0 += rx; r1 += (r0 < rx);          \
00671     r0 += ry; r1 += (r0 < ry);          \
00672     r0 +=  c; r1 += (r0 <  c);          \
00673     r0 += *d; r1 += (r0 < *d);          \
00674     c = r1; *(d++) = r0;
00675 
/* Close the scope opened by MULADDC_INIT. */
00676 #define MULADDC_STOP                    \
00677 }
00678 
00679 #endif /* C (generic)  */
00680 #endif /* C (longlong) */
00681 
00682 #endif /* bn_mul.h */

Generated on Fri Jul 11 17:59:45 2008 for Mobile-C by  doxygen 1.5.4