/* asm.c
*
* Copyright (C) 2006-2015 wolfSSL Inc.
*
* This file is part of wolfSSL. (formerly known as CyaSSL)
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <wolfssl/wolfcrypt/settings.h>
/*
* Based on public domain TomsFastMath 0.10 by Tom St Denis, tomstdenis@iahu.ca,
* http://math.libtomcrypt.com
*/
/******************************************************************/
/* fp_montgomery_reduce.c asm or generic */
/* Each platform needs to query cpuid (leaf 7, sub-leaf 0) to see if the BMI2
 * (MULX) and ADX (ADCX/ADOX) extensions are supported. Also, set up a macro
 * for proper linkage w/o ABI conflicts.
 */
#if defined(HAVE_INTEL_MULX)
#ifndef _MSC_VER
#define cpuid(reg, leaf, sub)\
__asm__ __volatile__ ("cpuid":\
"=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) :\
"a" (leaf), "c"(sub));
#define XASM_LINK(f) asm(f)
#else
#include <intrin.h>
#define cpuid(a,b,c) __cpuidex((int*)a,b,c)
#define XASM_LINK(f)
#endif /* _MSC_VER */
#define EAX 0
#define EBX 1
#define ECX 2
#define EDX 3
#define CPUID_AVX1 0x1
#define CPUID_AVX2 0x2
#define CPUID_RDRAND 0x4
#define CPUID_RDSEED 0x8
#define CPUID_BMI2 0x10 /* MULX, RORX */
#define CPUID_ADX 0x20 /* ADCX, ADOX */
#define IS_INTEL_AVX1 (cpuid_flags&CPUID_AVX1)
#define IS_INTEL_AVX2 (cpuid_flags&CPUID_AVX2)
#define IS_INTEL_BMI2 (cpuid_flags&CPUID_BMI2)
#define IS_INTEL_ADX (cpuid_flags&CPUID_ADX)
#define IS_INTEL_RDRAND (cpuid_flags&CPUID_RDRAND)
#define IS_INTEL_RDSEED (cpuid_flags&CPUID_RDSEED)
#define SET_FLAGS
static word32 cpuid_check = 0;
static word32 cpuid_flags = 0;

static word32 cpuid_flag(word32 leaf, word32 sub, word32 num, word32 bit) {
    int got_intel_cpu = 0;
    unsigned int reg[5];

    reg[4] = '\0';
    cpuid(reg, 0, 0);
    if (memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 &&
        memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 &&
        memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) {
        got_intel_cpu = 1;
    }
    if (got_intel_cpu) {
        cpuid(reg, leaf, sub);
        return ((reg[num] >> bit) & 0x1);
    }
    return 0;
}
INLINE static int set_cpuid_flags(void) {
    if (cpuid_check == 0) {
        if (cpuid_flag(7, 0, EBX, 8))  { cpuid_flags |= CPUID_BMI2; }
        if (cpuid_flag(7, 0, EBX, 19)) { cpuid_flags |= CPUID_ADX;  }
        cpuid_check = 1;
        return 0;
    }
    return 1;
}
#define RETURN return
#define IF_HAVE_INTEL_MULX(func, ret) \
if(cpuid_check==0)set_cpuid_flags() ; \
if(IS_INTEL_BMI2 && IS_INTEL_ADX){ func; ret ; }
#else
#define IF_HAVE_INTEL_MULX(func, ret)
#endif
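/* Usage note: callers wrap their MULX fast path in IF_HAVE_INTEL_MULX. A rough,
 * illustrative sketch only (the helper name fp_mul_comba_mulx is an assumed
 * example, not necessarily the real one):
 *
 *     void fp_mul_comba(fp_int *A, fp_int *B, fp_int *C)
 *     {
 *         IF_HAVE_INTEL_MULX(fp_mul_comba_mulx(A, B, C), return) ;
 *         ... fall through to the generic comba multiply ...
 *     }
 *
 * The first call runs set_cpuid_flags() to probe cpuid leaf 7 once; after that
 * the cached cpuid_flags decide whether the BMI2/ADX path is taken.
 */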
#if defined(TFM_X86) && !defined(TFM_SSE2)
/* x86-32 code */
#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#define INNERMUL \
__asm__( \
"movl %5,%%eax \n\t" \
"mull %4 \n\t" \
"addl %1,%%eax \n\t" \
"adcl $0,%%edx \n\t" \
"addl %%eax,%0 \n\t" \
"adcl $0,%%edx \n\t" \
"movl %%edx,%1 \n\t" \
:"=g"(_c[LO]), "=r"(cy) \
:"0"(_c[LO]), "1"(cy), "g"(mu), "g"(*tmpm++) \
: "%eax", "%edx", "cc")
#define PROPCARRY \
__asm__( \
"addl %1,%0 \n\t" \
"setb %%al \n\t" \
"movzbl %%al,%1 \n\t" \
:"=g"(_c[LO]), "=r"(cy) \
:"0"(_c[LO]), "1"(cy) \
: "%eax", "cc")
/******************************************************************/
#elif defined(TFM_X86_64)
/* x86-64 code */
#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp;
#define INNERMUL \
__asm__( \
"movq %5,%%rax \n\t" \
"mulq %4 \n\t" \
"addq %1,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"addq %%rax,%0 \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rdx,%1 \n\t" \
:"=g"(_c[LO]), "=r"(cy) \
:"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
: "%rax", "%rdx", "cc")
#if defined(HAVE_INTEL_MULX)
#define MULX_INIT(a0, c0, cy)\
__asm__ volatile( \
"xorq %%r10, %%r10\n\t" \
"movq %1,%%rdx\n\t" \
"addq %2, %0\n\t" /* c0+=cy; Set CF, OF */ \
"adoxq %%r10, %%r10\n\t" /* Reset OF */ \
:"+m"(c0):"r"(a0),"r"(cy):"%r8","%r9", "%r10","%r11","%r12","%rdx") ; \
#define MULX_INNERMUL_R1(c0, c1, pre, rdx)\
{ \
__asm__ volatile ( \
"movq %3, %%rdx\n\t" \
"mulx %%r11,%%r9, %%r8 \n\t" \
"movq %2, %%r12\n\t" \
"adoxq %%r9,%0 \n\t" \
"adcxq %%r8,%1 \n\t" \
:"+r"(c0),"+r"(c1):"m"(pre),"r"(rdx):"%r8","%r9", "%r10", "%r11","%r12","%rdx" \
); }
#define MULX_INNERMUL_R2(c0, c1, pre, rdx)\
{ \
__asm__ volatile ( \
"movq %3, %%rdx\n\t" \
"mulx %%r12,%%r9, %%r8 \n\t" \
"movq %2, %%r11\n\t" \
"adoxq %%r9,%0 \n\t" \
"adcxq %%r8,%1 \n\t" \
:"+r"(c0),"+r"(c1):"m"(pre),"r"(rdx):"%r8","%r9", "%r10", "%r11","%r12","%rdx" \
); }
#define MULX_LOAD_R1(val)\
__asm__ volatile ( \
"movq %0, %%r11\n\t"\
::"m"(val):"%r8","%r9", "%r10", "%r11","%r12","%rdx"\
) ;
#define MULX_INNERMUL_LAST(c0, c1, rdx)\
{ \
__asm__ volatile ( \
"movq %2, %%rdx\n\t" \
"mulx %%r12,%%r9, %%r8 \n\t" \
"movq $0, %%r10 \n\t" \
"adoxq %%r10, %%r9 \n\t" \
"adcq $0,%%r8 \n\t" \
"addq %%r9,%0 \n\t" \
"adcq $0,%%r8 \n\t" \
"movq %%r8,%1 \n\t" \
:"+m"(c0),"=m"(c1):"r"(rdx):"%r8","%r9","%r10", "%r11", "%r12","%rdx"\
); }
#define MULX_INNERMUL8(x,y,z,cy)\
{ word64 rdx = y ;\
MULX_LOAD_R1(x[0]) ;\
MULX_INIT(y, _c0, cy) ; /* rdx=y; z0+=cy; */ \
MULX_INNERMUL_R1(_c0, _c1, x[1], rdx) ;\
MULX_INNERMUL_R2(_c1, _c2, x[2], rdx) ;\
MULX_INNERMUL_R1(_c2, _c3, x[3], rdx) ;\
MULX_INNERMUL_R2(_c3, _c4, x[4], rdx) ;\
MULX_INNERMUL_R1(_c4, _c5, x[5], rdx) ;\
MULX_INNERMUL_R2(_c5, _c6, x[6], rdx) ;\
MULX_INNERMUL_R1(_c6, _c7, x[7], rdx) ;\
MULX_INNERMUL_LAST(_c7, cy, rdx) ;\
}
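/* The MULX_* macros above keep two independent carry chains in flight: adcxq
 * adds through the carry flag (CF) while adoxq adds through the overflow flag
 * (OF), and mulx itself leaves the flags untouched. The low and high halves of
 * consecutive 64x64->128 partial products can therefore be accumulated without
 * serializing on a single carry flag, which is the point of the BMI2/ADX path.
 */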
#define INNERMUL8_MULX \
{\
MULX_INNERMUL8(tmpm, mu, _c, cy);\
}
#endif
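/* INNERMUL8 below is INNERMUL unrolled by hand: one invocation multiplies mu by
 * eight consecutive digits of the modulus (offsets 0x0..0x38 from tmpm) and adds
 * the products into eight digits of _c, carrying through cy, so the Montgomery
 * reduce loop can advance eight digits per step instead of one.
 */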
#define INNERMUL8 \
__asm__( \
"movq 0(%5),%%rax \n\t" \
"movq 0(%2),%%r10 \n\t" \
"movq 0x8(%5),%%r11 \n\t" \
"mulq %4 \n\t" \
"addq %%r10,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq 0x8(%2),%%r10 \n\t" \
"addq %3,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rax,0(%0) \n\t" \
"movq %%rdx,%1 \n\t" \
\
"movq %%r11,%%rax \n\t" \
"movq 0x10(%5),%%r11 \n\t" \
"mulq %4 \n\t" \
"addq %%r10,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq 0x10(%2),%%r10 \n\t" \
"addq %3,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rax,0x8(%0) \n\t" \
"movq %%rdx,%1 \n\t" \
\
"movq %%r11,%%rax \n\t" \
"movq 0x18(%5),%%r11 \n\t" \
"mulq %4 \n\t" \
"addq %%r10,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq 0x18(%2),%%r10 \n\t" \
"addq %3,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rax,0x10(%0) \n\t" \
"movq %%rdx,%1 \n\t" \
\
"movq %%r11,%%rax \n\t" \
"movq 0x20(%5),%%r11 \n\t" \
"mulq %4 \n\t" \
"addq %%r10,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq 0x20(%2),%%r10 \n\t" \
"addq %3,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rax,0x18(%0) \n\t" \
"movq %%rdx,%1 \n\t" \
\
"movq %%r11,%%rax \n\t" \
"movq 0x28(%5),%%r11 \n\t" \
"mulq %4 \n\t" \
"addq %%r10,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq 0x28(%2),%%r10 \n\t" \
"addq %3,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rax,0x20(%0) \n\t" \
"movq %%rdx,%1 \n\t" \
\
"movq %%r11,%%rax \n\t" \
"movq 0x30(%5),%%r11 \n\t" \
"mulq %4 \n\t" \
"addq %%r10,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq 0x30(%2),%%r10 \n\t" \
"addq %3,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rax,0x28(%0) \n\t" \
"movq %%rdx,%1 \n\t" \
\
"movq %%r11,%%rax \n\t" \
"movq 0x38(%5),%%r11 \n\t" \
"mulq %4 \n\t" \
"addq %%r10,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq 0x38(%2),%%r10 \n\t" \
"addq %3,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rax,0x30(%0) \n\t" \
"movq %%rdx,%1 \n\t" \
\
"movq %%r11,%%rax \n\t" \
"mulq %4 \n\t" \
"addq %%r10,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"addq %3,%%rax \n\t" \
"adcq $0,%%rdx \n\t" \
"movq %%rax,0x38(%0) \n\t" \
"movq %%rdx,%1 \n\t" \
\
:"=r"(_c), "=r"(cy) \
: "0"(_c), "1"(cy), "g"(mu), "r"(tmpm)\
: "%rax", "%rdx", "%r10", "%r11", "cc")\
#define PROPCARRY \
__asm__( \
"addq %1,%0 \n\t" \
"setb %%al \n\t" \
"movzbq %%al,%1 \n\t" \
:"=g"(_c[LO]), "=r"(cy) \
:"0"(_c[LO]), "1"(cy) \
: "%rax", "cc")
/******************************************************************/
#elif defined(TFM_SSE2)
/* SSE2 code (assumes 32-bit fp_digits) */
/* MMX register assignments (SSE2 pmuludq operates on the mm registers here):
 * mm0  *tmpm++, then Mu * (*tmpm++)
 * mm1  c[x], then Mu
 * mm2  mp
 * mm3  cy
 * mm4  _c[LO]
 */
#define MONT_START \
__asm__("movd %0,%%mm2"::"g"(mp))
#define MONT_FINI \
__asm__("emms")
#define LOOP_START \
__asm__( \
"movd %0,%%mm1 \n\t" \
"pxor %%mm3,%%mm3 \n\t" \
"pmuludq %%mm2,%%mm1 \n\t" \
:: "g"(c[x]))
/* pmuludq on mmx registers does a 32x32->64 multiply. */
#define INNERMUL \
__asm__( \
"movd %1,%%mm4 \n\t" \
"movd %2,%%mm0 \n\t" \
"paddq %%mm4,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm0 \n\t" \
"paddq %%mm0,%%mm3 \n\t" \
"movd %%mm3,%0 \n\t" \
"psrlq $32, %%mm3 \n\t" \
:"=g"(_c[LO]) : "0"(_c[LO]), "g"(*tmpm++) );
#define INNERMUL8 \
__asm__( \
"movd 0(%1),%%mm4 \n\t" \
"movd 0(%2),%%mm0 \n\t" \
"paddq %%mm4,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm0 \n\t" \
"movd 4(%2),%%mm5 \n\t" \
"paddq %%mm0,%%mm3 \n\t" \
"movd 4(%1),%%mm6 \n\t" \
"movd %%mm3,0(%0) \n\t" \
"psrlq $32, %%mm3 \n\t" \
\
"paddq %%mm6,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm5 \n\t" \
"movd 8(%2),%%mm6 \n\t" \
"paddq %%mm5,%%mm3 \n\t" \
"movd 8(%1),%%mm7 \n\t" \
"movd %%mm3,4(%0) \n\t" \
"psrlq $32, %%mm3 \n\t" \
\
"paddq %%mm7,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm6 \n\t" \
"movd 12(%2),%%mm7 \n\t" \
"paddq %%mm6,%%mm3 \n\t" \
"movd 12(%1),%%mm5 \n\t" \
"movd %%mm3,8(%0) \n\t" \
"psrlq $32, %%mm3 \n\t" \
\
"paddq %%mm5,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm7 \n\t" \
"movd 16(%2),%%mm5 \n\t" \
"paddq %%mm7,%%mm3 \n\t" \
"movd 16(%1),%%mm6 \n\t" \
"movd %%mm3,12(%0) \n\t" \
"psrlq $32, %%mm3 \n\t" \
\
"paddq %%mm6,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm5 \n\t" \
"movd 20(%2),%%mm6 \n\t" \
"paddq %%mm5,%%mm3 \n\t" \
"movd 20(%1),%%mm7 \n\t" \
"movd %%mm3,16(%0) \n\t" \
"psrlq $32, %%mm3 \n\t" \
\
"paddq %%mm7,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm6 \n\t" \
"movd 24(%2),%%mm7 \n\t" \
"paddq %%mm6,%%mm3 \n\t" \
"movd 24(%1),%%mm5 \n\t" \
"movd %%mm3,20(%0) \n\t" \
"psrlq $32, %%mm3 \n\t" \
\
"paddq %%mm5,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm7 \n\t" \
"movd 28(%2),%%mm5 \n\t" \
"paddq %%mm7,%%mm3 \n\t" \
"movd 28(%1),%%mm6 \n\t" \
"movd %%mm3,24(%0) \n\t" \
"psrlq $32, %%mm3 \n\t" \
\
"paddq %%mm6,%%mm3 \n\t" \
"pmuludq %%mm1,%%mm5 \n\t" \
"paddq %%mm5,%%mm3 \n\t" \
"movd %%mm3,28(%0) \n\t" \
"psrlq $32, %%mm3 \n\t" \
:"=r"(_c) : "0"(_c), "r"(tmpm) );
/* TAO switched tmpm from "g" to "r" after gcc tried to index the indexed stack
pointer */
#define LOOP_END \
__asm__( "movd %%mm3,%0 \n" :"=r"(cy))
#define PROPCARRY \
__asm__( \
"addl %1,%0 \n\t" \
"setb %%al \n\t" \
"movzbl %%al,%1 \n\t" \
:"=g"(_c[LO]), "=r"(cy) \
:"0"(_c[LO]), "1"(cy) \
: "%eax", "cc")
/******************************************************************/
#elif defined(TFM_ARM)
/* ARMv4 code */
#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#ifdef __thumb__
#define INNERMUL \
__asm__( \
" LDR r0,%1 \n\t" \
" ADDS r0,r0,%0 \n\t" \
" ITE CS \n\t" \
" MOVCS %0,#1 \n\t" \
" MOVCC %0,#0 \n\t" \
" UMLAL r0,%0,%3,%4 \n\t" \
" STR r0,%1 \n\t" \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"m"(_c[0]):"r0","cc");
#define PROPCARRY \
__asm__( \
" LDR r0,%1 \n\t" \
" ADDS r0,r0,%0 \n\t" \
" STR r0,%1 \n\t" \
" ITE CS \n\t" \
" MOVCS %0,#1 \n\t" \
" MOVCC %0,#0 \n\t" \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"m"(_c[0]):"r0","cc");
/* TAO thumb mode uses ite (if then else) to detect carry directly
* fixed unmatched constraint warning by changing 1 to m */
#else /* __thumb__ */
#define INNERMUL \
__asm__( \
" LDR r0,%1 \n\t" \
" ADDS r0,r0,%0 \n\t" \
" MOVCS %0,#1 \n\t" \
" MOVCC %0,#0 \n\t" \
" UMLAL r0,%0,%3,%4 \n\t" \
" STR r0,%1 \n\t" \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","cc");
#define PROPCARRY \
__asm__( \
" LDR r0,%1 \n\t" \
" ADDS r0,r0,%0 \n\t" \
" STR r0,%1 \n\t" \
" MOVCS %0,#1 \n\t" \
" MOVCC %0,#0 \n\t" \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r0","cc");
#endif /* __thumb__ */
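/* In both variants above, "UMLAL r0,%0,%3,%4" adds the 64-bit product
 * mu * (*tmpm) onto the register pair {%0:r0}, where r0 was preloaded with
 * _c[0] plus the incoming carry and %0 holds the carry bit captured from the
 * ADDS. The low word is stored back to _c[0] and the high word becomes the new
 * carry cy.
 */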
#elif defined(TFM_PPC32)
/* PPC32 */
#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#define INNERMUL \
__asm__( \
" mullw 16,%3,%4 \n\t" \
" mulhwu 17,%3,%4 \n\t" \
" addc 16,16,%0 \n\t" \
" addze 17,17 \n\t" \
" lwz 18,%1 \n\t" \
" addc 16,16,18 \n\t" \
" addze %0,17 \n\t" \
" stw 16,%1 \n\t" \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","cc"); ++tmpm;
#define PROPCARRY \
__asm__( \
" lwz 16,%1 \n\t" \
" addc 16,16,%0 \n\t" \
" stw 16,%1 \n\t" \
" xor %0,%0,%0 \n\t" \
" addze %0,%0 \n\t" \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","cc");
#elif defined(TFM_PPC64)
/* PPC64 */
#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#define INNERMUL \
__asm__( \
" mulld 16,%3,%4 \n\t" \
" mulhdu 17,%3,%4 \n\t" \
" addc 16,16,%0 \n\t" \
" addze 17,17 \n\t" \
" ldx 18,0,%1 \n\t" \
" addc 16,16,18 \n\t" \
" addze %0,17 \n\t" \
" sdx 16,0,%1 \n\t" \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","cc"); ++tmpm;
#define PROPCARRY \
__asm__( \
" ldx 16,0,%1 \n\t" \
" addc 16,16,%0 \n\t" \
" sdx 16,0,%1 \n\t" \
" xor %0,%0,%0 \n\t" \
" addze %0,%0 \n\t" \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","cc");
/******************************************************************/
#elif defined(TFM_AVR32)
/* AVR32 */
#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#define INNERMUL \
__asm__( \
" ld.w r2,%1 \n\t" \
" add r2,%0 \n\t" \
" eor r3,r3 \n\t" \
" acr r3 \n\t" \
" macu.d r2,%3,%4 \n\t" \
" st.w %1,r2 \n\t" \
" mov %0,r3 \n\t" \
:"=r"(cy),"=r"(_c):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c):"r2","r3");
#define PROPCARRY \
__asm__( \
" ld.w r2,%1 \n\t" \
" add r2,%0 \n\t" \
" st.w %1,r2 \n\t" \
" eor %0,%0 \n\t" \
" acr %0 \n\t" \
:"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","cc");
#else
/* ISO C code */
#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#define INNERMUL \
do { fp_word t; \
t = ((fp_word)_c[0] + (fp_word)cy) + \
(((fp_word)mu) * ((fp_word)*tmpm++)); \
_c[0] = (fp_digit)t; \
cy = (fp_digit)(t >> DIGIT_BIT); \
} while (0)
#define PROPCARRY \
do { fp_digit t = _c[0] += cy; cy = (t < cy); } while (0)
#endif
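/* All variants above implement the same contract as the ISO C fallback:
 * LOOP_START computes mu = c[x] * mp, INNERMUL adds mu * (*tmpm++) plus the
 * running carry cy into _c[0] (leaving the new carry in cy), and PROPCARRY
 * ripples cy into the next digit. Illustrative sketch only (not the exact
 * code) of how fp_montgomery_reduce() drives them:
 *
 *     MONT_START;
 *     for (x = 0; x < pa; x++) {
 *         fp_digit cy = 0;
 *         LOOP_START;                    /-* mu for this column *-/
 *         _c   = c + x;
 *         tmpm = m->dp;
 *         for (y = 0; y < m->used; y++) { INNERMUL; ++_c; }
 *         LOOP_END;
 *         while (cy) { PROPCARRY; ++_c; }
 *     }
 *     MONT_FINI;
 */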
/******************************************************************/
#define LO 0
/* end fp_montgomery_reduce.c asm */
/* start fp_sqr_comba.c asm */
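/* Macro contract for the squaring combas below (see the ISO C versions at the
 * end of this section): c0:c1:c2 is a three-digit column accumulator.
 * SQRADD(i,i) adds the square term i*i, SQRADD2(i,j) adds the doubled cross
 * term 2*i*j, and SQRADDSC/SQRADDAC/SQRADDDB collect a run of cross products in
 * the side accumulator sc0:sc1:sc2 and then fold it in twice. CARRY_FORWARD
 * shifts the window after a column has been written out with
 * COMBA_STORE/COMBA_STORE2.
 */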
#if defined(TFM_X86)
/* x86-32 optimized */
#define COMBA_START
#define CLEAR_CARRY \
c0 = c1 = c2 = 0;
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define CARRY_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_FINI
#define SQRADD(i, j) \
__asm__( \
"movl %6,%%eax \n\t" \
"mull %%eax \n\t" \
"addl %%eax,%0 \n\t" \
"adcl %%edx,%1 \n\t" \
"adcl $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%edx","cc");
#define SQRADD2(i, j) \
__asm__( \
"movl %6,%%eax \n\t" \
"mull %7 \n\t" \
"addl %%eax,%0 \n\t" \
"adcl %%edx,%1 \n\t" \
"adcl $0,%2 \n\t" \
"addl %%eax,%0 \n\t" \
"adcl %%edx,%1 \n\t" \
"adcl $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx", "cc");
#define SQRADDSC(i, j) \
__asm__( \
"movl %3,%%eax \n\t" \
"mull %4 \n\t" \
"movl %%eax,%0 \n\t" \
"movl %%edx,%1 \n\t" \
"xorl %2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "g"(i), "g"(j) :"%eax","%edx","cc");
/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
#define SQRADDAC(i, j) \
__asm__( \
"movl %6,%%eax \n\t" \
"mull %7 \n\t" \
"addl %%eax,%0 \n\t" \
"adcl %%edx,%1 \n\t" \
"adcl $0,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","cc");
#define SQRADDDB \
__asm__( \
"addl %6,%0 \n\t" \
"adcl %7,%1 \n\t" \
"adcl %8,%2 \n\t" \
"addl %6,%0 \n\t" \
"adcl %7,%1 \n\t" \
"adcl %8,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
#elif defined(TFM_X86_64)
/* x86-64 optimized */
#define COMBA_START
#define CLEAR_CARRY \
c0 = c1 = c2 = 0;
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define CARRY_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_FINI
#define SQRADD(i, j) \
__asm__( \
"movq %6,%%rax \n\t" \
"mulq %%rax \n\t" \
"addq %%rax,%0 \n\t" \
"adcq %%rdx,%1 \n\t" \
"adcq $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i) :"%rax","%rdx","cc");
#define SQRADD2(i, j) \
__asm__( \
"movq %6,%%rax \n\t" \
"mulq %7 \n\t" \
"addq %%rax,%0 \n\t" \
"adcq %%rdx,%1 \n\t" \
"adcq $0,%2 \n\t" \
"addq %%rax,%0 \n\t" \
"adcq %%rdx,%1 \n\t" \
"adcq $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
#define SQRADDSC(i, j) \
__asm__( \
"movq %3,%%rax \n\t" \
"mulq %4 \n\t" \
"movq %%rax,%0 \n\t" \
"movq %%rdx,%1 \n\t" \
"xorq %2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "g"(i), "g"(j) :"%rax","%rdx","cc");
/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
#define SQRADDAC(i, j) \
__asm__( \
"movq %6,%%rax \n\t" \
"mulq %7 \n\t" \
"addq %%rax,%0 \n\t" \
"adcq %%rdx,%1 \n\t" \
"adcq $0,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc");
#define SQRADDDB \
__asm__( \
"addq %6,%0 \n\t" \
"adcq %7,%1 \n\t" \
"adcq %8,%2 \n\t" \
"addq %6,%0 \n\t" \
"adcq %7,%1 \n\t" \
"adcq %8,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
#elif defined(TFM_SSE2)
/* SSE2 Optimized */
#define COMBA_START
#define CLEAR_CARRY \
c0 = c1 = c2 = 0;
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define CARRY_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_FINI \
__asm__("emms");
#define SQRADD(i, j) \
__asm__( \
"movd %6,%%mm0 \n\t" \
"pmuludq %%mm0,%%mm0\n\t" \
"movd %%mm0,%%eax \n\t" \
"psrlq $32,%%mm0 \n\t" \
"addl %%eax,%0 \n\t" \
"movd %%mm0,%%eax \n\t" \
"adcl %%eax,%1 \n\t" \
"adcl $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","cc");
#define SQRADD2(i, j) \
__asm__( \
"movd %6,%%mm0 \n\t" \
"movd %7,%%mm1 \n\t" \
"pmuludq %%mm1,%%mm0\n\t" \
"movd %%mm0,%%eax \n\t" \
"psrlq $32,%%mm0 \n\t" \
"movd %%mm0,%%edx \n\t" \
"addl %%eax,%0 \n\t" \
"adcl %%edx,%1 \n\t" \
"adcl $0,%2 \n\t" \
"addl %%eax,%0 \n\t" \
"adcl %%edx,%1 \n\t" \
"adcl $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
#define SQRADDSC(i, j) \
__asm__( \
"movd %3,%%mm0 \n\t" \
"movd %4,%%mm1 \n\t" \
"pmuludq %%mm1,%%mm0\n\t" \
"movd %%mm0,%0 \n\t" \
"psrlq $32,%%mm0 \n\t" \
"movd %%mm0,%1 \n\t" \
"xorl %2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "m"(i), "m"(j));
/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
#define SQRADDAC(i, j) \
__asm__( \
"movd %6,%%mm0 \n\t" \
"movd %7,%%mm1 \n\t" \
"pmuludq %%mm1,%%mm0\n\t" \
"movd %%mm0,%%eax \n\t" \
"psrlq $32,%%mm0 \n\t" \
"movd %%mm0,%%edx \n\t" \
"addl %%eax,%0 \n\t" \
"adcl %%edx,%1 \n\t" \
"adcl $0,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","cc");
#define SQRADDDB \
__asm__( \
"addl %6,%0 \n\t" \
"adcl %7,%1 \n\t" \
"adcl %8,%2 \n\t" \
"addl %6,%0 \n\t" \
"adcl %7,%1 \n\t" \
"adcl %8,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
#elif defined(TFM_ARM)
/* ARM code */
#define COMBA_START
#define CLEAR_CARRY \
c0 = c1 = c2 = 0;
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define CARRY_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_FINI
/* multiplies digits i and j, adding the product into the c0:c1:c2 accumulator */
#define SQRADD(i, j) \
__asm__( \
" UMULL r0,r1,%6,%6 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
" ADC %2,%2,#0 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "cc");
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
__asm__( \
" UMULL r0,r1,%6,%7 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
" ADC %2,%2,#0 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
" ADC %2,%2,#0 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
#define SQRADDSC(i, j) \
__asm__( \
" UMULL %0,%1,%3,%4 \n\t" \
" SUB %2,%2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "r"(i), "r"(j) : "cc");
/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
#define SQRADDAC(i, j) \
__asm__( \
" UMULL r0,r1,%6,%7 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
" ADC %2,%2,#0 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "cc");
#define SQRADDDB \
__asm__( \
" ADDS %0,%0,%3 \n\t" \
" ADCS %1,%1,%4 \n\t" \
" ADC %2,%2,%5 \n\t" \
" ADDS %0,%0,%3 \n\t" \
" ADCS %1,%1,%4 \n\t" \
" ADC %2,%2,%5 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
#elif defined(TFM_PPC32)
/* PPC32 */
#define COMBA_START
#define CLEAR_CARRY \
c0 = c1 = c2 = 0;
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define CARRY_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_FINI
/* multiplies digits i and j, adding the product into the c0:c1:c2 accumulator */
#define SQRADD(i, j) \
__asm__( \
" mullw 16,%6,%6 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhwu 16,%6,%6 \n\t" \
" adde %1,%1,16 \n\t" \
" addze %2,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","cc");
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
__asm__( \
" mullw 16,%6,%7 \n\t" \
" mulhwu 17,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
" adde %1,%1,17 \n\t" \
" addze %2,%2 \n\t" \
" addc %0,%0,16 \n\t" \
" adde %1,%1,17 \n\t" \
" addze %2,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc");
#define SQRADDSC(i, j) \
__asm__( \
" mullw %0,%6,%7 \n\t" \
" mulhwu %1,%6,%7 \n\t" \
" xor %2,%2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
#define SQRADDAC(i, j) \
__asm__( \
" mullw 16,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhwu 16,%6,%7 \n\t" \
" adde %1,%1,16 \n\t" \
" addze %2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc");
#define SQRADDDB \
__asm__( \
" addc %0,%0,%3 \n\t" \
" adde %1,%1,%4 \n\t" \
" adde %2,%2,%5 \n\t" \
" addc %0,%0,%3 \n\t" \
" adde %1,%1,%4 \n\t" \
" adde %2,%2,%5 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
#elif defined(TFM_PPC64)
/* PPC64 */
#define COMBA_START
#define CLEAR_CARRY \
c0 = c1 = c2 = 0;
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define CARRY_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_FINI
/* multiplies digits i and j, adding the product into the c0:c1:c2 accumulator */
#define SQRADD(i, j) \
__asm__( \
" mulld 16,%6,%6 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhdu 16,%6,%6 \n\t" \
" adde %1,%1,16 \n\t" \
" addze %2,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","cc");
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
__asm__( \
" mulld 16,%6,%7 \n\t" \
" mulhdu 17,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
" adde %1,%1,17 \n\t" \
" addze %2,%2 \n\t" \
" addc %0,%0,16 \n\t" \
" adde %1,%1,17 \n\t" \
" addze %2,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc");
#define SQRADDSC(i, j) \
__asm__( \
" mulld %0,%6,%7 \n\t" \
" mulhdu %1,%6,%7 \n\t" \
" xor %2,%2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
#define SQRADDAC(i, j) \
__asm__( \
" mulld 16,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhdu 16,%6,%7 \n\t" \
" adde %1,%1,16 \n\t" \
" addze %2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc");
#define SQRADDDB \
__asm__( \
" addc %0,%0,%3 \n\t" \
" adde %1,%1,%4 \n\t" \
" adde %2,%2,%5 \n\t" \
" addc %0,%0,%3 \n\t" \
" adde %1,%1,%4 \n\t" \
" adde %2,%2,%5 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
#elif defined(TFM_AVR32)
/* AVR32 */
#define COMBA_START
#define CLEAR_CARRY \
c0 = c1 = c2 = 0;
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define CARRY_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_FINI
/* multiplies digits i and j, adding the product into the c0:c1:c2 accumulator */
#define SQRADD(i, j) \
__asm__( \
" mulu.d r2,%6,%6 \n\t" \
" add %0,%0,r2 \n\t" \
" adc %1,%1,r3 \n\t" \
" acr %2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r2","r3");
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
__asm__( \
" mulu.d r2,%6,%7 \n\t" \
" add %0,%0,r2 \n\t" \
" adc %1,%1,r3 \n\t" \
" acr %2, \n\t" \
" add %0,%0,r2 \n\t" \
" adc %1,%1,r3 \n\t" \
" acr %2, \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2", "r3");
#define SQRADDSC(i, j) \
__asm__( \
" mulu.d r2,%6,%7 \n\t" \
" mov %0,r2 \n\t" \
" mov %1,r3 \n\t" \
" eor %2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "r2", "r3");
#define SQRADDAC(i, j) \
__asm__( \
" mulu.d r2,%6,%7 \n\t" \
" add %0,%0,r2 \n\t" \
" adc %1,%1,r3 \n\t" \
" acr %2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r2", "r3");
#define SQRADDDB \
__asm__( \
" add %0,%0,%3 \n\t" \
" adc %1,%1,%4 \n\t" \
" adc %2,%2,%5 \n\t" \
" add %0,%0,%3 \n\t" \
" adc %1,%1,%4 \n\t" \
" adc %2,%2,%5 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
#else
#define TFM_ISO
/* ISO C portable code */
#define COMBA_START
#define CLEAR_CARRY \
c0 = c1 = c2 = 0;
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define CARRY_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_FINI
/* multiplies digits i and j, adding the product into the c0:c1:c2 accumulator */
#define SQRADD(i, j) \
do { fp_word t; \
t = c0 + ((fp_word)i) * ((fp_word)j); c0 = (fp_digit)t; \
t = c1 + (t >> DIGIT_BIT); c1 = (fp_digit)t; \
c2 +=(fp_digit) (t >> DIGIT_BIT); \
} while (0);
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
do { fp_word t; \
t = ((fp_word)i) * ((fp_word)j); \
tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \
tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \
c2 +=(fp_digit)( tt >> DIGIT_BIT); \
tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \
tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \
c2 +=(fp_digit) (tt >> DIGIT_BIT); \
} while (0);
#define SQRADDSC(i, j) \
do { fp_word t; \
t = ((fp_word)i) * ((fp_word)j); \
sc0 = (fp_digit)t; sc1 = (t >> DIGIT_BIT); sc2 = 0; \
} while (0);
#define SQRADDAC(i, j) \
do { fp_word t; \
t = sc0 + ((fp_word)i) * ((fp_word)j); sc0 = (fp_digit)t; \
t = sc1 + (t >> DIGIT_BIT); sc1 = (fp_digit)t; \
sc2 += (fp_digit)(t >> DIGIT_BIT); \
} while (0);
#define SQRADDDB \
do { fp_word t; \
t = ((fp_word)sc0) + ((fp_word)sc0) + c0; c0 = (fp_digit)t; \
t = ((fp_word)sc1) + ((fp_word)sc1) + c1 + (t >> DIGIT_BIT); \
c1 = (fp_digit)t; \
c2 = c2 + (fp_digit)(((fp_word)sc2) + ((fp_word)sc2) + (t >> DIGIT_BIT)); \
} while (0);
#endif
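/* Illustrative sketch only (mirroring the generated fp_sqr_comba_N.i files):
 * squaring a two-digit value a[] into b[] would expand to roughly
 *
 *     COMBA_START; CLEAR_CARRY;
 *     SQRADD(a[0], a[0]);  COMBA_STORE(b[0]);
 *     CARRY_FORWARD;
 *     SQRADD2(a[0], a[1]); COMBA_STORE(b[1]);
 *     CARRY_FORWARD;
 *     SQRADD(a[1], a[1]);  COMBA_STORE(b[2]); COMBA_STORE2(b[3]);
 *     COMBA_FINI;
 */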
#ifdef TFM_SMALL_SET
#include "fp_sqr_comba_small_set.i"
#endif
#if defined(TFM_SQR3)
#include "fp_sqr_comba_3.i"
#endif
#if defined(TFM_SQR4)
#include "fp_sqr_comba_4.i"
#endif
#if defined(TFM_SQR6)
#include "fp_sqr_comba_6.i"
#endif
#if defined(TFM_SQR7)
#include "fp_sqr_comba_7.i"
#endif
#if defined(TFM_SQR8)
#include "fp_sqr_comba_8.i"
#endif
#if defined(TFM_SQR9)
#include "fp_sqr_comba_9.i"
#endif
#if defined(TFM_SQR12)
#include "fp_sqr_comba_12.i"
#endif
#if defined(TFM_SQR17)
#include "fp_sqr_comba_17.i"
#endif
#if defined(TFM_SQR20)
#include "fp_sqr_comba_20.i"
#endif
#if defined(TFM_SQR24)
#include "fp_sqr_comba_24.i"
#endif
#if defined(TFM_SQR28)
#include "fp_sqr_comba_28.i"
#endif
#if defined(TFM_SQR32)
#include "fp_sqr_comba_32.i"
#endif
#if defined(TFM_SQR48)
#include "fp_sqr_comba_48.i"
#endif
#if defined(TFM_SQR64)
#include "fp_sqr_comba_64.i"
#endif
/* end fp_sqr_comba.c asm */
/* start fp_mul_comba.c asm */
/* these are the combas. Worship them. */
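/* Each MULADD(i, j) below adds the double-width product i*j into the running
 * three-digit column accumulator c0:c1:c2; COMBA_STORE/COMBA_STORE2 write a
 * finished column out and COMBA_FORWARD shifts the accumulator to the next
 * column (see the ISO C fallback at the end of this section).
 */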
#if defined(TFM_X86)
/* Generic x86 optimized code */
/* anything you need at the start */
#define COMBA_START
/* clear the chaining variables */
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
/* forward the carry to the next digit */
#define COMBA_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
/* store the first sum */
#define COMBA_STORE(x) \
x = c0;
/* store the second sum [carry] */
#define COMBA_STORE2(x) \
x = c1;
/* anything you need at the end */
#define COMBA_FINI
/* multiply i and j, adding the product into c0:c1:c2 */
#define MULADD(i, j) \
__asm__( \
"movl %6,%%eax \n\t" \
"mull %7 \n\t" \
"addl %%eax,%0 \n\t" \
"adcl %%edx,%1 \n\t" \
"adcl $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
#elif defined(TFM_X86_64)
/* x86-64 optimized */
/* anything you need at the start */
#define COMBA_START
/* clear the chaining variables */
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
/* forward the carry to the next digit */
#define COMBA_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
/* store the first sum */
#define COMBA_STORE(x) \
x = c0;
/* store the second sum [carry] */
#define COMBA_STORE2(x) \
x = c1;
/* anything you need at the end */
#define COMBA_FINI
/* multiply i and j, adding the product into c0:c1:c2 */
#define MULADD(i, j) \
__asm__ ( \
"movq %6,%%rax \n\t" \
"mulq %7 \n\t" \
"addq %%rax,%0 \n\t" \
"adcq %%rdx,%1 \n\t" \
"adcq $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
#if defined(HAVE_INTEL_MULX)
#define MULADD_MULX(b0, c0, c1, rdx)\
__asm__ volatile ( \
"movq %3, %%rdx\n\t" \
"mulx %2,%%r9, %%r8 \n\t" \
"adoxq %%r9,%0 \n\t" \
"adcxq %%r8,%1 \n\t" \
:"+r"(c0),"+r"(c1):"r"(b0), "r"(rdx):"%r8","%r9","%r10","%rdx"\
)
#define MULADD_MULX_ADD_CARRY(c0, c1)\
__asm__ volatile(\
"mov $0, %%r10\n\t"\
"movq %1, %%r8\n\t"\
"adox %%r10, %0\n\t"\
"adcx %%r10, %1\n\t"\
:"+r"(c0),"+r"(c1)::"%r8","%r9","%r10","%rdx") ;
#define MULADD_SET_A(a0)\
__asm__ volatile("add $0, %%r8\n\t" \
"movq %0,%%rdx\n\t" \
::"r"(a0):"%r8","%r9","%r10","%rdx") ;
#define MULADD_BODY(a,b,c)\
{ word64 rdx = a->dp[ix] ; \
cp = &(c->dp[iz]) ; \
c0 = cp[0] ; c1 = cp[1]; \
MULADD_SET_A(rdx) ; \
MULADD_MULX(b0, c0, c1, rdx) ;\
cp[0]=c0; c0=cp[2]; \
MULADD_MULX(b1, c1, c0, rdx) ;\
cp[1]=c1; c1=cp[3]; \
MULADD_MULX(b2, c0, c1, rdx) ;\
cp[2]=c0; c0=cp[4]; \
MULADD_MULX(b3, c1, c0, rdx) ;\
cp[3]=c1; c1=cp[5]; \
MULADD_MULX_ADD_CARRY(c0, c1);\
cp[4]=c0; cp[5]=c1; \
}
#define TFM_INTEL_MUL_COMBA(a, b, c)\
for(ix=0; ix<pa; ix++)c->dp[ix]=0 ; \
for(iy=0; (iy<b->used); iy+=4) { \
fp_digit *bp ; \
bp = &(b->dp[iy+0]) ; \
fp_digit b0 = bp[0] , b1= bp[1], \
b2= bp[2], b3= bp[3]; \
ix=0, iz=iy; \
while(ix<a->used) { \
fp_digit c0, c1; \
fp_digit *cp ; \
MULADD_BODY(a,b,c); \
ix++ ; iz++ ; \
} \
};
#endif
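/* The HAVE_INTEL_MULX block above is an alternative comba multiply:
 * TFM_INTEL_MUL_COMBA walks b four digits at a time (b0..b3), and for each
 * digit of a, MULADD_BODY issues four mulx multiplies whose partial products
 * are folded into c->dp using the adcxq/adoxq dual carry chains, with
 * MULADD_MULX_ADD_CARRY finishing off each pass.
 */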
#elif defined(TFM_SSE2)
/* use SSE2 optimizations */
/* anything you need at the start */
#define COMBA_START
/* clear the chaining variables */
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
/* forward the carry to the next digit */
#define COMBA_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
/* store the first sum */
#define COMBA_STORE(x) \
x = c0;
/* store the second sum [carry] */
#define COMBA_STORE2(x) \
x = c1;
/* anything you need at the end */
#define COMBA_FINI \
__asm__("emms");
/* multiply i and j, adding the product into c0:c1:c2 */
#define MULADD(i, j) \
__asm__( \
"movd %6,%%mm0 \n\t" \
"movd %7,%%mm1 \n\t" \
"pmuludq %%mm1,%%mm0\n\t" \
"movd %%mm0,%%eax \n\t" \
"psrlq $32,%%mm0 \n\t" \
"addl %%eax,%0 \n\t" \
"movd %%mm0,%%eax \n\t" \
"adcl %%eax,%1 \n\t" \
"adcl $0,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","cc");
#elif defined(TFM_ARM)
/* ARM code */
#define COMBA_START
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
#define COMBA_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define COMBA_FINI
#define MULADD(i, j) \
__asm__( \
" UMULL r0,r1,%6,%7 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
" ADC %2,%2,#0 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
#elif defined(TFM_PPC32)
/* For 32-bit PPC */
#define COMBA_START
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
#define COMBA_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define COMBA_FINI
/* untested: will mulhwu change the flags? Docs say no */
#define MULADD(i, j) \
__asm__( \
" mullw 16,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhwu 16,%6,%7 \n\t" \
" adde %1,%1,16 \n\t" \
" addze %2,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16");
#elif defined(TFM_PPC64)
/* For 64-bit PPC */
#define COMBA_START
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
#define COMBA_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define COMBA_FINI
/* untested: will mulhdu change the flags? Docs say no */
#define MULADD(i, j) \
__asm__( \
" mulld 16,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhdu 16,%6,%7 \n\t" \
" adde %1,%1,16 \n\t" \
" addze %2,%2 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16");
#elif defined(TFM_AVR32)
/* AVR32 code */
#define COMBA_START
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
#define COMBA_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define COMBA_FINI
#define MULADD(i, j) \
__asm__( \
" mulu.d r2,%6,%7 \n\t"\
" add %0,r2 \n\t"\
" adc %1,%1,r3 \n\t"\
" acr %2 \n\t"\
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2","r3");
#else
/* ISO C code */
#define COMBA_START
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
#define COMBA_FORWARD \
do { c0 = c1; c1 = c2; c2 = 0; } while (0);
#define COMBA_STORE(x) \
x = c0;
#define COMBA_STORE2(x) \
x = c1;
#define COMBA_FINI
#define MULADD(i, j) \
do { fp_word t; \
t = (fp_word)c0 + ((fp_word)i) * ((fp_word)j); c0 = (fp_digit)t; \
t = (fp_word)c1 + (t >> DIGIT_BIT); \
c1 = (fp_digit)t; c2 += (fp_digit)(t >> DIGIT_BIT); \
} while (0);
#endif
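/* Illustrative sketch only of how the generic comba multiply (and the generated
 * fp_mul_comba_N.i files) drive these macros: for each output column ix, every
 * digit pair with tx + ty == ix is folded in with MULADD, then the column is
 * stored and the accumulator shifted:
 *
 *     COMBA_START; COMBA_CLEAR;
 *     for (ix = 0; ix < pa; ix++) {
 *         ... MULADD(a->dp[tx], b->dp[ty]) for each tx + ty == ix ...
 *         COMBA_STORE(dst.dp[ix]);
 *         COMBA_FORWARD;
 *     }
 *     COMBA_FINI;
 */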
#ifdef TFM_SMALL_SET
#include "fp_mul_comba_small_set.i"
#endif
#if defined(TFM_MUL3)
#include "fp_mul_comba_3.i"
#endif
#if defined(TFM_MUL4)
#include "fp_mul_comba_4.i"
#endif
#if defined(TFM_MUL6)
#include "fp_mul_comba_6.i"
#endif
#if defined(TFM_MUL7)
#include "fp_mul_comba_7.i"
#endif
#if defined(TFM_MUL8)
#include "fp_mul_comba_8.i"
#endif
#if defined(TFM_MUL9)
#include "fp_mul_comba_9.i"
#endif
#if defined(TFM_MUL12)
#include "fp_mul_comba_12.i"
#endif
#if defined(TFM_MUL17)
#include "fp_mul_comba_17.i"
#endif
#if defined(TFM_MUL20)
#include "fp_mul_comba_20.i"
#endif
#if defined(TFM_MUL24)
#include "fp_mul_comba_24.i"
#endif
#if defined(TFM_MUL28)
#include "fp_mul_comba_28.i"
#endif
#if defined(TFM_MUL32)
#include "fp_mul_comba_32.i"
#endif
#if defined(TFM_MUL48)
#include "fp_mul_comba_48.i"
#endif
#if defined(TFM_MUL64)
#include "fp_mul_comba_64.i"
#endif
/* end fp_mul_comba.c asm */