/netbsd-src/external/lgpl3/gmp/dist/mpn/powerpc64/mode64/p9/

sqr_basecase.asm
     64: addc u0, u0, u0
     70: addc u0, u0, u0
     71: add u0, u0, cy
     77: addc u0, u0, u0
     78: add u0, u0, cy
     82: add u0, u0, u0
     83: add u0, u0, cy
     90: addc u0, u0, u0
     96: adde u0, u0, u0
    102: adde u0, u0, u0

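The addc/adde chains above are doubling passes with carry propagation, the heart of schoolbook squaring: each cross product up[i]*up[j] (i < j) appears twice in the square, so it is computed once and doubled (either the products, or the operand limbs on the fly, as the coreibwl file further down does). A minimal C sketch of the product-doubling organization, assuming 64-bit limbs and a compiler with unsigned __int128; function and variable names are illustrative, not GMP's:

    #include <stdint.h>

    /* Sketch only: square an n-limb number into rp[0..2n-1], n >= 1.
       Step 2's long add-with-carry chain is what the addc/adde runs
       in the sqr_basecase.asm files correspond to. */
    static void
    sqr_sketch (uint64_t *rp, const uint64_t *up, int n)
    {
        uint64_t cy;
        int i, j;

        for (i = 0; i < 2 * n; i++)
            rp[i] = 0;

        /* 1. Accumulate each cross product up[i]*up[j], i < j, once. */
        for (i = 0; i < n; i++) {
            cy = 0;
            for (j = i + 1; j < n; j++) {
                unsigned __int128 t =
                    (unsigned __int128) up[i] * up[j] + rp[i + j] + cy;
                rp[i + j] = (uint64_t) t;
                cy = (uint64_t) (t >> 64);
            }
            rp[i + n] = cy;
        }

        /* 2. Double the whole triangle: one carry-propagating pass. */
        cy = 0;
        for (i = 0; i < 2 * n; i++) {
            uint64_t t = rp[i];
            rp[i] = (t << 1) | cy;
            cy = t >> 63;
        }

        /* 3. Add the diagonal squares up[i]^2 at position 2i. */
        cy = 0;
        for (i = 0; i < n; i++) {
            unsigned __int128 sq = (unsigned __int128) up[i] * up[i];
            unsigned __int128 t =
                (unsigned __int128) rp[2 * i] + (uint64_t) sq + cy;
            rp[2 * i] = (uint64_t) t;
            t = (unsigned __int128) rp[2 * i + 1]
                + (uint64_t) (sq >> 64) + (uint64_t) (t >> 64);
            rp[2 * i + 1] = (uint64_t) t;
            cy = (uint64_t) (t >> 64);
        }
    }
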
gcd_22.asm
     55: define(`u0', `r5')
     75: cmpld cr7, v0, u0
     76: L(top): subfc t0, v0, u0 C 0 12
     80: subfc s0, u0, v0 C 0
     86: isel v0, v0, u0, 2 C 6 use condition set by subfe
     87: isel u0, t0, s0, 2 C 6
     91: srd u0, u0, cnt C 8
     94: or u0, u0, tmp C 10
     97: cmpld cr7, v0, u0
    103: L(top1):isel v0, u0, v0, 29 C v = min(u,v)
    [all …]

/netbsd-src/external/lgpl3/gmp/dist/mpn/generic/

gcd_22.c
     39: mpn_gcd_22 (mp_limb_t u1, mp_limb_t u0, mp_limb_t v1, mp_limb_t v0) in mpn_gcd_22() argument
     42: ASSERT (u0 & v0 & 1); in mpn_gcd_22()
     45: u0 = (u0 >> 1) | (u1 << (GMP_LIMB_BITS - 1)); in mpn_gcd_22()
     54: sub_ddmmss (t1, t0, u1, u0, v1, v0); in mpn_gcd_22()
     62: g.d1 = (u1 << 1) | (u0 >> (GMP_LIMB_BITS - 1)); in mpn_gcd_22()
     63: g.d0 = (u0 << 1) | 1; in mpn_gcd_22()
     71: u0 = (t1 ^ vgtu) - vgtu; in mpn_gcd_22()
     73: u0 >>= c + 1; in mpn_gcd_22()
     92: u0 = (t0 ^ vgtu) - vgtu; in mpn_gcd_22()
     96: u0 = u1; in mpn_gcd_22()
    [all …]

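gcd_22.c is the two-limb binary GCD on odd operands: each step forms u - v with sub_ddmmss, selects |u - v| branch-free with the vgtu mask, and shifts out the trailing zeros that the difference of two odd numbers must have (the `u0 >>= c + 1` line). The asm variants above and below implement the same loop with isel/cmovc selects. A branchy sketch of the core loop, collapsing the two limbs into unsigned __int128 for brevity; the name is hypothetical:

    #include <stdint.h>

    /* Odd-operand binary GCD sketch. u and v must be odd and nonzero;
       the real code keeps each value in two limbs and avoids branches. */
    static unsigned __int128
    gcd_odd_sketch (unsigned __int128 u, unsigned __int128 v)
    {
        while (u != v) {
            if (u > v) {
                u -= v;                          /* difference of odds: even */
                do u >>= 1; while ((u & 1) == 0);
            } else {
                v -= u;
                do v >>= 1; while ((v & 1) == 0);
            }
        }
        return u;
    }

The original counts the trailing zeros once (count_trailing_zeros) and shifts in a single step instead of looping bit by bit.
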
gcdext_lehmer.c
     59: MPN_CMP (c, ctx->u0, ctx->u1, un); in mpn_gcdext_hook()
     60: ASSERT (c != 0 || (un == 1 && ctx->u0[0] == 1 && ctx->u1[0] == 1)); in mpn_gcdext_hook()
     65: up = d ? ctx->u0 : ctx->u1; in mpn_gcdext_hook()
     75: mp_ptr u0 = ctx->u0; in mpn_gcdext_hook() local
     81: MP_PTR_SWAP (u0, u1); in mpn_gcdext_hook()
     92: cy = mpn_add_n (u0, u0, u1, un); in mpn_gcdext_hook()
     94: cy = mpn_addmul_1 (u0, u1, un, q); in mpn_gcdext_hook()
    126: cy = mpn_add (u0, tp, u1n, u0, un); in mpn_gcdext_hook()
    131: cy = mpn_add (u0, u0, un, tp, u1n); in mpn_gcdext_hook()
    134: u0[un] = cy; in mpn_gcdext_hook()
    [all …]

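Here u0/u1 are the extended-GCD cofactors: each quotient q updates the pair as u0 += q*u1 (the mpn_addmul_1 call, or a plain mpn_add_n when q == 1), and MP_PTR_SWAP then exchanges the roles. A scalar sketch of that bookkeeping, offered as an illustration rather than GMP's exact scheme (GMP keeps the cofactors unsigned and multi-limb, and tracks their signs separately):

    #include <stdint.h>

    /* Extended Euclid with the same update-and-swap pattern.
       Requires b > 0; names are hypothetical. */
    static uint64_t
    gcdext_sketch (uint64_t a, uint64_t b, int64_t *s)
    {
        uint64_t r0 = a, r1 = b;      /* remainders */
        int64_t  s0 = 1, s1 = 0;      /* invariant: ri == si*a (mod b) */

        while (r1 != 0) {
            uint64_t q  = r0 / r1;
            uint64_t r2 = r0 - q * r1;
            int64_t  s2 = s0 - (int64_t) q * s1;
            r0 = r1;  r1 = r2;        /* the swap step */
            s0 = s1;  s1 = s2;
        }
        *s = s0;                      /* s0*a == gcd (mod b) */
        return r0;
    }
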
gcdext.c
    208: mp_ptr u0; in mpn_gcdext() local
    283: u0 = tp; tp += ualloc; in mpn_gcdext()
    312: MPN_COPY (u0, M.p[1][0], M.n); in mpn_gcdext()
    315: while ( (u0[un-1] | u1[un-1] ) == 0) in mpn_gcdext()
    325: ctx.u0 = u0; in mpn_gcdext()
    367: MPN_COPY (t0, u0, un); in mpn_gcdext()
    370: un = hgcd_mul_matrix_vector (&M, u0, t0, u1, un, t0 + un); in mpn_gcdext()
    373: ASSERT ( (u0[un-1] | u1[un-1]) > 0); in mpn_gcdext()
    380: ctx.u0 = u0; in mpn_gcdext()
    427: MPN_CMP (c, u0, u1, un); in mpn_gcdext()
    [all …]

submul_1.c
     43: mp_limb_t u0, crec, c, p1, p0, r0; in mpn_submul_1() local
     51: u0 = *up++; in mpn_submul_1()
     52: umul_ppmm (p1, p0, u0, v0); in mpn_submul_1()
     80: mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, cl, xl, c1, c2, c3; in mpn_submul_1() local
     93: u0 = *up++; in mpn_submul_1()
     95: umul_ppmm (p1, p0, u0, shifted_v0); in mpn_submul_1()
    116: mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, xw, cl, xl; in mpn_submul_1() local
    129: u0 = *up++; in mpn_submul_1()
    131: umul_ppmm (p1, p0, u0, shifted_v0); in mpn_submul_1()

addmul_1.c
     44: mp_limb_t u0, crec, c, p1, p0, r0; in mpn_addmul_1() local
     52: u0 = *up++; in mpn_addmul_1()
     53: umul_ppmm (p1, p0, u0, v0); in mpn_addmul_1()
     81: mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, crec, xl, c1, c2, c3; in mpn_addmul_1() local
     94: u0 = *up++; in mpn_addmul_1()
     96: umul_ppmm (p1, p0, u0, shifted_v0); in mpn_addmul_1()
    117: mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, xw, crec, xl; in mpn_addmul_1() local
    130: u0 = *up++; in mpn_addmul_1()
    132: umul_ppmm (p1, p0, u0, shifted_v0); in mpn_addmul_1()

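submul_1.c and addmul_1.c implement the same recurrence; the variants visible above differ only in how the double-limb product and carries are formed (umul_ppmm directly, or the shifted_v0 form). What mpn_addmul_1 computes, as an unsigned __int128 sketch with an illustrative name; mpn_submul_1 is the identical loop with the product subtracted and a borrow returned:

    #include <stdint.h>

    /* rp[0..n-1] += up[0..n-1] * v0, returning the carry limb.
       The sum p + rp[i] + cy never exceeds 2^128 - 1, so the
       wide type cannot overflow. */
    static uint64_t
    addmul_1_sketch (uint64_t *rp, const uint64_t *up, int n, uint64_t v0)
    {
        uint64_t cy = 0;
        for (int i = 0; i < n; i++) {
            unsigned __int128 t = (unsigned __int128) up[i] * v0 + rp[i] + cy;
            rp[i] = (uint64_t) t;
            cy = (uint64_t) (t >> 64);
        }
        return cy;
    }
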
div_qr_1n_pi1.c
    192: mp_limb_t u0, u2; in mpn_div_qr_1n_pi1() local
    215: u0 = up[n-1]; /* Early read, to allow qp == up. */ in mpn_div_qr_1n_pi1()
    218: add_mssaaaa (u2, u1, u0, u0, up[n-2], p1, p0); in mpn_div_qr_1n_pi1()
    248: ADDC_LIMB (cy, u0, u0, u2 & B2); in mpn_div_qr_1n_pi1()
    249: u0 -= (-cy) & d; in mpn_div_qr_1n_pi1()
    256: add_mssaaaa (u2, u1, u0, u0, up[j], p1, p0); in mpn_div_qr_1n_pi1()
    266: udiv_qrnnd_preinv (t, u0, u1, u0, d, dinv); in mpn_div_qr_1n_pi1()
    272: return u0; in mpn_div_qr_1n_pi1()

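div_qr_1n_pi1.c keeps the hardware divider out of the loop: the final udiv_qrnnd_preinv divides a two-limb number by d using the precomputed inverse dinv. A sketch of that 2/1 step, stated here as an assumption about what the macro expands to (the Granlund and Möller scheme), valid for d normalized (top bit set), u1 < d, and dinv = floor((B^2 - 1)/d) - B with B = 2^64:

    #include <stdint.h>

    /* 2/1 division with a precomputed inverse; name is illustrative.
       One wide multiply plus at most two corrections. */
    static uint64_t
    udiv_qrnnd_preinv_sketch (uint64_t *r, uint64_t u1, uint64_t u0,
                              uint64_t d, uint64_t dinv)
    {
        unsigned __int128 q = (unsigned __int128) dinv * u1;
        q += ((unsigned __int128) u1 << 64) | u0;   /* q = dinv*u1 + u */
        uint64_t q1 = (uint64_t) (q >> 64) + 1;
        uint64_t q0 = (uint64_t) q;
        uint64_t r0 = u0 - q1 * d;                  /* candidate remainder */
        if (r0 > q0) {      /* estimate one too big (unlikely) */
            q1--;
            r0 += d;
        }
        if (r0 >= d) {      /* estimate one too small (very unlikely) */
            q1++;
            r0 -= d;
        }
        *r = r0;
        return q1;
    }
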
matrix22_mul.c
    134: mp_ptr s0, t0, u0, u1; in mpn_matrix22_mul_strassen() local
    138: u0 = tp; tp += rn + mn + 1; in mpn_matrix22_mul_strassen()
    141: MUL (u0, r1, rn, m2, mn); /* u5 = s5 * t6 */ in mpn_matrix22_mul_strassen()
    170: r0[rn+mn] = mpn_add_n (r0, u0, u1, rn + mn); in mpn_matrix22_mul_strassen()
    207: u0[rn+mn] = 0; in mpn_matrix22_mul_strassen()
    210: r3s = abs_sub_n (r3, u0, r3, rn + mn + 1); in mpn_matrix22_mul_strassen()
    214: ASSERT_NOCARRY (mpn_add_n (r3, r3, u0, rn + mn + 1)); in mpn_matrix22_mul_strassen()
    230: MUL (u0, r2, rn, t0, mn + 1); /* u6 = s6 * t4 */ in mpn_matrix22_mul_strassen()
    231: ASSERT (u0[rn+mn] < 2); in mpn_matrix22_mul_strassen()
    241: t0s = add_signed_n (r2, r3, r3s, u0, t0s, rn + mn); in mpn_matrix22_mul_strassen()
    [all …]

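mpn_matrix22_mul_strassen forms a 2x2 matrix product with 7 multiplications instead of 8, the u0/u1 buffers holding intermediate products (the s5/t6 naming in the comments suggests a Winograd-style schedule rather than the textbook one). The classical Strassen identities, shown on plain integers purely for illustration:

    #include <stdint.h>

    /* Classical Strassen 2x2 product with 7 multiplications.
       GMP applies the same idea to multi-limb operands with a
       different (operand-sharing) schedule; this scalar version
       only demonstrates the identities. */
    static void
    strassen_2x2 (int64_t c[2][2],
                  const int64_t a[2][2], const int64_t b[2][2])
    {
        int64_t m1 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1]);
        int64_t m2 = (a[1][0] + a[1][1]) * b[0][0];
        int64_t m3 = a[0][0] * (b[0][1] - b[1][1]);
        int64_t m4 = a[1][1] * (b[1][0] - b[0][0]);
        int64_t m5 = (a[0][0] + a[0][1]) * b[1][1];
        int64_t m6 = (a[1][0] - a[0][0]) * (b[0][0] + b[0][1]);
        int64_t m7 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1]);

        c[0][0] = m1 + m4 - m5 + m7;
        c[0][1] = m3 + m5;
        c[1][0] = m2 + m4;
        c[1][1] = m1 - m2 + m3 + m6;
    }
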
/netbsd-src/external/lgpl3/gmp/dist/mpn/x86_64/coreibwl/

sqr_basecase.asm
    184: L(mf0): mulx( u0, w0, w1) C up[0]^2
    185: add u0, u0
    191: L(mf3): mulx( u0, w2, w3) C up[0]^2
    192: add u0, u0
    200: L(mf4): mulx( u0, w0, w1) C up[0]^2
    201: add u0, u0
    209: L(mf5): mulx( u0, w2, w3) C up[0]^2
    210: add u0, u0
    218: L(mf6): mulx( u0, w0, w1) C up[0]^2
    219: add u0, u0
    [all …]

/netbsd-src/external/lgpl3/gmp/dist/mpn/powerpc64/

lshift.asm
     93: L(gt3): ld u0, -8(up)
     96: srd r9, u0, tnc
    111: L(gt2): ld u0, -24(up)
    113: srd r9, u0, tnc
    116: sld r12, u0, cnt
    118: ld u0, -40(up)
    127: ld u0, -24(up)
    129: srd r9, u0, tnc
    132: sld r12, u0, cnt
    138: ld u0, -8(up)
    [all …]

rshift.asm
     93: L(gt3): ld u0, 0(up)
     96: sld r9, u0, tnc
    111: L(gt2): ld u0, 16(up)
    113: sld r9, u0, tnc
    116: srd r12, u0, cnt
    118: ld u0, 32(up)
    127: ld u0, 16(up)
    129: sld r9, u0, tnc
    132: srd r12, u0, cnt
    138: ld u0, 0(up)
    [all …]

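Both shift routines build every output limb from two adjacent input limbs, one shifted by cnt and the other by tnc = 64 - cnt, OR'd together (the sld/srd pairs above); lshift walks downward through memory so an overlapping destination above the source is safe, rshift walks upward. A C sketch of the lshift contract for 0 < cnt < 64, with an illustrative name:

    #include <stdint.h>

    /* rp[0..n-1] = up[0..n-1] << cnt, n >= 1, 0 < cnt < 64.
       Returns the bits shifted out of the top limb.  rshift is
       the mirror image, pairing >>cnt with <<tnc from the low end. */
    static uint64_t
    lshift_sketch (uint64_t *rp, const uint64_t *up, int n, unsigned cnt)
    {
        unsigned tnc = 64 - cnt;
        uint64_t retval = up[n - 1] >> tnc;
        for (int i = n - 1; i > 0; i--)
            rp[i] = (up[i] << cnt) | (up[i - 1] >> tnc);
        rp[0] = up[0] << cnt;
        return retval;
    }
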
/netbsd-src/common/lib/libc/quad/

muldi3.c
    115: #define u0 u.ul[L] in __muldi3() macro
    139: prod.q = __lmulq(u0, v0); in __muldi3()
    147: low.q = __lmulq(u0, v0); in __muldi3()
    149: if (u1 >= u0) in __muldi3()
    150: negmid = 0, udiff = u1 - u0; in __muldi3()
    152: negmid = 1, udiff = u0 - u1; in __muldi3()
    170: #undef u0 in __muldi3()
    195: u_int u1, u0, v1, v0, udiff, vdiff, high, mid, low; in __lmulq() local
    201: u0 = LHALF(u); in __lmulq()
    205: low = u0 * v0; in __lmulq()
    [all …]

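__muldi3 assembles a double-word product from half-word pieces; the udiff/negmid branches apply the Karatsuba-style identity u1*v0 + u0*v1 = u1*v1 + u0*v0 - (u1 - u0)*(v1 - v0) to replace the two middle products with one. The straightforward three-multiply version of the same decomposition, shown here at 64-bit width (the libc code works at 32- and 16-bit widths, and the name is illustrative):

    #include <stdint.h>

    /* Truncated 64-bit product from 32-bit halves.  Only the low
       32 bits of the middle sum matter, since it lands in the top
       half of the result. */
    static uint64_t
    mul64_sketch (uint64_t u, uint64_t v)
    {
        uint32_t u0 = (uint32_t) u, u1 = (uint32_t) (u >> 32);
        uint32_t v0 = (uint32_t) v, v1 = (uint32_t) (v >> 32);

        uint64_t low = (uint64_t) u0 * v0;
        uint64_t mid = (uint64_t) u1 * v0 + (uint64_t) u0 * v1;
        return low + (mid << 32);
    }
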
/netbsd-src/external/lgpl3/gmp/dist/mpn/arm/v6/

sqr_basecase.asm
     82: define(`u0', r7)
    107: ldm up, {v0,v1,u0}
    121: ldm up, {v0,v1,u0}
    156: L(top): ldr u0, [up, #4]
    162: umaal r5, cya, u0, v0
    165: umaal r4, cyb, u0, v1
    166: L(ko1): ldr u0, [up, #12]
    172: umaal r5, cya, u0, v0
    175: umaal r4, cyb, u0, v1
    180: ldr u0, [up, #4]
    [all …]

addmul_3.asm
    103: ldr u0, [up, #12]
    123: ldr u0, [up, #4]
    130: ldr u0, [up, #-4]
    134: umaal w1, cy0, u0, v0
    136: umaal w2, cy1, u0, v1
    138: umaal w0, cy2, u0, v2
    141: ldr u0, [up, #4]
    146: umaal w0, cy0, u0, v0
    148: umaal w1, cy1, u0, v1
    150: umaal w2, cy2, u0, v2
    [all …]

/netbsd-src/external/lgpl3/gmp/dist/mpn/powerpc64/mode64/p7/

gcd_22.asm
     55: define(`u0', `r5')
     75: L(top): subfc. t0, v0, u0 C 0 12
     79: subfc s0, u0, v0 C 0
     87: isel v0, v0, u0, 2 C 6 use condition set by subfe
     89: isel u0, t0, s0, 2 C 6
     92: srd u0, u0, cnt C 8
     95: or u0, u0, tmp C 10
    104: L(top1):isel v0, u0, v0, 29 C v = min(u,v)
    105: isel u0, r10, r11, 29 C u = |u - v|
    107: srd u0, u0, cnt
    [all …]

/netbsd-src/external/lgpl3/gmp/dist/mpn/arm/v7a/cora15/

submul_1.asm
     67: define(`u0', `r8') define(`u1', `r9')
     92: L(b00): ldrd u0, u1, [up]
     94: mvn u0, u0
     97: umaal w0, r6, u0, v0
    103: L(b10): ldrd u0, u1, [up], #8
    105: mvn u0, u0
    108: umaal w0, r4, u0, v0
    126: L(top): ldrd u0, u1, [up, #0]
    128: mvn u0, u0
    132: umlal w0, r6, u0, v0 C 1 2
    [all …]

/netbsd-src/external/lgpl3/gmp/dist/mpn/x86_64/

gcd_22.asm
     77: define(`u0', `%rsi')
    103: sub u0, t0
    108: mov u0, s0
    111: sub v0, u0
    114: L(bck): cmovc t0, u0 C u = |u - v|
    122: C Rightshift (u1,,u0) into (u1,,u0)
    123: L(shr): shr R8(cnt), u0
    128: or t1, u0
    137: C mov u0, %rsi
    141: rep;bsf u0, cnt C tzcnt!
    [all …]

gcd_11.asm
     78: define(`u0', `%rdi')
     93: L(top): cmovc %rdx, u0 C u = |u - v|
     98: shr R8(%rcx), u0
     99: L(ent): mov u0, %rax
    101: sub u0, %rdx
    102: sub v0, u0
    111: shr $MAXSHIFT, u0
    112: mov u0, %rdx

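The gcd_11 variants are the one-limb binary GCD: subtract, select |u - v| with cmovc, and shift out the trailing zeros by the bsf/tzcnt count. With odd inputs the algorithm is just the following (the name and the branchy structure are illustrative; the asm is branch-free in the select):

    #include <stdint.h>

    /* One-limb binary GCD for odd, nonzero u and v. */
    static uint64_t
    gcd_11_sketch (uint64_t u, uint64_t v)
    {
        while (u != v) {
            if (u < v) {
                uint64_t t = u;  u = v;  v = t;   /* keep u >= v */
            }
            u -= v;                               /* even: both were odd */
            u >>= __builtin_ctzll (u);            /* the shr-by-bsf step */
        }
        return u;
    }
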
/netbsd-src/external/lgpl3/gmp/dist/mpn/x86/

gcd_11.asm
     82: define(`u0', `%eax')
     94: mov 16(%esp), u0
     97: sub v0, u0 C u = u - v 0
    102: mov u0, %edi C 1
    103: mov u0, %ecx C 1
    105: xor %ebx, u0 C 2
    107: sub %ebx, u0 C u = |u - v| 3
    111: shr %cl, u0 C 4
    112: sub v0, u0 C u = u - v 0,5
    122: shr $MAXSHIFT, u0
    [all …]

/netbsd-src/external/lgpl3/mpfr/dist/src/

ai.c
    335: mpfr_t u0, u1; in mpfr_ai2() local
    426: mpfr_init2 (u0, wprec); mpfr_init2 (u1, wprec); in mpfr_ai2()
    439: mpfr_set_prec (u0, wprec); mpfr_set_prec (u1, wprec); in mpfr_ai2()
    442: mpfr_set_ui (u0, 1, MPFR_RNDN); in mpfr_ai2()
    464: mpfr_set_ui (u0, 9, MPFR_RNDN); in mpfr_ai2()
    465: mpfr_cbrt (u0, u0, MPFR_RNDN); in mpfr_ai2()
    466: mpfr_mul (u0, u0, temp2, MPFR_RNDN); in mpfr_ai2()
    467: mpfr_ui_div (u0, 1, u0, MPFR_RNDN); /* u0 = 1/( Gamma (2/3)*9^(1/3) ) */ in mpfr_ai2()
    494: mpfr_mul (s, s, u0, MPFR_RNDN); in mpfr_ai2()
    497: mpfr_mul (u0, u0, z[L], MPFR_RNDN); in mpfr_ai2()
    [all …]

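The constant built in u0 at lines 464-467 has a classical closed form: since 9^{1/3} = 3^{2/3}, it is the value of the Airy function at the origin, which seeds the Taylor series; the companion constant for Ai'(0) is noted here as background, not taken from the file:

    \mathrm{Ai}(0) = \frac{1}{3^{2/3}\,\Gamma(2/3)}, \qquad
    \mathrm{Ai}'(0) = \frac{-1}{3^{1/3}\,\Gamma(1/3)}.
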
/netbsd-src/external/lgpl3/gmp/dist/mpn/x86_64/bt1/

gcd_11.asm
     78: define(`u0', `%rdi')
     95: sub u0, t0
     99: L(top): mov u0, s0
    100: sub v0, u0
    101: cmovc t0, u0 C u = |u - v|
    106: L(shr): shr R8(cnt), u0
    108: sub u0, t0
    117: bsf u0, cnt

/netbsd-src/external/lgpl3/gmp/dist/mpn/sparc64/ultrasparct3/

aormul_4.asm
    118: L(odd): mov u1, u0
    138: mulx u0, v0, %l0 C w 0
    140: mulx u0, v1, %l1 C w 1
    142: mulx u0, v2, %l2 C w 2
    144: mulx u0, v3, %l3 C w 3
    150: umulxhi(u0, v0, %l4) C w 1
    183: mulx u0, v0, %l0
    185: mulx u0, v1, %l1
    187: mulx u0, v2, %l2
    189: mulx u0, v3, %l3
    [all …]

/netbsd-src/external/gpl3/gcc.old/dist/libgcc/config/epiphany/

udivsi3-float.c
     34: fu u0, u1, u2, u1b, u2b; in __udivsi3() local
     46: u0.f = a; in __udivsi3()
     49: u0.f = (int) a; in __udivsi3()
     53: u0.i = (a >> 8) - 0x00800000 + 0x3f800000 + (31 << 23); in __udivsi3()
     59: u0.i = (t - c); in __udivsi3()
     63: s0 = u0.i + 1 /* Compensate for rounding errors. */ in __udivsi3()

/netbsd-src/external/gpl3/gcc/dist/libgcc/config/epiphany/

udivsi3-float.c
     34: fu u0, u1, u2, u1b, u2b; in __udivsi3() local
     46: u0.f = a; in __udivsi3()
     49: u0.f = (int) a; in __udivsi3()
     53: u0.i = (a >> 8) - 0x00800000 + 0x3f800000 + (31 << 23); in __udivsi3()
     59: u0.i = (t - c); in __udivsi3()
     63: s0 = u0.i + 1 /* Compensate for rounding errors. */ in __udivsi3()

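Both copies (gcc.old and gcc carry the same file) estimate the quotient in floating point, building the float's bit pattern by hand when a is too large for an exact conversion, then correct in integer arithmetic (the "Compensate for rounding errors" line). A simplified sketch of the estimate-and-fix idea, using double so the truncated estimate is exact or one too large; that choice is an assumption of this sketch, not what the epiphany code does with its 32-bit floats:

    #include <stdint.h>

    /* Division via a floating-point estimate plus one correction.
       Requires b != 0.  With double's 53-bit mantissa, the computed
       a/b is never below the true quotient, so at most one step down
       is needed. */
    static uint32_t
    udiv_sketch (uint32_t a, uint32_t b)
    {
        uint32_t q = (uint32_t) ((double) a / (double) b);
        if ((uint64_t) q * b > a)    /* estimate rounded past floor */
            q--;
        return q;
    }
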