/* mpn_addmul_1 -- multiply the S1_SIZE long limb vector pointed to by S1_PTR
   by S2_LIMB, add the S1_SIZE least significant limbs of the product to the
   limb vector pointed to by RES_PTR.  Return the most significant limb of
   the product, adjusted for carry-out from the addition.

Copyright (C) 1992, 1993, 1994, 1996 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */

#include <config.h>
#include "gmp-impl.h"

mp_limb_t
mpn_addmul_1 (res_ptr, s1_ptr, s1_size, s2_limb)
     register mp_ptr res_ptr;
     register mp_srcptr s1_ptr;
     mp_size_t s1_size;
     register mp_limb_t s2_limb;
{
  register mp_limb_t cy_limb;
  register mp_size_t j;
  register mp_limb_t prod_high, prod_low;
  register mp_limb_t x;

  /* The loop counter and index J goes from -S1_SIZE to -1.  Counting
     upwards towards zero makes the loop faster: the termination test
     compares J against zero instead of against S1_SIZE.  */
  j = -s1_size;

  /* Offset the base pointers to compensate for the negative indices.  */
  res_ptr -= j;
  s1_ptr -= j;

  cy_limb = 0;
  do
    {
      /* prod_high:prod_low = s1_ptr[j] * s2_limb, a two-limb product.  */
      umul_ppmm (prod_high, prod_low, s1_ptr[j], s2_limb);

      /* Fold in the carry limb from the previous iteration.  Unsigned
	 addition wraps on overflow, so (prod_low < cy_limb) is 1 exactly
	 when the addition carried out.  */
      prod_low += cy_limb;
      cy_limb = (prod_low < cy_limb) + prod_high;

      /* Add the low product limb into the result vector, accumulating
	 any carry-out into cy_limb the same way.  */
      x = res_ptr[j];
      prod_low = x + prod_low;
      cy_limb += (prod_low < x);
      res_ptr[j] = prod_low;
    }
  while (++j != 0);

  return cy_limb;
}
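
/* A minimal usage sketch (an editorial addition, not part of the
   original GMP/libquadmath source): it computes {res, 3} += {s1, 3} * 7
   and prints the carry limb returned for the high end.  Guarded by
   "#if 0" so it cannot affect the library build; it assumes the
   mp_limb_t and mp_size_t types from gmp-impl.h above, and that
   mp_limb_t fits in an unsigned long for printing.  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* res starts with its two low limbs saturated, so the addition is
     guaranteed to propagate a carry across them.  */
  mp_limb_t res[3] = { ~(mp_limb_t) 0, ~(mp_limb_t) 0, 0 };
  mp_limb_t s1[3] = { 1, 2, 3 };
  mp_limb_t carry;

  carry = mpn_addmul_1 (res, s1, (mp_size_t) 3, (mp_limb_t) 7);
  printf ("carry = %lu\n", (unsigned long) carry);
  return 0;
}
#endif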