xref: /netbsd-src/external/gpl3/gcc/dist/libquadmath/printf/submul_1.c (revision 181254a7b1bdde6873432bffef2d2decc4b5c22f)
/* mpn_submul_1 -- multiply the S1_SIZE long limb vector pointed to by S1_PTR
   by S2_LIMB, subtract the S1_SIZE least significant limbs of the product
   from the limb vector pointed to by RES_PTR.  Return the most significant
   limb of the product, adjusted for carry-out from the subtraction.

Copyright (C) 1992, 1993, 1994, 1996, 2005 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */

25*181254a7Smrg #include <config.h>
26*181254a7Smrg #include "gmp-impl.h"
27*181254a7Smrg 
28*181254a7Smrg mp_limb_t
mpn_submul_1(res_ptr,s1_ptr,s1_size,s2_limb)29*181254a7Smrg mpn_submul_1 (res_ptr, s1_ptr, s1_size, s2_limb)
30*181254a7Smrg      register mp_ptr res_ptr;
31*181254a7Smrg      register mp_srcptr s1_ptr;
32*181254a7Smrg      mp_size_t s1_size;
33*181254a7Smrg      register mp_limb_t s2_limb;
34*181254a7Smrg {
35*181254a7Smrg   register mp_limb_t cy_limb;
36*181254a7Smrg   register mp_size_t j;
37*181254a7Smrg   register mp_limb_t prod_high, prod_low;
38*181254a7Smrg   register mp_limb_t x;
39*181254a7Smrg 
40*181254a7Smrg   /* The loop counter and index J goes from -SIZE to -1.  This way
41*181254a7Smrg      the loop becomes faster.  */
42*181254a7Smrg   j = -s1_size;
43*181254a7Smrg 
44*181254a7Smrg   /* Offset the base pointers to compensate for the negative indices.  */
45*181254a7Smrg   res_ptr -= j;
46*181254a7Smrg   s1_ptr -= j;
47*181254a7Smrg 
48*181254a7Smrg   cy_limb = 0;
49*181254a7Smrg   do
50*181254a7Smrg     {
51*181254a7Smrg       umul_ppmm (prod_high, prod_low, s1_ptr[j], s2_limb);
52*181254a7Smrg 
53*181254a7Smrg       prod_low += cy_limb;
54*181254a7Smrg       cy_limb = (prod_low < cy_limb) + prod_high;
55*181254a7Smrg 
56*181254a7Smrg       x = res_ptr[j];
57*181254a7Smrg       prod_low = x - prod_low;
58*181254a7Smrg       cy_limb += (prod_low > x);
59*181254a7Smrg       res_ptr[j] = prod_low;
60*181254a7Smrg     }
61*181254a7Smrg   while (++j != 0);
62*181254a7Smrg 
63*181254a7Smrg   return cy_limb;
64*181254a7Smrg }
65