/* mpn_add_n -- Add two limb vectors of equal, non-zero length.

Copyright (C) 1992, 1993, 1994, 1996 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */

#include <config.h>
#include "gmp-impl.h"

25*627f7eb2Smrg mp_limb_t
26*627f7eb2Smrg #if __STDC__
mpn_add_n(mp_ptr res_ptr,mp_srcptr s1_ptr,mp_srcptr s2_ptr,mp_size_t size)27*627f7eb2Smrg mpn_add_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr, mp_size_t size)
28*627f7eb2Smrg #else
29*627f7eb2Smrg mpn_add_n (res_ptr, s1_ptr, s2_ptr, size)
30*627f7eb2Smrg register mp_ptr res_ptr;
31*627f7eb2Smrg register mp_srcptr s1_ptr;
32*627f7eb2Smrg register mp_srcptr s2_ptr;
33*627f7eb2Smrg mp_size_t size;
34*627f7eb2Smrg #endif
35*627f7eb2Smrg {
36*627f7eb2Smrg register mp_limb_t x, y, cy;
37*627f7eb2Smrg register mp_size_t j;
38*627f7eb2Smrg
39*627f7eb2Smrg /* The loop counter and index J goes from -SIZE to -1. This way
40*627f7eb2Smrg the loop becomes faster. */
41*627f7eb2Smrg j = -size;
42*627f7eb2Smrg
43*627f7eb2Smrg /* Offset the base pointers to compensate for the negative indices. */
44*627f7eb2Smrg s1_ptr -= j;
45*627f7eb2Smrg s2_ptr -= j;
46*627f7eb2Smrg res_ptr -= j;
47*627f7eb2Smrg
48*627f7eb2Smrg cy = 0;
49*627f7eb2Smrg do
50*627f7eb2Smrg {
51*627f7eb2Smrg y = s2_ptr[j];
52*627f7eb2Smrg x = s1_ptr[j];
53*627f7eb2Smrg y += cy; /* add previous carry to one addend */
54*627f7eb2Smrg cy = (y < cy); /* get out carry from that addition */
55*627f7eb2Smrg y = x + y; /* add other addend */
56*627f7eb2Smrg cy = (y < x) + cy; /* get out carry from that add, combine */
57*627f7eb2Smrg res_ptr[j] = y;
58*627f7eb2Smrg }
59*627f7eb2Smrg while (++j != 0);
60*627f7eb2Smrg
61*627f7eb2Smrg return cy;
62*627f7eb2Smrg }
63