/* mpn_toom62_mul -- Multiply {ap,an} and {bp,bn} where an is nominally 3 times
   as large as bn.  Or more accurately, (5/2)bn < an < 6bn.

   Contributed to the GNU project by Torbjorn Granlund and Marco Bodrato.

   The idea of applying toom to unbalanced multiplication is due to Marco
   Bodrato and Alberto Zanoni.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */


#include "gmp.h"
#include "gmp-impl.h"

/* Evaluate in:
   0, +1, -1, +2, -2, 1/2, +inf

  <-s-><--n--><--n--><--n--><--n--><--n-->
   ___ ______ ______ ______ ______ ______
  |a5_|___a4_|___a3_|___a2_|___a1_|___a0_|
                             |_b1_|___b0_|
                             <-t--><--n-->

  v0  =    a0                       *   b0      #    A(0)*B(0)
  v1  = (  a0+  a1+ a2+ a3+  a4+  a5)*( b0+ b1) #    A(1)*B(1)      ah  <= 5   bh <= 1
  vm1 = (  a0-  a1+ a2- a3+  a4-  a5)*( b0- b1) #   A(-1)*B(-1)    |ah| <= 2   bh  = 0
  v2  = (  a0+ 2a1+4a2+8a3+16a4+32a5)*( b0+2b1) #    A(2)*B(2)      ah  <= 62  bh <= 2
  vm2 = (  a0- 2a1+4a2-8a3+16a4-32a5)*( b0-2b1) #   A(-2)*B(-2)    -41<=ah<=20 -1<=bh<=0
  vh  = (32a0+16a1+8a2+4a3+ 2a4+  a5)*(2b0+ b1) #  A(1/2)*B(1/2)    ah  <= 62  bh <= 2
  vinf=                           a5 *      b1  #  A(inf)*B(inf)
*/
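
/* The product A(x)*B(x) has degree 5 + 1 = 6, so the seven values above
   determine its seven coefficients; mpn_toom_interpolate_7pts recovers
   them at the end of this function.  */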

void
mpn_toom62_mul (mp_ptr pp,
                mp_srcptr ap, mp_size_t an,
                mp_srcptr bp, mp_size_t bn,
                mp_ptr scratch)
{
  mp_size_t n, s, t;
  mp_limb_t cy;
  mp_ptr as1, asm1, as2, asm2, ash;
  mp_ptr bs1, bsm1, bs2, bsm2, bsh;
  mp_ptr gp;
  enum toom7_flags aflags, bflags;
  TMP_DECL;

#define a0  ap
#define a1  (ap + n)
#define a2  (ap + 2*n)
#define a3  (ap + 3*n)
#define a4  (ap + 4*n)
#define a5  (ap + 5*n)
#define b0  bp
#define b1  (bp + n)

  n = 1 + (an >= 3 * bn ? (an - 1) / (size_t) 6 : (bn - 1) >> 1);

  s = an - 5 * n;
  t = bn - n;

  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= n);
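
  /* For example, an = 17 and bn = 6 satisfy (5/2)bn < an < 6bn and give
     n = 3, s = 2, t = 3.  */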

  TMP_MARK;

  as1 = TMP_SALLOC_LIMBS (n + 1);
  asm1 = TMP_SALLOC_LIMBS (n + 1);
  as2 = TMP_SALLOC_LIMBS (n + 1);
  asm2 = TMP_SALLOC_LIMBS (n + 1);
  ash = TMP_SALLOC_LIMBS (n + 1);

  bs1 = TMP_SALLOC_LIMBS (n + 1);
  bsm1 = TMP_SALLOC_LIMBS (n);
  bs2 = TMP_SALLOC_LIMBS (n + 1);
  bsm2 = TMP_SALLOC_LIMBS (n + 1);
  bsh = TMP_SALLOC_LIMBS (n + 1);

  gp = pp;

  /* Compute as1 and asm1.  */
  aflags = toom7_w3_neg & mpn_toom_eval_pm1 (as1, asm1, 5, ap, n, s, gp);

  /* Compute as2 and asm2. */
  aflags |= toom7_w1_neg & mpn_toom_eval_pm2 (as2, asm2, 5, ap, n, s, gp);
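
  /* The toom7_w3_neg and toom7_w1_neg bits record whether the evaluations at
     -1 and -2 came out negative; aflags ^ bflags, passed to the interpolation
     below, then carries the signs of the corresponding products.  */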

  /* Compute ash = 32 a0 + 16 a1 + 8 a2 + 4 a3 + 2 a4 + a5
     = 2*(2*(2*(2*(2*a0 + a1) + a2) + a3) + a4) + a5  */

#if HAVE_NATIVE_mpn_addlsh1_n
  cy = mpn_addlsh1_n (ash, a1, a0, n);
  cy = 2*cy + mpn_addlsh1_n (ash, a2, ash, n);
  cy = 2*cy + mpn_addlsh1_n (ash, a3, ash, n);
  cy = 2*cy + mpn_addlsh1_n (ash, a4, ash, n);
  if (s < n)
    {
      mp_limb_t cy2;
      cy2 = mpn_addlsh1_n (ash, a5, ash, s);
      ash[n] = 2*cy + mpn_lshift (ash + s, ash + s, n - s, 1);
      MPN_INCR_U (ash + s, n+1-s, cy2);
    }
  else
    ash[n] = 2*cy + mpn_addlsh1_n (ash, a5, ash, n);
#else
  cy = mpn_lshift (ash, a0, n, 1);
  cy += mpn_add_n (ash, ash, a1, n);
  cy = 2*cy + mpn_lshift (ash, ash, n, 1);
  cy += mpn_add_n (ash, ash, a2, n);
  cy = 2*cy + mpn_lshift (ash, ash, n, 1);
  cy += mpn_add_n (ash, ash, a3, n);
  cy = 2*cy + mpn_lshift (ash, ash, n, 1);
  cy += mpn_add_n (ash, ash, a4, n);
  cy = 2*cy + mpn_lshift (ash, ash, n, 1);
  ash[n] = cy + mpn_add (ash, ash, n, a5, s);
#endif
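
  /* Both branches evaluate the Horner form above.  The coefficients sum to
     32+16+8+4+2+1 = 63 and each a_i < B^n, so the result is below 63*B^n and
     the top limb ash[n] is at most 62, as asserted further down.  */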

  /* Compute bs1 and bsm1.  */
  if (t == n)
    {
#if HAVE_NATIVE_mpn_add_n_sub_n
      if (mpn_cmp (b0, b1, n) < 0)
        {
          cy = mpn_add_n_sub_n (bs1, bsm1, b1, b0, n);
          bflags = toom7_w3_neg;
        }
      else
        {
          cy = mpn_add_n_sub_n (bs1, bsm1, b0, b1, n);
          bflags = 0;
        }
      bs1[n] = cy >> 1;
#else
      bs1[n] = mpn_add_n (bs1, b0, b1, n);
      if (mpn_cmp (b0, b1, n) < 0)
        {
          mpn_sub_n (bsm1, b1, b0, n);
          bflags = toom7_w3_neg;
        }
      else
        {
          mpn_sub_n (bsm1, b0, b1, n);
          bflags = 0;
        }
#endif
    }
  else
    {
      bs1[n] = mpn_add (bs1, b0, n, b1, t);
      if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
        {
          mpn_sub_n (bsm1, b1, b0, t);
          MPN_ZERO (bsm1 + t, n - t);
          bflags = toom7_w3_neg;
        }
      else
        {
          mpn_sub (bsm1, b0, n, b1, t);
          bflags = 0;
        }
    }

  /* Compute bs2 and bsm2. Recycling bs1 and bsm1; bs2=bs1+b1, bsm2 =
     bsm1 - b1 */
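  /* Since B(x) = b0 + b1*x, B(2) = B(1) + b1 and B(-2) = B(-1) - b1.  bsm1
     holds |B(-1)| with its sign recorded in bflags, so |B(-2)| is obtained by
     adding or subtracting b1 and updating toom7_w1_neg accordingly.  */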
  mpn_add (bs2, bs1, n + 1, b1, t);
  if (bflags & toom7_w3_neg)
    {
      bsm2[n] = mpn_add (bsm2, bsm1, n, b1, t);
      bflags |= toom7_w1_neg;
    }
  else
    {
      /* FIXME: Simplify this logic? */
      if (t < n)
        {
          if (mpn_zero_p (bsm1 + t, n - t) && mpn_cmp (bsm1, b1, t) < 0)
            {
              ASSERT_NOCARRY (mpn_sub_n (bsm2, b1, bsm1, t));
              MPN_ZERO (bsm2 + t, n + 1 - t);
              bflags |= toom7_w1_neg;
            }
          else
            {
              ASSERT_NOCARRY (mpn_sub (bsm2, bsm1, n, b1, t));
              bsm2[n] = 0;
            }
        }
      else
        {
          if (mpn_cmp (bsm1, b1, n) < 0)
            {
              ASSERT_NOCARRY (mpn_sub_n (bsm2, b1, bsm1, n));
              bflags |= toom7_w1_neg;
            }
          else
            {
              ASSERT_NOCARRY (mpn_sub (bsm2, bsm1, n, b1, n));
            }
          bsm2[n] = 0;
        }
    }

  /* Compute bsh, recycling bs1 and bsm1. bsh=bs1+b0;  */
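  /* 2*b0 + b1 = (b0 + b1) + b0, so a single addition of b0 to bs1 suffices.  */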
  mpn_add (bsh, bs1, n + 1, b0, n);

  ASSERT (as1[n] <= 5);
  ASSERT (bs1[n] <= 1);
  ASSERT (asm1[n] <= 2);
  ASSERT (as2[n] <= 62);
  ASSERT (bs2[n] <= 2);
  ASSERT (asm2[n] <= 41);
  ASSERT (bsm2[n] <= 1);
  ASSERT (ash[n] <= 62);
  ASSERT (bsh[n] <= 2);

#define v0    pp				/* 2n */
#define v1    (pp + 2 * n)			/* 2n+1 */
#define vinf  (pp + 6 * n)			/* s+t */
#define v2    scratch				/* 2n+1 */
#define vm2   (scratch + 2 * n + 1)		/* 2n+1 */
#define vh    (scratch + 4 * n + 2)		/* 2n+1 */
#define vm1   (scratch + 6 * n + 3)		/* 2n+1 */
#define scratch_out (scratch + 8 * n + 4)	/* 2n+1 */
  /* Total scratch need: 10*n+5 */
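  /* v2, vm2, vh, vm1 and scratch_out each take 2*n + 1 limbs of scratch,
     which accounts for the 5 * (2*n + 1) = 10*n + 5 limbs quoted above.  */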

  /* Must be in allocation order, as they overwrite one limb beyond
   * 2n+1. */
  mpn_mul_n (v2, as2, bs2, n + 1);		/* v2, 2n+1 limbs */
  mpn_mul_n (vm2, asm2, bsm2, n + 1);		/* vm2, 2n+1 limbs */
  mpn_mul_n (vh, ash, bsh, n + 1);		/* vh, 2n+1 limbs */

  /* vm1, 2n+1 limbs */
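  /* asm1 is n+1 limbs with asm1[n] <= 2, while bsm1 is exactly n limbs, so
     multiply the low n limbs and then fold in asm1[n] * bsm1 at offset n.  */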
  mpn_mul_n (vm1, asm1, bsm1, n);
  cy = 0;
  if (asm1[n] == 1)
    {
      cy = mpn_add_n (vm1 + n, vm1 + n, bsm1, n);
    }
  else if (asm1[n] == 2)
    {
#if HAVE_NATIVE_mpn_addlsh1_n
      cy = mpn_addlsh1_n (vm1 + n, vm1 + n, bsm1, n);
#else
      cy = mpn_addmul_1 (vm1 + n, bsm1, n, CNST_LIMB(2));
#endif
    }
  vm1[2 * n] = cy;

  /* v1, 2n+1 limbs */
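  /* Same idea for v1, but both operands have a high limb (as1[n] <= 5,
     bs1[n] <= 1): the cross terms as1[n] * bs1 and bs1[n] * as1 and the
     product of the two high limbs are all folded in after the n-limb
     multiply.  */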
  mpn_mul_n (v1, as1, bs1, n);
  if (as1[n] == 1)
    {
      cy = bs1[n] + mpn_add_n (v1 + n, v1 + n, bs1, n);
    }
  else if (as1[n] == 2)
    {
#if HAVE_NATIVE_mpn_addlsh1_n
      cy = 2 * bs1[n] + mpn_addlsh1_n (v1 + n, v1 + n, bs1, n);
#else
      cy = 2 * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(2));
#endif
    }
  else if (as1[n] != 0)
    {
      cy = as1[n] * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, as1[n]);
    }
  else
    cy = 0;
  if (bs1[n] != 0)
    cy += mpn_add_n (v1 + n, v1 + n, as1, n);
  v1[2 * n] = cy;

  mpn_mul_n (v0, a0, b0, n);			/* v0, 2n limbs */

  /* vinf, s+t limbs */
  if (s > t)  mpn_mul (vinf, a5, s, b1, t);
  else        mpn_mul (vinf, b1, t, a5, s);

  mpn_toom_interpolate_7pts (pp, n, aflags ^ bflags,
			     vm2, vm1, v2, vh, s + t, scratch_out);

  TMP_FREE;
}