/* Operations with very long integers.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hwint.h"
#include "wide-int.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "inchash.h"
#include "tree.h"
#include "dumpfile.h"


#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/*
 * Internal utilities.
 */

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)

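/* A worked example of the block arithmetic behind these macros,
   assuming 64-bit HOST_WIDE_INTs: BLOCKS_NEEDED (70) is
   (70 + 63) / 64 = 2, BLOCKS_NEEDED (64) is 1, and the degenerate
   BLOCKS_NEEDED (0) is defined to be 1.  SIGN_MASK yields -1 (all
   ones) when the top bit of X is set and 0 otherwise, which is the
   sign extension used throughout this file.  */
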
/* Return the value of VAL[I] if I < LEN; otherwise return 0 or -1
   depending on the top bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}

/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
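/* For example, with 64-bit blocks and PRECISION 128, the value -1
   stored as the two blocks { -1, -1 } canonizes to the single block
   { -1 }, and 5 stored as { 5, 0 } canonizes to { 5 }: the dropped
   blocks are implied by sign-extending the remaining top block.  */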
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit of block I does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}

/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}

/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
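/* As an illustration, assuming a little-endian target with 8-byte
   words: the 2-byte buffer { 0x34, 0x12 } yields a 16-bit wide_int
   with value 0x1234, since byte 0 lands in bits 0-7 and byte 1 in
   bits 8-15 of the first block.  */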
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}

/* Set RESULT from X, taking the sign according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones' complement to avoid the 0x80..0 edge case that
	 unary minus cannot handle.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}

/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of X will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}

/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}

/*
 * Public utilities.
 */

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}

/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}

/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}

/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}

/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}

/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}

/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
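/* For example, with 64-bit blocks, sign-extending { 0x80 } from
   OFFSET 8 copies bit 7 into every higher position, giving
   { 0xffffffffffffff80 }, i.e. -128.  */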
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}

/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}

/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}

/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}

/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}

/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}

/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}

/*
 * Logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}

/*
 * Math.
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
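/* A note on the overflow checks below: for SIGNED addition, overflow
   occurs exactly when both operands have the same sign and the sign of
   the result differs, which is what the expression
   (val ^ o0) & (val ^ o1) tests once bit PREC - 1 has been shifted
   into the HWI's sign bit.  For UNSIGNED addition, overflow occurs
   exactly when the truncated result is smaller than operand O0 (or no
   larger than it, if a carry came into the top block).  */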
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Add all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}

/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
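/* For instance, with 64-bit HWIs and a signed unpack, the single
   block 0xFFFFFFFF00000001 becomes the half-blocks { 0x00000001,
   0xFFFFFFFF }, and any remaining half-blocks up to OUT_LEN are
   smeared with the sign mask 0xFFFFFFFF.  */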
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}

/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks and PRECISION is the precision of the result.  Return the
   number of blocks in the canonicalized result.  */
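/* For example, the four half-blocks { 0x1, 0x0, 0x2, 0x0 } pack into
   the two 64-bit blocks { 0x1, 0x2 }, which canonize may then trim.  */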
static unsigned int
wi_pack (HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len, unsigned int precision)
{
  unsigned int i = 0;
  unsigned int j = 0;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);

  while (i + 1 < in_len)
    {
      result[j++] = ((unsigned HOST_WIDE_INT) input[i]
		     | ((unsigned HOST_WIDE_INT) input[i + 1]
			<< HOST_BITS_PER_HALF_WIDE_INT));
      i += 2;
    }

  /* Handle the case where in_len is odd.  For this we zero-extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else if (j < blocks_needed)
    result[j++] = 0;
  return canonize (result, j, precision);
}

/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
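/* The general case below is classic schoolbook multiplication on
   half-word digits: each half-block of U is multiplied by each
   half-block of V with the carry K propagated through R.  Working in
   half-blocks keeps every partial product
   U[I] * V[J] + R[I + J] + K within an unsigned HOST_WIDE_INT.  */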
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    *overflow = (upper != 0);
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		*overflow = true;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		*overflow = true;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  *overflow = true;
    }

  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}

/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}

/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0);
	  else
	    *overflow = (x > o0);
	}
    }

  return canonize (val, len, prec);
}


/*
 * Division and Mod
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of B_DIVIDEND;
   however, there needs to be at least one extra element of B_DIVIDEND
   allocated.  N is the number of elements of B_DIVISOR.  */
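/* This is Knuth's Algorithm D (divide and correct): each quotient
   digit QHAT is estimated from the top two dividend digits and the
   top divisor digit, the estimate (at most two too large) is corrected
   in the "again" loop, and after the multiply-and-subtract step the
   divisor is added back in the rare case the estimate was still one
   too large.  */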
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT, which is the size of half of
     a HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}


1691 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1692    the result.  If QUOTIENT is nonnull, store the value of the quotient
1693    there and return the number of blocks in it.  The return value is
1694    not defined otherwise.  If REMAINDER is nonnull, store the value
1695    of the remainder there and store the number of blocks in
1696    *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
1697    the division overflowed.  */
1698 unsigned int
1699 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1700 		     HOST_WIDE_INT *remainder,
1701 		     const HOST_WIDE_INT *dividend_val,
1702 		     unsigned int dividend_len, unsigned int dividend_prec,
1703 		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1704 		     unsigned int divisor_prec, signop sgn,
1705 		     bool *oflow)
1706 {
1707   unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1708   unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1709   unsigned HOST_HALF_WIDE_INT
1710     b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1711   unsigned HOST_HALF_WIDE_INT
1712     b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1713   unsigned HOST_HALF_WIDE_INT
1714     b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1715   unsigned HOST_HALF_WIDE_INT
1716     b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1717   unsigned int m, n;
1718   bool dividend_neg = false;
1719   bool divisor_neg = false;
1720   bool overflow = false;
1721   wide_int neg_dividend, neg_divisor;
1722 
1723   wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1724 					   dividend_prec);
1725   wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1726 					  divisor_prec);
1727   if (divisor == 0)
1728     overflow = true;
1729 
1730   /* The smallest signed number / -1 causes overflow.  The dividend_len
1731      check is for speed rather than correctness.  */
1732   if (sgn == SIGNED
1733       && dividend_len == BLOCKS_NEEDED (dividend_prec)
1734       && divisor == -1
1735       && wi::only_sign_bit_p (dividend))
1736     overflow = true;
1737 
1738   /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
1739      (signed min / -1) has the same representation as the orignal dividend.
1740      We have traditionally made division by zero act as division by one,
1741      so there too we use the original dividend.  */
1742   if (overflow)
1743     {
1744       if (remainder)
1745 	{
1746 	  *remainder_len = 1;
1747 	  remainder[0] = 0;
1748 	}
1749       if (oflow != 0)
1750 	*oflow = true;
1751       if (quotient)
1752 	for (unsigned int i = 0; i < dividend_len; ++i)
1753 	  quotient[i] = dividend_val[i];
1754       return dividend_len;
1755     }
1756 
1757   if (oflow)
1758     *oflow = false;
1759 
1760   /* Do it on the host if you can.  */
1761   if (sgn == SIGNED
1762       && wi::fits_shwi_p (dividend)
1763       && wi::fits_shwi_p (divisor))
1764     {
1765       HOST_WIDE_INT o0 = dividend.to_shwi ();
1766       HOST_WIDE_INT o1 = divisor.to_shwi ();
1767 
1768       if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1769 	{
1770 	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1771 	  if (quotient)
1772 	    {
1773 	      quotient[0] = HOST_WIDE_INT_MIN;
1774 	      quotient[1] = 0;
1775 	    }
1776 	  if (remainder)
1777 	    {
1778 	      remainder[0] = 0;
1779 	      *remainder_len = 1;
1780 	    }
1781 	  return 2;
1782 	}
1783       else
1784 	{
1785 	  if (quotient)
1786 	    quotient[0] = o0 / o1;
1787 	  if (remainder)
1788 	    {
1789 	      remainder[0] = o0 % o1;
1790 	      *remainder_len = 1;
1791 	    }
1792 	  return 1;
1793 	}
1794     }
1795 
1796   if (sgn == UNSIGNED
1797       && wi::fits_uhwi_p (dividend)
1798       && wi::fits_uhwi_p (divisor))
1799     {
1800       unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1801       unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1802       unsigned int quotient_len = 1;
1803 
1804       if (quotient)
1805 	{
1806 	  quotient[0] = o0 / o1;
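	  /* If the single-block quotient has its top bit set and the
	     true precision is wider, canonical form needs an explicit
	     zero block so the value is not read as negative.  */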
1807 	  if (o1 == 1
1808 	      && (HOST_WIDE_INT) o0 < 0
1809 	      && dividend_prec > HOST_BITS_PER_WIDE_INT)
1810 	    {
1811 	      quotient[1] = 0;
1812 	      quotient_len = 2;
1813 	    }
1814 	}
1815       if (remainder)
1816 	{
1817 	  remainder[0] = o0 % o1;
1818 	  if ((HOST_WIDE_INT) remainder[0] < 0
1819 	      && dividend_prec > HOST_BITS_PER_WIDE_INT)
1820 	    {
1821 	      remainder[1] = 0;
1822 	      *remainder_len = 2;
1823 	    }
1824 	  else
1825 	    *remainder_len = 1;
1826 	}
1827       return quotient_len;
1828     }
1829 
1830   /* Make the divisor and dividend positive and remember what we
1831      did.  */
1832   if (sgn == SIGNED)
1833     {
1834       if (wi::neg_p (dividend))
1835 	{
1836 	  neg_dividend = -dividend;
1837 	  dividend = neg_dividend;
1838 	  dividend_neg = true;
1839 	}
1840       if (wi::neg_p (divisor))
1841 	{
1842 	  neg_divisor = -divisor;
1843 	  divisor = neg_divisor;
1844 	  divisor_neg = true;
1845 	}
1846     }
1847 
1848   wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1849 	     dividend_blocks_needed, dividend_prec, sgn);
1850   wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1851 	     divisor_blocks_needed, divisor_prec, sgn);
1852 
1853   m = dividend_blocks_needed;
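  /* Zero the slot just above the dividend: a Knuth-style long division
     (presumably what divmod_internal_2 implements) normalizes by
     shifting left, which can carry into one extra half-width digit.  */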
1854   b_dividend[m] = 0;
1855   while (m > 1 && b_dividend[m - 1] == 0)
1856     m--;
1857 
1858   n = divisor_blocks_needed;
1859   while (n > 1 && b_divisor[n - 1] == 0)
1860     n--;
1861 
1862   memset (b_quotient, 0, sizeof (b_quotient));
1863 
1864   divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1865 
1866   unsigned int quotient_len = 0;
1867   if (quotient)
1868     {
1869       quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1870       /* The quotient is negative if exactly one of the divisor or
1871 	 dividend is negative.  */
1872       if (dividend_neg != divisor_neg)
1873 	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1874 				      quotient_len, dividend_prec,
1875 				      UNSIGNED, 0);
1876     }
1877 
1878   if (remainder)
1879     {
1880       *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1881       /* The remainder always has the same sign as the dividend.  */
1882       if (dividend_neg)
1883 	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1884 					*remainder_len, dividend_prec,
1885 					UNSIGNED, 0);
1886     }
1887 
1888   return quotient_len;
1889 }
1890 
1891 /*
1892  * Shifting, rotating and extraction.
1893  */
1894 
1895 /* Left shift XVAL by SHIFT and store the result in VAL.  Return the
1896    number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
1897 unsigned int
1898 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1899 		  unsigned int xlen, unsigned int precision,
1900 		  unsigned int shift)
1901 {
1902   /* Split the shift into a whole-block shift and a subblock shift.  */
1903   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1904   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
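  /* E.g. with 64-bit HOST_WIDE_INT blocks, SHIFT == 70 splits into
     SKIP == 1 whole block and SMALL_SHIFT == 6 remaining bits.  */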
1905 
1906   /* The whole-block shift fills with zeros.  */
1907   unsigned int len = BLOCKS_NEEDED (precision);
1908   for (unsigned int i = 0; i < skip; ++i)
1909     val[i] = 0;
1910 
1911   /* It's easier to handle the simple block case specially.  */
1912   if (small_shift == 0)
1913     for (unsigned int i = skip; i < len; ++i)
1914       val[i] = safe_uhwi (xval, xlen, i - skip);
1915   else
1916     {
1917       /* The first unfilled output block is a left shift of the first
1918 	 block in XVAL.  The other output blocks contain bits from two
1919 	 consecutive input blocks.  */
1920       unsigned HOST_WIDE_INT carry = 0;
1921       for (unsigned int i = skip; i < len; ++i)
1922 	{
1923 	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1924 	  val[i] = (x << small_shift) | carry;
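	  /* -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT is simply
	     HOST_BITS_PER_WIDE_INT - SMALL_SHIFT here, since SMALL_SHIFT
	     is known to be nonzero in this branch.  */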
1925 	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1926 	}
1927     }
1928   return canonize (val, len, precision);
1929 }
1930 
1931 /* Right shift XVAL by SHIFT and store the result in VAL.  Return the
1932    number of blocks in VAL.  The input has XPRECISION bits and the
1933    output has XPRECISION - SHIFT bits.  */
1934 static unsigned int
1935 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1936 		     unsigned int xlen, unsigned int xprecision,
1937 		     unsigned int shift)
1938 {
1939   /* Split the shift into a whole-block shift and a subblock shift.  */
1940   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1941   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1942 
1943   /* Work out how many blocks are needed to store the significant bits
1944      (excluding the upper zeros or signs).  */
1945   unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1946 
1947   /* It's easier to handle the simple block case specially.  */
1948   if (small_shift == 0)
1949     for (unsigned int i = 0; i < len; ++i)
1950       val[i] = safe_uhwi (xval, xlen, i + skip);
1951   else
1952     {
1953       /* Each output block but the last is a combination of two input blocks.
1954 	 The last block is a right shift of the last block in XVAL.  */
1955       unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1956       for (unsigned int i = 0; i < len; ++i)
1957 	{
1958 	  val[i] = curr >> small_shift;
1959 	  curr = safe_uhwi (xval, xlen, i + skip + 1);
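	  /* As in lshift_large, -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT is
	     HOST_BITS_PER_WIDE_INT - SMALL_SHIFT for nonzero SMALL_SHIFT.  */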
1960 	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1961 	}
1962     }
1963   return len;
1964 }
1965 
1966 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1967    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
1968    VAL has PRECISION bits.  */
1969 unsigned int
1970 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1971 		   unsigned int xlen, unsigned int xprecision,
1972 		   unsigned int precision, unsigned int shift)
1973 {
1974   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1975 
1976   /* The value we just created has precision XPRECISION - SHIFT.
1977      Zero-extend it to wider precisions.  */
1978   if (precision > xprecision - shift)
1979     {
1980       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1981       if (small_prec)
1982 	val[len - 1] = zext_hwi (val[len - 1], small_prec);
1983       else if (val[len - 1] < 0)
1984 	{
1985 	  /* Add a zero block so the set top bit is not sign-extended.  */
1986 	  val[len++] = 0;
1987 	  return len;
1988 	}
1989     }
1990   return canonize (val, len, precision);
1991 }
1992 
1993 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1994    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
1995    VAL has PRECISION bits.  */
1996 unsigned int
1997 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1998 		   unsigned int xlen, unsigned int xprecision,
1999 		   unsigned int precision, unsigned int shift)
2000 {
2001   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
2002 
2003   /* The value we just created has precision XPRECISION - SHIFT.
2004      Sign-extend it to wider precisions.  */
2005   if (precision > xprecision - shift)
2006     {
2007       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2008       if (small_prec)
2009 	val[len - 1] = sext_hwi (val[len - 1], small_prec);
2010     }
2011   return canonize (val, len, precision);
2012 }
2013 
2014 /* Return the number of leading (upper) zeros in X.  */
2015 int
2016 wi::clz (const wide_int_ref &x)
2017 {
2018   /* Calculate how many bits there are above the highest represented block.  */
2019   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
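  /* E.g. at PRECISION 128 with a single 64-bit block, COUNT is 64:
     a nonnegative value has an implicit all-zero upper block, so the
     result is 64 plus the leading zeros of the low block.  */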
2020 
2021   unsigned HOST_WIDE_INT high = x.uhigh ();
2022   if (count < 0)
2023     /* The upper -COUNT bits of HIGH are not part of the value.
2024        Clear them out.  */
2025     high = (high << -count) >> -count;
2026   else if (x.sign_mask () < 0)
2027     /* The upper bit is set, so there are no leading zeros.  */
2028     return 0;
2029 
2030   /* We don't need to look below HIGH.  Either HIGH is nonzero,
2031      or the top bit of the block below is nonzero; clz_hwi is
2032      HOST_BITS_PER_WIDE_INT in the latter case.  */
2033   return count + clz_hwi (high);
2034 }
2035 
2036 /* Return the number of redundant sign bits in X.  (That is, the number
2037    of bits immediately below the sign bit that have the same value as
2038    the sign bit.)  */
2039 int
2040 wi::clrsb (const wide_int_ref &x)
2041 {
2042   /* Calculate how many bits there are above the highest represented block.  */
2043   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2044 
2045   unsigned HOST_WIDE_INT high = x.uhigh ();
2046   unsigned HOST_WIDE_INT mask = -1;
2047   if (count < 0)
2048     {
2049       /* The upper -COUNT bits of HIGH are not part of the value.
2050 	 Clear them from both MASK and HIGH.  */
2051       mask >>= -count;
2052       high &= mask;
2053     }
2054 
2055   /* If the top bit is 1, count the number of leading 1s.  If the top
2056      bit is 0, count the number of leading 0s.  */
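  /* HIGH > MASK / 2 tests the topmost valid bit; XORing with MASK then
     turns leading 1s into leading 0s without changing their count.  */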
2057   if (high > mask / 2)
2058     high ^= mask;
2059 
2060   /* There are no sign bits below the top block, so we don't need to look
2061      beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2062      HIGH is 0.  */
2063   return count + clz_hwi (high) - 1;
2064 }
2065 
2066 /* Return the number of trailing (lower) zeros in X.  */
2067 int
2068 wi::ctz (const wide_int_ref &x)
2069 {
2070   if (x.len == 1 && x.ulow () == 0)
2071     return x.precision;
2072 
2073   /* Having dealt with the zero case, we know there must be a block
2074      with a nonzero bit.  We don't care about the bits above the first 1.  */
2075   unsigned int i = 0;
2076   while (x.val[i] == 0)
2077     ++i;
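  /* E.g. blocks { 0, 0x10 } give I == 1 and a count of 64 + 4 == 68,
     assuming 64-bit blocks.  */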
2078   return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2079 }
2080 
2081 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2082    return -1.  */
2083 int
2084 wi::exact_log2 (const wide_int_ref &x)
2085 {
2086   /* Reject cases where there are implicit -1 blocks above HIGH.  */
2087   if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2088     return -1;
2089 
2090   /* Set CRUX to the index of the entry that should be nonzero.
2091      If the top block is zero then the next lowest block (if any)
2092      must have the high bit set.  */
2093   unsigned int crux = x.len - 1;
2094   if (crux > 0 && x.val[crux] == 0)
2095     crux -= 1;
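  /* E.g. 1 << 63 at a precision above 64 is stored as two blocks
     { 1 << 63, 0 }: the zero top block keeps the value nonnegative,
     so CRUX moves down to the block below it.  */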
2096 
2097   /* Check that all lower blocks are zero.  */
2098   for (unsigned int i = 0; i < crux; ++i)
2099     if (x.val[i] != 0)
2100       return -1;
2101 
2102   /* Get a zero-extended form of block CRUX.  */
2103   unsigned HOST_WIDE_INT hwi = x.val[crux];
2104   if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2105     hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2106 
2107   /* Now it's down to whether HWI is a power of 2.  */
2108   int res = ::exact_log2 (hwi);
2109   if (res >= 0)
2110     res += crux * HOST_BITS_PER_WIDE_INT;
2111   return res;
2112 }
2113 
2114 /* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
2115 int
2116 wi::floor_log2 (const wide_int_ref &x)
2117 {
2118   return x.precision - 1 - clz (x);
2119 }
2120 
2121 /* Return the index of the first (lowest) set bit in X, counting from 1.
2122    Return 0 if X is 0.  */
2123 int
2124 wi::ffs (const wide_int_ref &x)
2125 {
2126   return eq_p (x, 0) ? 0 : ctz (x) + 1;
2127 }
2128 
2129 /* Return true if sign-extending X to have precision PRECISION would give
2130    the minimum signed value at that precision.  */
2131 bool
2132 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2133 {
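  /* The minimum signed value has only its top bit set, so X qualifies
     exactly when its lowest set bit is at position PRECISION - 1.  */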
2134   return ctz (x) + 1 == int (precision);
2135 }
2136 
2137 /* Return true if X represents the minimum signed value.  */
2138 bool
2139 wi::only_sign_bit_p (const wide_int_ref &x)
2140 {
2141   return only_sign_bit_p (x, x.precision);
2142 }
2143 
2144 /*
2145  * Private utilities.
2146  */
2147 
2148 void gt_ggc_mx (widest_int *) { }
2149 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2150 void gt_pch_nx (widest_int *) { }
2151 
2152 template void wide_int::dump () const;
2153 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2154 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2155 template void offset_int::dump () const;
2156 template void widest_int::dump () const;
2157