1 /* Operations with very long integers.
2 Copyright (C) 2012-2020 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "selftest.h"
27
28
29 #define HOST_BITS_PER_HALF_WIDE_INT 32
30 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
31 # define HOST_HALF_WIDE_INT long
32 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
33 # define HOST_HALF_WIDE_INT int
34 #else
35 #error Please add support for HOST_HALF_WIDE_INT
36 #endif
37
38 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
39 /* Do not include longlong.h when the compiler is clang-based.  See PR61146. */
40 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
41 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
42 typedef unsigned HOST_WIDE_INT UWtype;
43 typedef unsigned int UQItype __attribute__ ((mode (QI)));
44 typedef unsigned int USItype __attribute__ ((mode (SI)));
45 typedef unsigned int UDItype __attribute__ ((mode (DI)));
46 #if W_TYPE_SIZE == 32
47 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
48 #else
49 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
50 #endif
51 #include "longlong.h"
52 #endif
53
54 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
55
56 /*
57 * Internal utilities.
58 */
59
60 /* Quantities to deal with values that hold half of a wide int. Used
61 in multiply and divide. */
62 #define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
63
64 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
65 #define BLOCKS_NEEDED(PREC) \
66 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
67 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
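/* For illustration (a sketch assuming a 64-bit HOST_WIDE_INT host):
   BLOCKS_NEEDED (0) == 1, BLOCKS_NEEDED (64) == 1, BLOCKS_NEEDED (65) == 2,
   while SIGN_MASK (-5) == -1 and SIGN_MASK (7) == 0.  */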
68
69 /* Return the value of VAL[I] if I < LEN; otherwise, return 0 or -1
70 based on the top existing bit of VAL. */
71
72 static unsigned HOST_WIDE_INT
73 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
74 {
75 return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
76 }
77
78 /* Convert the integer in VAL to canonical form, returning its new length.
79 LEN is the number of blocks currently in VAL and PRECISION is the number
80 of bits in the integer it represents.
81
82 This function only changes the representation, not the value. */
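/* As a sketch, assuming 64-bit HOST_WIDE_INT blocks: a 128-bit value of 1
   may arrive as the two blocks { 1, 0 }; the upper block is just the
   extension of the lower one, so the canonical form is the single block
   { 1 }.  The 128-bit value 0xffffffffffffffff, however, must keep both
   blocks { -1, 0 }, since dropping the zero block would change the
   implied sign extension to all ones.  */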
83 static unsigned int
84 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
85 {
86 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
87 HOST_WIDE_INT top;
88 int i;
89
90 if (len > blocks_needed)
91 len = blocks_needed;
92
93 if (len == 1)
94 return len;
95
96 top = val[len - 1];
97 if (len * HOST_BITS_PER_WIDE_INT > precision)
98 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
99 if (top != 0 && top != (HOST_WIDE_INT)-1)
100 return len;
101
102 /* At this point we know that the top is either 0 or -1. Find the
103 first block that is not a copy of this. */
104 for (i = len - 2; i >= 0; i--)
105 {
106 HOST_WIDE_INT x = val[i];
107 if (x != top)
108 {
109 if (SIGN_MASK (x) == top)
110 return i + 1;
111
112 /* We need an extra block because the top bit of block I does
113 not match the extension. */
114 return i + 2;
115 }
116 }
117
118 /* The number is 0 or -1. */
119 return 1;
120 }
121
122 /* VAL[0] is the unsigned result of an operation. Canonize it by adding
123 another 0 block if needed, and return the number of blocks needed. */
124
125 static inline unsigned int
126 canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
127 {
128 if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
129 {
130 val[1] = 0;
131 return 2;
132 }
133 return 1;
134 }
135
136 /*
137 * Conversion routines in and out of wide_int.
138 */
139
140 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
141 result for an integer with precision PRECISION. Return the length
142 of VAL (after any canonization). */
143 unsigned int
144 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
145 unsigned int xlen, unsigned int precision, bool need_canon)
146 {
147 for (unsigned i = 0; i < xlen; i++)
148 val[i] = xval[i];
149 return need_canon ? canonize (val, xlen, precision) : xlen;
150 }
151
152 /* Construct a wide int from a buffer of length BUFFER_LEN. BUFFER will be
153 read according to byte endianness and word endianness of the target.
154 Only the lower BUFFER_LEN bytes of the result are set; the remaining
155 high bytes are cleared. */
156 wide_int
157 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
158 {
159 unsigned int precision = buffer_len * BITS_PER_UNIT;
160 wide_int result = wide_int::create (precision);
161 unsigned int words = buffer_len / UNITS_PER_WORD;
162
163 /* We have to clear all the bits ourselves, as we merely OR in values
164 below. */
165 unsigned int len = BLOCKS_NEEDED (precision);
166 HOST_WIDE_INT *val = result.write_val ();
167 for (unsigned int i = 0; i < len; ++i)
168 val[i] = 0;
169
170 for (unsigned int byte = 0; byte < buffer_len; byte++)
171 {
172 unsigned int offset;
173 unsigned int index;
174 unsigned int bitpos = byte * BITS_PER_UNIT;
175 unsigned HOST_WIDE_INT value;
176
177 if (buffer_len > UNITS_PER_WORD)
178 {
179 unsigned int word = byte / UNITS_PER_WORD;
180
181 if (WORDS_BIG_ENDIAN)
182 word = (words - 1) - word;
183
184 offset = word * UNITS_PER_WORD;
185
186 if (BYTES_BIG_ENDIAN)
187 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
188 else
189 offset += byte % UNITS_PER_WORD;
190 }
191 else
192 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
193
194 value = (unsigned HOST_WIDE_INT) buffer[offset];
195
196 index = bitpos / HOST_BITS_PER_WIDE_INT;
197 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
198 }
199
200 result.set_len (canonize (val, len, precision));
201
202 return result;
203 }
204
205 /* Set RESULT from X; the sign is taken according to SGN. */
206 void
207 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
208 {
209 int len = x.get_len ();
210 const HOST_WIDE_INT *v = x.get_val ();
211 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
212
213 if (wi::neg_p (x, sgn))
214 {
215 /* We use ones' complement to avoid the 0x80..0 edge case that
216 unary minus cannot handle. */
217 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
218 for (int i = 0; i < len; i++)
219 t[i] = ~v[i];
220 if (excess > 0)
221 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
222 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
223 mpz_com (result, result);
224 }
225 else if (excess > 0)
226 {
227 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
228 for (int i = 0; i < len - 1; i++)
229 t[i] = v[i];
230 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
231 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
232 }
233 else if (excess < 0 && wi::neg_p (x))
234 {
235 int extra
236 = (-excess + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
237 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
238 for (int i = 0; i < len; i++)
239 t[i] = v[i];
240 for (int i = 0; i < extra; i++)
241 t[len + i] = -1;
242 excess = (-excess) % HOST_BITS_PER_WIDE_INT;
243 if (excess)
244 t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
245 mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
246 }
247 else
248 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
249 }
250
251 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
252 values of X will be wrapped; otherwise, they will be set to the
253 appropriate minimum or maximum TYPE bound. */
254 wide_int
255 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
256 {
257 size_t count, numb;
258 unsigned int prec = TYPE_PRECISION (type);
259 wide_int res = wide_int::create (prec);
260
261 if (!wrap)
262 {
263 mpz_t min, max;
264
265 mpz_init (min);
266 mpz_init (max);
267 get_type_static_bounds (type, min, max);
268
269 if (mpz_cmp (x, min) < 0)
270 mpz_set (x, min);
271 else if (mpz_cmp (x, max) > 0)
272 mpz_set (x, max);
273
274 mpz_clear (min);
275 mpz_clear (max);
276 }
277
278 /* Determine the number of unsigned HOST_WIDE_INTs that are required
279 for representing the absolute value. The code to calculate count is
280 extracted from the GMP manual, section "Integer Import and Export":
281 http://gmplib.org/manual/Integer-Import-and-Export.html */
282 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
283 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
284 HOST_WIDE_INT *val = res.write_val ();
285 /* Read the absolute value.
286
287 Write directly to the wide_int storage if possible, otherwise leave
288 GMP to allocate the memory for us. It might be slightly more efficient
289 to use mpz_tdiv_r_2exp for the latter case, but the situation is
290 pathological and it seems safer to operate on the original mpz value
291 in all cases. */
292 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
293 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
294 if (count < 1)
295 {
296 val[0] = 0;
297 count = 1;
298 }
299 count = MIN (count, BLOCKS_NEEDED (prec));
300 if (valres != val)
301 {
302 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
303 free (valres);
304 }
305 /* Zero-extend the absolute value to PREC bits. */
306 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
307 val[count++] = 0;
308 else
309 count = canonize (val, count, prec);
310 res.set_len (count);
311
312 if (mpz_sgn (x) < 0)
313 res = -res;
314
315 return res;
316 }
317
318 /*
319 * Largest and smallest values in a mode.
320 */
321
322 /* Return the largest SGNed number that is representable in PRECISION bits.
323
324 TODO: There is still code from the double_int era that tries to
325 make up for the fact that double_ints could not represent the
326 min and max values of all types. This code should be removed
327 because the min and max values can always be represented in
328 wide_ints and int-csts. */
329 wide_int
330 wi::max_value (unsigned int precision, signop sgn)
331 {
332 gcc_checking_assert (precision != 0);
333 if (sgn == UNSIGNED)
334 /* The unsigned max is just all ones. */
335 return shwi (-1, precision);
336 else
337 /* The signed max is all ones except the top bit. This must be
338 explicitly represented. */
339 return mask (precision - 1, false, precision);
340 }
341
342 /* Return the smallest SGNed number that is representable in PRECISION bits. */
343 wide_int
344 wi::min_value (unsigned int precision, signop sgn)
345 {
346 gcc_checking_assert (precision != 0);
347 if (sgn == UNSIGNED)
348 return uhwi (0, precision);
349 else
350 /* The signed min is all zeros except the top bit. This must be
351 explicitly represented. */
352 return wi::set_bit_in_zero (precision - 1, precision);
353 }
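/* For example, with PRECISION == 8: wi::max_value gives 255 for UNSIGNED
   and 127 for SIGNED, while wi::min_value gives 0 and -128 respectively.  */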
354
355 /*
356 * Public utilities.
357 */
358
359 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
360 signedness SGN, to an integer that has PRECISION bits. Store the blocks
361 in VAL and return the number of blocks used.
362
363 This function can handle both extension (PRECISION > XPRECISION)
364 and truncation (PRECISION < XPRECISION). */
365 unsigned int
366 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
367 unsigned int xlen, unsigned int xprecision,
368 unsigned int precision, signop sgn)
369 {
370 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
371 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
372 for (unsigned i = 0; i < len; i++)
373 val[i] = xval[i];
374
375 if (precision > xprecision)
376 {
377 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
378
379 /* Expanding. */
380 if (sgn == UNSIGNED)
381 {
382 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
383 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
384 else if (val[len - 1] < 0)
385 {
386 while (len < BLOCKS_NEEDED (xprecision))
387 val[len++] = -1;
388 if (small_xprecision)
389 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
390 else
391 val[len++] = 0;
392 }
393 }
394 else
395 {
396 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
397 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
398 }
399 }
400 len = canonize (val, len, precision);
401
402 return len;
403 }
404
405 /* This function hides the fact that we cannot rely on the bits beyond
406 the precision. This issue comes up in the relational comparisons
407 where we do allow comparisons of values of different precisions. */
408 static inline HOST_WIDE_INT
409 selt (const HOST_WIDE_INT *a, unsigned int len,
410 unsigned int blocks_needed, unsigned int small_prec,
411 unsigned int index, signop sgn)
412 {
413 HOST_WIDE_INT val;
414 if (index < len)
415 val = a[index];
416 else if (index < blocks_needed || sgn == SIGNED)
417 /* Signed or within the precision. */
418 val = SIGN_MASK (a[len - 1]);
419 else
420 /* Unsigned extension beyond the precision. */
421 val = 0;
422
423 if (small_prec && index == blocks_needed - 1)
424 return (sgn == SIGNED
425 ? sext_hwi (val, small_prec)
426 : zext_hwi (val, small_prec));
427 else
428 return val;
429 }
430
431 /* Find the highest bit represented in a wide int. This will in
432 general have the same value as the sign bit. */
433 static inline HOST_WIDE_INT
434 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
435 {
436 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
437 unsigned HOST_WIDE_INT val = a[len - 1];
438 if (excess > 0)
439 val <<= excess;
440 return val >> (HOST_BITS_PER_WIDE_INT - 1);
441 }
442
443 /*
444 * Comparisons, note that only equality is an operator. The other
445 * comparisons cannot be operators since they are inherently signed or
446 * unsigned and C++ has no such operators.
447 */
448
449 /* Return true if OP0 == OP1. */
450 bool
451 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
452 const HOST_WIDE_INT *op1, unsigned int op1len,
453 unsigned int prec)
454 {
455 int l0 = op0len - 1;
456 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
457
458 if (op0len != op1len)
459 return false;
460
461 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
462 {
463 /* It does not matter if we zext or sext here, we just have to
464 do both the same way. */
465 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
466 return false;
467 l0--;
468 }
469
470 while (l0 >= 0)
471 if (op0[l0] != op1[l0])
472 return false;
473 else
474 l0--;
475
476 return true;
477 }
478
479 /* Return true if OP0 < OP1 using signed comparisons. */
480 bool
481 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
482 unsigned int precision,
483 const HOST_WIDE_INT *op1, unsigned int op1len)
484 {
485 HOST_WIDE_INT s0, s1;
486 unsigned HOST_WIDE_INT u0, u1;
487 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
488 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
489 int l = MAX (op0len - 1, op1len - 1);
490
491 /* Only the top block is compared as signed. The rest are unsigned
492 comparisons. */
493 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
494 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
495 if (s0 < s1)
496 return true;
497 if (s0 > s1)
498 return false;
499
500 l--;
501 while (l >= 0)
502 {
503 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
504 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
505
506 if (u0 < u1)
507 return true;
508 if (u0 > u1)
509 return false;
510 l--;
511 }
512
513 return false;
514 }
515
516 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
517 signed compares. */
518 int
519 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
520 unsigned int precision,
521 const HOST_WIDE_INT *op1, unsigned int op1len)
522 {
523 HOST_WIDE_INT s0, s1;
524 unsigned HOST_WIDE_INT u0, u1;
525 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
526 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
527 int l = MAX (op0len - 1, op1len - 1);
528
529 /* Only the top block is compared as signed. The rest are unsigned
530 comparisons. */
531 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
532 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
533 if (s0 < s1)
534 return -1;
535 if (s0 > s1)
536 return 1;
537
538 l--;
539 while (l >= 0)
540 {
541 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
542 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
543
544 if (u0 < u1)
545 return -1;
546 if (u0 > u1)
547 return 1;
548 l--;
549 }
550
551 return 0;
552 }
553
554 /* Return true if OP0 < OP1 using unsigned comparisons. */
555 bool
556 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
557 unsigned int precision,
558 const HOST_WIDE_INT *op1, unsigned int op1len)
559 {
560 unsigned HOST_WIDE_INT x0;
561 unsigned HOST_WIDE_INT x1;
562 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
563 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
564 int l = MAX (op0len - 1, op1len - 1);
565
566 while (l >= 0)
567 {
568 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
569 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
570 if (x0 < x1)
571 return true;
572 if (x0 > x1)
573 return false;
574 l--;
575 }
576
577 return false;
578 }
579
580 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
581 unsigned compares. */
582 int
583 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
584 unsigned int precision,
585 const HOST_WIDE_INT *op1, unsigned int op1len)
586 {
587 unsigned HOST_WIDE_INT x0;
588 unsigned HOST_WIDE_INT x1;
589 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
590 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
591 int l = MAX (op0len - 1, op1len - 1);
592
593 while (l >= 0)
594 {
595 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
596 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
597 if (x0 < x1)
598 return -1;
599 if (x0 > x1)
600 return 1;
601 l--;
602 }
603
604 return 0;
605 }
606
607 /*
608 * Extension.
609 */
610
611 /* Sign-extend the number represented by XVAL and XLEN into VAL,
612 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
613 and VAL have PRECISION bits. */
614 unsigned int
615 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
616 unsigned int xlen, unsigned int precision, unsigned int offset)
617 {
618 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
619 /* Extending beyond the precision is a no-op. If we have only stored
620 OFFSET bits or fewer, the rest are already signs. */
621 if (offset >= precision || len >= xlen)
622 {
623 for (unsigned i = 0; i < xlen; ++i)
624 val[i] = xval[i];
625 return xlen;
626 }
627 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
628 for (unsigned int i = 0; i < len; i++)
629 val[i] = xval[i];
630 if (suboffset > 0)
631 {
632 val[len] = sext_hwi (xval[len], suboffset);
633 len += 1;
634 }
635 return canonize (val, len, precision);
636 }
637
638 /* Zero-extend the number represented by XVAL and XLEN into VAL,
639 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
640 and VAL have PRECISION bits. */
641 unsigned int
642 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
643 unsigned int xlen, unsigned int precision, unsigned int offset)
644 {
645 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
646 /* Extending beyond the precision is a no-op. If we have only stored
647 OFFSET bits or fewer, and the upper stored bit is zero, then there
648 is nothing to do. */
649 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
650 {
651 for (unsigned i = 0; i < xlen; ++i)
652 val[i] = xval[i];
653 return xlen;
654 }
655 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
656 for (unsigned int i = 0; i < len; i++)
657 val[i] = i < xlen ? xval[i] : -1;
658 if (suboffset > 0)
659 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
660 else
661 val[len] = 0;
662 return canonize (val, len + 1, precision);
663 }
664
665 /*
666 * Masking, inserting, shifting, rotating.
667 */
668
669 /* Insert WIDTH bits from Y into X starting at START. */
670 wide_int
671 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
672 unsigned int width)
673 {
674 wide_int result;
675 wide_int mask;
676 wide_int tmp;
677
678 unsigned int precision = x.get_precision ();
679 if (start >= precision)
680 return x;
681
682 gcc_checking_assert (precision >= width);
683
684 if (start + width >= precision)
685 width = precision - start;
686
687 mask = wi::shifted_mask (start, width, false, precision);
688 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
689 result = tmp & mask;
690
691 tmp = wi::bit_and_not (x, mask);
692 result = result | tmp;
693
694 return result;
695 }
696
697 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
698 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
699 bits. */
700 unsigned int
701 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
702 unsigned int xlen, unsigned int precision, unsigned int bit)
703 {
704 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
705 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
706
707 if (block + 1 >= xlen)
708 {
709 /* The operation either affects the last current block or needs
710 a new block. */
711 unsigned int len = block + 1;
712 for (unsigned int i = 0; i < len; i++)
713 val[i] = safe_uhwi (xval, xlen, i);
714 val[block] |= HOST_WIDE_INT_1U << subbit;
715
716 /* If the bit we just set is at the msb of the block, make sure
717 that any higher bits are zeros. */
718 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
719 {
720 val[len++] = 0;
721 return len;
722 }
723 return canonize (val, len, precision);
724 }
725 else
726 {
727 for (unsigned int i = 0; i < xlen; i++)
728 val[i] = xval[i];
729 val[block] |= HOST_WIDE_INT_1U << subbit;
730 return canonize (val, xlen, precision);
731 }
732 }
733
734 /* bswap THIS. */
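/* For example, as a sketch with a 32-bit precision: byte-swapping the
   value 0x11223344 yields 0x44332211.  */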
735 wide_int
736 wide_int_storage::bswap () const
737 {
738 wide_int result = wide_int::create (precision);
739 unsigned int i, s;
740 unsigned int len = BLOCKS_NEEDED (precision);
741 unsigned int xlen = get_len ();
742 const HOST_WIDE_INT *xval = get_val ();
743 HOST_WIDE_INT *val = result.write_val ();
744
745 /* This is not a well defined operation if the precision is not a
746 multiple of 8. */
747 gcc_assert ((precision & 0x7) == 0);
748
749 for (i = 0; i < len; i++)
750 val[i] = 0;
751
752 /* Only swap the bytes that are not the padding. */
753 for (s = 0; s < precision; s += 8)
754 {
755 unsigned int d = precision - s - 8;
756 unsigned HOST_WIDE_INT byte;
757
758 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
759 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
760
761 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
762
763 block = d / HOST_BITS_PER_WIDE_INT;
764 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
765
766 val[block] |= byte << offset;
767 }
768
769 result.set_len (canonize (val, len, precision));
770 return result;
771 }
772
773 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
774 above that up to PREC are zeros. The result is inverted if NEGATE
775 is true. Return the number of blocks in VAL. */
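/* As a sketch, using the wide_int overload declared in wide-int.h:
   wi::mask (3, false, 8) is 0b00000111 and wi::mask (3, true, 8) is
   0b11111000 (i.e. -8 in 8 bits).  */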
776 unsigned int
777 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
778 unsigned int prec)
779 {
780 if (width >= prec)
781 {
782 val[0] = negate ? 0 : -1;
783 return 1;
784 }
785 else if (width == 0)
786 {
787 val[0] = negate ? -1 : 0;
788 return 1;
789 }
790
791 unsigned int i = 0;
792 while (i < width / HOST_BITS_PER_WIDE_INT)
793 val[i++] = negate ? 0 : -1;
794
795 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
796 if (shift != 0)
797 {
798 HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
799 val[i++] = negate ? ~last : last;
800 }
801 else
802 val[i++] = negate ? -1 : 0;
803
804 return i;
805 }
806
807 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
808 bits are ones, and the bits above that up to PREC are zeros. The result
809 is inverted if NEGATE is true. Return the number of blocks in VAL. */
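/* Similarly, as a sketch: wi::shifted_mask (2, 3, false, 8) is 0b00011100
   and wi::shifted_mask (2, 3, true, 8) is 0b11100011.  */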
810 unsigned int
811 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
812 bool negate, unsigned int prec)
813 {
814 if (start >= prec || width == 0)
815 {
816 val[0] = negate ? -1 : 0;
817 return 1;
818 }
819
820 if (width > prec - start)
821 width = prec - start;
822 unsigned int end = start + width;
823
824 unsigned int i = 0;
825 while (i < start / HOST_BITS_PER_WIDE_INT)
826 val[i++] = negate ? -1 : 0;
827
828 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
829 if (shift)
830 {
831 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
832 shift += width;
833 if (shift < HOST_BITS_PER_WIDE_INT)
834 {
835 /* case 000111000 */
836 block = (HOST_WIDE_INT_1U << shift) - block - 1;
837 val[i++] = negate ? ~block : block;
838 return i;
839 }
840 else
841 /* ...111000 */
842 val[i++] = negate ? block : ~block;
843 }
844
845 if (end >= prec)
846 {
847 if (!shift)
848 val[i++] = negate ? 0 : -1;
849 return i;
850 }
851
852 while (i < end / HOST_BITS_PER_WIDE_INT)
853 /* 1111111 */
854 val[i++] = negate ? 0 : -1;
855
856 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
857 if (shift != 0)
858 {
859 /* 000011111 */
860 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
861 val[i++] = negate ? ~block : block;
862 }
863 else
864 val[i++] = negate ? -1 : 0;
865
866 return i;
867 }
868
869 /*
870 * Logical operations.
871 */
872
873 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
874 unsigned int
875 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
876 unsigned int op0len, const HOST_WIDE_INT *op1,
877 unsigned int op1len, unsigned int prec)
878 {
879 int l0 = op0len - 1;
880 int l1 = op1len - 1;
881 bool need_canon = true;
882
883 unsigned int len = MAX (op0len, op1len);
884 if (l0 > l1)
885 {
886 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
887 if (op1mask == 0)
888 {
889 l0 = l1;
890 len = l1 + 1;
891 }
892 else
893 {
894 need_canon = false;
895 while (l0 > l1)
896 {
897 val[l0] = op0[l0];
898 l0--;
899 }
900 }
901 }
902 else if (l1 > l0)
903 {
904 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
905 if (op0mask == 0)
906 len = l0 + 1;
907 else
908 {
909 need_canon = false;
910 while (l1 > l0)
911 {
912 val[l1] = op1[l1];
913 l1--;
914 }
915 }
916 }
917
918 while (l0 >= 0)
919 {
920 val[l0] = op0[l0] & op1[l0];
921 l0--;
922 }
923
924 if (need_canon)
925 len = canonize (val, len, prec);
926
927 return len;
928 }
929
930 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
931 unsigned int
932 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
933 unsigned int op0len, const HOST_WIDE_INT *op1,
934 unsigned int op1len, unsigned int prec)
935 {
936 wide_int result;
937 int l0 = op0len - 1;
938 int l1 = op1len - 1;
939 bool need_canon = true;
940
941 unsigned int len = MAX (op0len, op1len);
942 if (l0 > l1)
943 {
944 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
945 if (op1mask != 0)
946 {
947 l0 = l1;
948 len = l1 + 1;
949 }
950 else
951 {
952 need_canon = false;
953 while (l0 > l1)
954 {
955 val[l0] = op0[l0];
956 l0--;
957 }
958 }
959 }
960 else if (l1 > l0)
961 {
962 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
963 if (op0mask == 0)
964 len = l0 + 1;
965 else
966 {
967 need_canon = false;
968 while (l1 > l0)
969 {
970 val[l1] = ~op1[l1];
971 l1--;
972 }
973 }
974 }
975
976 while (l0 >= 0)
977 {
978 val[l0] = op0[l0] & ~op1[l0];
979 l0--;
980 }
981
982 if (need_canon)
983 len = canonize (val, len, prec);
984
985 return len;
986 }
987
988 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
989 unsigned int
990 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
991 unsigned int op0len, const HOST_WIDE_INT *op1,
992 unsigned int op1len, unsigned int prec)
993 {
994 wide_int result;
995 int l0 = op0len - 1;
996 int l1 = op1len - 1;
997 bool need_canon = true;
998
999 unsigned int len = MAX (op0len, op1len);
1000 if (l0 > l1)
1001 {
1002 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1003 if (op1mask != 0)
1004 {
1005 l0 = l1;
1006 len = l1 + 1;
1007 }
1008 else
1009 {
1010 need_canon = false;
1011 while (l0 > l1)
1012 {
1013 val[l0] = op0[l0];
1014 l0--;
1015 }
1016 }
1017 }
1018 else if (l1 > l0)
1019 {
1020 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1021 if (op0mask != 0)
1022 len = l0 + 1;
1023 else
1024 {
1025 need_canon = false;
1026 while (l1 > l0)
1027 {
1028 val[l1] = op1[l1];
1029 l1--;
1030 }
1031 }
1032 }
1033
1034 while (l0 >= 0)
1035 {
1036 val[l0] = op0[l0] | op1[l0];
1037 l0--;
1038 }
1039
1040 if (need_canon)
1041 len = canonize (val, len, prec);
1042
1043 return len;
1044 }
1045
1046 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1047 unsigned int
1048 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1049 unsigned int op0len, const HOST_WIDE_INT *op1,
1050 unsigned int op1len, unsigned int prec)
1051 {
1052 wide_int result;
1053 int l0 = op0len - 1;
1054 int l1 = op1len - 1;
1055 bool need_canon = true;
1056
1057 unsigned int len = MAX (op0len, op1len);
1058 if (l0 > l1)
1059 {
1060 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1061 if (op1mask == 0)
1062 {
1063 l0 = l1;
1064 len = l1 + 1;
1065 }
1066 else
1067 {
1068 need_canon = false;
1069 while (l0 > l1)
1070 {
1071 val[l0] = op0[l0];
1072 l0--;
1073 }
1074 }
1075 }
1076 else if (l1 > l0)
1077 {
1078 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1079 if (op0mask != 0)
1080 len = l0 + 1;
1081 else
1082 {
1083 need_canon = false;
1084 while (l1 > l0)
1085 {
1086 val[l1] = ~op1[l1];
1087 l1--;
1088 }
1089 }
1090 }
1091
1092 while (l0 >= 0)
1093 {
1094 val[l0] = op0[l0] | ~op1[l0];
1095 l0--;
1096 }
1097
1098 if (need_canon)
1099 len = canonize (val, len, prec);
1100
1101 return len;
1102 }
1103
1104 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1105 unsigned int
1106 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1107 unsigned int op0len, const HOST_WIDE_INT *op1,
1108 unsigned int op1len, unsigned int prec)
1109 {
1110 wide_int result;
1111 int l0 = op0len - 1;
1112 int l1 = op1len - 1;
1113
1114 unsigned int len = MAX (op0len, op1len);
1115 if (l0 > l1)
1116 {
1117 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1118 while (l0 > l1)
1119 {
1120 val[l0] = op0[l0] ^ op1mask;
1121 l0--;
1122 }
1123 }
1124
1125 if (l1 > l0)
1126 {
1127 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1128 while (l1 > l0)
1129 {
1130 val[l1] = op0mask ^ op1[l1];
1131 l1--;
1132 }
1133 }
1134
1135 while (l0 >= 0)
1136 {
1137 val[l0] = op0[l0] ^ op1[l0];
1138 l0--;
1139 }
1140
1141 return canonize (val, len, prec);
1142 }
1143
1144 /*
1145 * Math.
1146 */
1147
1148 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1149 whether the result overflows when OP0 and OP1 are treated as having
1150 signedness SGN. Return the number of blocks in VAL. */
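/* A sketch of the signed-overflow test used below: overflow occurred iff
   the operands have the same sign and the result's sign differs, which is
   exactly when (val ^ o0) & (val ^ o1) has the bit at position PREC - 1
   set.  For instance, in 8 bits 100 + 100 wraps to -56, and both xor
   terms then have bit 7 set.  */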
1151 unsigned int
1152 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1153 unsigned int op0len, const HOST_WIDE_INT *op1,
1154 unsigned int op1len, unsigned int prec,
1155 signop sgn, wi::overflow_type *overflow)
1156 {
1157 unsigned HOST_WIDE_INT o0 = 0;
1158 unsigned HOST_WIDE_INT o1 = 0;
1159 unsigned HOST_WIDE_INT x = 0;
1160 unsigned HOST_WIDE_INT carry = 0;
1161 unsigned HOST_WIDE_INT old_carry = 0;
1162 unsigned HOST_WIDE_INT mask0, mask1;
1163 unsigned int i;
1164
1165 unsigned int len = MAX (op0len, op1len);
1166 mask0 = -top_bit_of (op0, op0len, prec);
1167 mask1 = -top_bit_of (op1, op1len, prec);
1168 /* Add all of the explicitly defined elements. */
1169
1170 for (i = 0; i < len; i++)
1171 {
1172 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1173 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1174 x = o0 + o1 + carry;
1175 val[i] = x;
1176 old_carry = carry;
1177 carry = carry == 0 ? x < o0 : x <= o0;
1178 }
1179
1180 if (len * HOST_BITS_PER_WIDE_INT < prec)
1181 {
1182 val[len] = mask0 + mask1 + carry;
1183 len++;
1184 if (overflow)
1185 *overflow
1186 = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1187 }
1188 else if (overflow)
1189 {
1190 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1191 if (sgn == SIGNED)
1192 {
1193 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1194 if ((HOST_WIDE_INT) (x << shift) < 0)
1195 {
1196 if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
1197 *overflow = wi::OVF_UNDERFLOW;
1198 else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
1199 *overflow = wi::OVF_OVERFLOW;
1200 else
1201 *overflow = wi::OVF_NONE;
1202 }
1203 else
1204 *overflow = wi::OVF_NONE;
1205 }
1206 else
1207 {
1208 /* Put the MSBs of X and O0 in the top of the HWI. */
1209 x <<= shift;
1210 o0 <<= shift;
1211 if (old_carry)
1212 *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1213 else
1214 *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1215 }
1216 }
1217
1218 return canonize (val, len, prec);
1219 }
1220
1221 /* Subroutines of the multiplication and division operations. Unpack
1222 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1223 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1224 uncompressing the top bit of INPUT[IN_LEN - 1]. */
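/* As a concrete sketch on a host with 64-bit HOST_WIDE_INT (32-bit
   halves): unpacking the single block 0x1122334455667788 yields the
   half-blocks { 0x55667788, 0x11223344 }, least-significant half first;
   any remaining output halves are filled with the sign mask.  */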
1225 static void
1226 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1227 unsigned int in_len, unsigned int out_len,
1228 unsigned int prec, signop sgn)
1229 {
1230 unsigned int i;
1231 unsigned int j = 0;
1232 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1233 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1234 HOST_WIDE_INT mask;
1235
1236 if (sgn == SIGNED)
1237 {
1238 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1239 mask &= HALF_INT_MASK;
1240 }
1241 else
1242 mask = 0;
1243
1244 for (i = 0; i < blocks_needed - 1; i++)
1245 {
1246 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1247 result[j++] = x;
1248 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1249 }
1250
1251 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1252 if (small_prec)
1253 {
1254 if (sgn == SIGNED)
1255 x = sext_hwi (x, small_prec);
1256 else
1257 x = zext_hwi (x, small_prec);
1258 }
1259 result[j++] = x;
1260 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1261
1262 /* Smear the sign bit. */
1263 while (j < out_len)
1264 result[j++] = mask;
1265 }
1266
1267 /* The inverse of wi_unpack. IN_LEN is the number of input
1268 blocks and PRECISION is the precision of the result. Return the
1269 number of blocks in the canonicalized result. */
1270 static unsigned int
1271 wi_pack (HOST_WIDE_INT *result,
1272 const unsigned HOST_HALF_WIDE_INT *input,
1273 unsigned int in_len, unsigned int precision)
1274 {
1275 unsigned int i = 0;
1276 unsigned int j = 0;
1277 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1278
1279 while (i + 1 < in_len)
1280 {
1281 result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1282 | ((unsigned HOST_WIDE_INT) input[i + 1]
1283 << HOST_BITS_PER_HALF_WIDE_INT));
1284 i += 2;
1285 }
1286
1287 /* Handle the case where in_len is odd. For this we zero extend. */
1288 if (in_len & 1)
1289 result[j++] = (unsigned HOST_WIDE_INT) input[i];
1290 else if (j < blocks_needed)
1291 result[j++] = 0;
1292 return canonize (result, j, precision);
1293 }
1294
1295 /* Multiply OP1 by OP2. If HIGH is set, only the upper half of the
1296 result is returned.
1297
1298 If HIGH is not set, throw away the upper half after the check is
1299 made to see if it overflows. Unfortunately there is no better way
1300 to check for overflow than to do this. If OVERFLOW is nonnull,
1301 record in *OVERFLOW whether the result overflowed. SGN controls
1302 the signedness and is used to check overflow or if HIGH is set.
1303
1304 NOTE: Overflow type for signed overflow is not yet implemented. */
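/* The full multiply below works on HOST_HALF_WIDE_INT "digits" so that a
   digit product plus the running carries still fits in an unsigned
   HOST_WIDE_INT: on a 64-bit host, (2^32 - 1) * (2^32 - 1) plus two
   32-bit carries is still below 2^64.  */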
1305 unsigned int
1306 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1307 unsigned int op1len, const HOST_WIDE_INT *op2val,
1308 unsigned int op2len, unsigned int prec, signop sgn,
1309 wi::overflow_type *overflow, bool high)
1310 {
1311 unsigned HOST_WIDE_INT o0, o1, k, t;
1312 unsigned int i;
1313 unsigned int j;
1314 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1315 unsigned int half_blocks_needed = blocks_needed * 2;
1316 /* The sizes here are scaled to support a 2x largest mode by 2x
1317 largest mode yielding a 4x largest mode result. This is what is
1318 needed by vpn. */
1319
1320 unsigned HOST_HALF_WIDE_INT
1321 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1322 unsigned HOST_HALF_WIDE_INT
1323 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1324 /* The '2' in 'R' is because we are internally doing a full
1325 multiply. */
1326 unsigned HOST_HALF_WIDE_INT
1327 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1328 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1329
1330 /* If the top level routine did not really pass in an overflow, then
1331 just make sure that we never attempt to set it. */
1332 bool needs_overflow = (overflow != 0);
1333 if (needs_overflow)
1334 *overflow = wi::OVF_NONE;
1335
1336 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1337 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1338
1339 /* This is a surprisingly common case, so do it first. */
1340 if (op1 == 0 || op2 == 0)
1341 {
1342 val[0] = 0;
1343 return 1;
1344 }
1345
1346 #ifdef umul_ppmm
1347 if (sgn == UNSIGNED)
1348 {
1349 /* If the inputs are single HWIs and the output has room for at
1350 least two HWIs, we can use umul_ppmm directly. */
1351 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1352 && wi::fits_uhwi_p (op1)
1353 && wi::fits_uhwi_p (op2))
1354 {
1355 /* This case never overflows. */
1356 if (high)
1357 {
1358 val[0] = 0;
1359 return 1;
1360 }
1361 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1362 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1363 {
1364 val[2] = 0;
1365 return 3;
1366 }
1367 return 1 + (val[1] != 0 || val[0] < 0);
1368 }
1369 /* Likewise if the output is a full single HWI, except that the
1370 upper HWI of the result is only used for determining overflow.
1371 (We handle this case inline when overflow isn't needed.) */
1372 else if (prec == HOST_BITS_PER_WIDE_INT)
1373 {
1374 unsigned HOST_WIDE_INT upper;
1375 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1376 if (needs_overflow)
1377 /* Unsigned overflow can only be +OVERFLOW. */
1378 *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1379 if (high)
1380 val[0] = upper;
1381 return 1;
1382 }
1383 }
1384 #endif
1385
1386 /* Handle multiplications by 1. */
1387 if (op1 == 1)
1388 {
1389 if (high)
1390 {
1391 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1392 return 1;
1393 }
1394 for (i = 0; i < op2len; i++)
1395 val[i] = op2val[i];
1396 return op2len;
1397 }
1398 if (op2 == 1)
1399 {
1400 if (high)
1401 {
1402 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1403 return 1;
1404 }
1405 for (i = 0; i < op1len; i++)
1406 val[i] = op1val[i];
1407 return op1len;
1408 }
1409
1410 /* If we need to check for overflow, we can only do half wide
1411 multiplies quickly because we need to look at the top bits to
1412 check for the overflow. */
1413 if ((high || needs_overflow)
1414 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1415 {
1416 unsigned HOST_WIDE_INT r;
1417
1418 if (sgn == SIGNED)
1419 {
1420 o0 = op1.to_shwi ();
1421 o1 = op2.to_shwi ();
1422 }
1423 else
1424 {
1425 o0 = op1.to_uhwi ();
1426 o1 = op2.to_uhwi ();
1427 }
1428
1429 r = o0 * o1;
1430 if (needs_overflow)
1431 {
1432 if (sgn == SIGNED)
1433 {
1434 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1435 /* FIXME: Signed overflow type is not implemented yet. */
1436 *overflow = OVF_UNKNOWN;
1437 }
1438 else
1439 {
1440 if ((r >> prec) != 0)
1441 /* Unsigned overflow can only be +OVERFLOW. */
1442 *overflow = OVF_OVERFLOW;
1443 }
1444 }
1445 val[0] = high ? r >> prec : r;
1446 return 1;
1447 }
1448
1449 /* We do unsigned mul and then correct it. */
1450 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1451 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1452
1453 /* The 2 is for a full mult. */
1454 memset (r, 0, half_blocks_needed * 2
1455 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1456
1457 for (j = 0; j < half_blocks_needed; j++)
1458 {
1459 k = 0;
1460 for (i = 0; i < half_blocks_needed; i++)
1461 {
1462 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1463 + r[i + j] + k);
1464 r[i + j] = t & HALF_INT_MASK;
1465 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1466 }
1467 r[j + half_blocks_needed] = k;
1468 }
1469
1470 /* We did unsigned math above. For signed we must adjust the
1471 product (assuming we need to see that). */
1472 if (sgn == SIGNED && (high || needs_overflow))
1473 {
1474 unsigned HOST_WIDE_INT b;
1475 if (wi::neg_p (op1))
1476 {
1477 b = 0;
1478 for (i = 0; i < half_blocks_needed; i++)
1479 {
1480 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1481 - (unsigned HOST_WIDE_INT)v[i] - b;
1482 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1483 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1484 }
1485 }
1486 if (wi::neg_p (op2))
1487 {
1488 b = 0;
1489 for (i = 0; i < half_blocks_needed; i++)
1490 {
1491 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1492 - (unsigned HOST_WIDE_INT)u[i] - b;
1493 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1494 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1495 }
1496 }
1497 }
1498
1499 if (needs_overflow)
1500 {
1501 HOST_WIDE_INT top;
1502
1503 /* For unsigned, overflow is true if any of the top bits are set.
1504 For signed, overflow is true if any of the top bits are not equal
1505 to the sign bit. */
1506 if (sgn == UNSIGNED)
1507 top = 0;
1508 else
1509 {
1510 top = r[(half_blocks_needed) - 1];
1511 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1512 top &= mask;
1513 }
1514
1515 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1516 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1517 /* FIXME: Signed overflow type is not implemented yet. */
1518 *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
1519 }
1520
1521 int r_offset = high ? half_blocks_needed : 0;
1522 return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
1523 }
1524
1525 /* Compute the population count of X. */
1526 int
1527 wi::popcount (const wide_int_ref &x)
1528 {
1529 unsigned int i;
1530 int count;
1531
1532 /* The high order block is special if it is the last block and the
1533 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1534 have to clear out any ones above the precision before doing
1535 popcount on this block. */
1536 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1537 unsigned int stop = x.len;
1538 if (count < 0)
1539 {
1540 count = popcount_hwi (x.uhigh () << -count);
1541 stop -= 1;
1542 }
1543 else
1544 {
1545 if (x.sign_mask () >= 0)
1546 count = 0;
1547 }
1548
1549 for (i = 0; i < stop; ++i)
1550 count += popcount_hwi (x.val[i]);
1551
1552 return count;
1553 }
1554
1555 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1556 whether the result overflows when OP0 and OP1 are treated as having
1557 signedness SGN. Return the number of blocks in VAL. */
1558 unsigned int
1559 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1560 unsigned int op0len, const HOST_WIDE_INT *op1,
1561 unsigned int op1len, unsigned int prec,
1562 signop sgn, wi::overflow_type *overflow)
1563 {
1564 unsigned HOST_WIDE_INT o0 = 0;
1565 unsigned HOST_WIDE_INT o1 = 0;
1566 unsigned HOST_WIDE_INT x = 0;
1567 /* We implement subtraction as an in place negate and add. Negation
1568 is just inversion and add 1, so we can do the add of 1 by just
1569 starting the borrow in of the first element at 1. */
1570 unsigned HOST_WIDE_INT borrow = 0;
1571 unsigned HOST_WIDE_INT old_borrow = 0;
1572
1573 unsigned HOST_WIDE_INT mask0, mask1;
1574 unsigned int i;
1575
1576 unsigned int len = MAX (op0len, op1len);
1577 mask0 = -top_bit_of (op0, op0len, prec);
1578 mask1 = -top_bit_of (op1, op1len, prec);
1579
1580 /* Subtract all of the explicitly defined elements. */
1581 for (i = 0; i < len; i++)
1582 {
1583 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1584 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1585 x = o0 - o1 - borrow;
1586 val[i] = x;
1587 old_borrow = borrow;
1588 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1589 }
1590
1591 if (len * HOST_BITS_PER_WIDE_INT < prec)
1592 {
1593 val[len] = mask0 - mask1 - borrow;
1594 len++;
1595 if (overflow)
1596 *overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
1597 }
1598 else if (overflow)
1599 {
1600 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1601 if (sgn == SIGNED)
1602 {
1603 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1604 if ((HOST_WIDE_INT) (x << shift) < 0)
1605 {
1606 if (o0 > o1)
1607 *overflow = OVF_UNDERFLOW;
1608 else if (o0 < o1)
1609 *overflow = OVF_OVERFLOW;
1610 else
1611 *overflow = OVF_NONE;
1612 }
1613 else
1614 *overflow = OVF_NONE;
1615 }
1616 else
1617 {
1618 /* Put the MSBs of X and O0 in the top of the HWI. */
1619 x <<= shift;
1620 o0 <<= shift;
1621 if (old_borrow)
1622 *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
1623 else
1624 *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
1625 }
1626 }
1627
1628 return canonize (val, len, prec);
1629 }
1630
1631
1632 /*
1633 * Division and Mod
1634 */
1635
1636 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1637 algorithm is a small modification of the algorithm in Hacker's
1638 Delight by Warren, which itself is a small modification of Knuth's
1639 algorithm. M is the number of significant elements of U; however,
1640 there needs to be at least one extra element of B_DIVIDEND
1641 allocated. N is the number of elements of B_DIVISOR. */
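/* In other words, this is Knuth's Algorithm D working in base
   2^HOST_BITS_PER_HALF_WIDE_INT.  As a rough sketch on a 64-bit host,
   dividing a 128-bit dividend by a 64-bit divisor uses up to M == 4
   dividend digits and N == 2 divisor digits of 32 bits each.  */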
1642 static void
1643 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1644 unsigned HOST_HALF_WIDE_INT *b_remainder,
1645 unsigned HOST_HALF_WIDE_INT *b_dividend,
1646 unsigned HOST_HALF_WIDE_INT *b_divisor,
1647 int m, int n)
1648 {
1649 /* The "digits" are HOST_HALF_WIDE_INTs, each half the size of a
1650 HOST_WIDE_INT and stored in the lower bits of each word. This
1651 algorithm should work properly on both 32 and 64 bit
1652 machines. */
1653 unsigned HOST_WIDE_INT b
1654 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1655 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1656 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1657 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1658 HOST_WIDE_INT t, k;
1659 int i, j, s;
1660
1661 /* Single digit divisor. */
1662 if (n == 1)
1663 {
1664 k = 0;
1665 for (j = m - 1; j >= 0; j--)
1666 {
1667 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1668 k = ((k * b + b_dividend[j])
1669 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1670 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1671 }
1672 b_remainder[0] = k;
1673 return;
1674 }
1675
1676 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1677
1678 if (s)
1679 {
1680 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1681 algorithm, we can overwrite b_dividend and b_divisor, so we do
1682 that. */
1683 for (i = n - 1; i > 0; i--)
1684 b_divisor[i] = (b_divisor[i] << s)
1685 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1686 b_divisor[0] = b_divisor[0] << s;
1687
1688 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1689 for (i = m - 1; i > 0; i--)
1690 b_dividend[i] = (b_dividend[i] << s)
1691 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1692 b_dividend[0] = b_dividend[0] << s;
1693 }
1694
1695 /* Main loop. */
1696 for (j = m - n; j >= 0; j--)
1697 {
1698 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1699 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1700 again:
1701 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1702 {
1703 qhat -= 1;
1704 rhat += b_divisor[n-1];
1705 if (rhat < b)
1706 goto again;
1707 }
1708
1709 /* Multiply and subtract. */
1710 k = 0;
1711 for (i = 0; i < n; i++)
1712 {
1713 p = qhat * b_divisor[i];
1714 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1715 b_dividend[i + j] = t;
1716 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1717 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1718 }
1719 t = b_dividend[j+n] - k;
1720 b_dividend[j+n] = t;
1721
1722 b_quotient[j] = qhat;
1723 if (t < 0)
1724 {
1725 b_quotient[j] -= 1;
1726 k = 0;
1727 for (i = 0; i < n; i++)
1728 {
1729 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1730 b_dividend[i+j] = t;
1731 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1732 }
1733 b_dividend[j+n] += k;
1734 }
1735 }
1736 if (s)
1737 for (i = 0; i < n; i++)
1738 b_remainder[i] = (b_dividend[i] >> s)
1739 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1740 else
1741 for (i = 0; i < n; i++)
1742 b_remainder[i] = b_dividend[i];
1743 }
1744
1745
1746 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1747 the result. If QUOTIENT is nonnull, store the value of the quotient
1748 there and return the number of blocks in it. The return value is
1749 not defined otherwise. If REMAINDER is nonnull, store the value
1750 of the remainder there and store the number of blocks in
1751 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1752 the division overflowed. */
1753 unsigned int
1754 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1755 HOST_WIDE_INT *remainder,
1756 const HOST_WIDE_INT *dividend_val,
1757 unsigned int dividend_len, unsigned int dividend_prec,
1758 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1759 unsigned int divisor_prec, signop sgn,
1760 wi::overflow_type *oflow)
1761 {
1762 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1763 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1764 unsigned HOST_HALF_WIDE_INT
1765 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1766 unsigned HOST_HALF_WIDE_INT
1767 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1768 unsigned HOST_HALF_WIDE_INT
1769 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1770 unsigned HOST_HALF_WIDE_INT
1771 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1772 unsigned int m, n;
1773 bool dividend_neg = false;
1774 bool divisor_neg = false;
1775 bool overflow = false;
1776 wide_int neg_dividend, neg_divisor;
1777
1778 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1779 dividend_prec);
1780 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1781 divisor_prec);
1782 if (divisor == 0)
1783 overflow = true;
1784
1785 /* The smallest signed number / -1 causes overflow. The dividend_len
1786 check is for speed rather than correctness. */
1787 if (sgn == SIGNED
1788 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1789 && divisor == -1
1790 && wi::only_sign_bit_p (dividend))
1791 overflow = true;
1792
1793 /* Handle the overflow cases. Viewed as an unsigned value, the quotient of
1794 (signed min / -1) has the same representation as the original dividend.
1795 We have traditionally made division by zero act as division by one,
1796 so there too we use the original dividend. */
1797 if (overflow)
1798 {
1799 if (remainder)
1800 {
1801 *remainder_len = 1;
1802 remainder[0] = 0;
1803 }
1804 if (oflow)
1805 *oflow = OVF_OVERFLOW;
1806 if (quotient)
1807 for (unsigned int i = 0; i < dividend_len; ++i)
1808 quotient[i] = dividend_val[i];
1809 return dividend_len;
1810 }
1811
1812 if (oflow)
1813 *oflow = OVF_NONE;
1814
1815 /* Do it on the host if you can. */
1816 if (sgn == SIGNED
1817 && wi::fits_shwi_p (dividend)
1818 && wi::fits_shwi_p (divisor))
1819 {
1820 HOST_WIDE_INT o0 = dividend.to_shwi ();
1821 HOST_WIDE_INT o1 = divisor.to_shwi ();
1822
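      /* The host expression o0 / o1 itself overflows for
	 HOST_WIDE_INT_MIN / -1, so build that quotient by hand.  The
	 checking assert below guarantees that the true result (+2^63 on a
	 64-bit host) is representable at DIVIDEND_PREC; it takes two
	 blocks, a low block with only the top bit set and a zero high
	 block marking the value as positive.  */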
1823 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1824 {
1825 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1826 if (quotient)
1827 {
1828 quotient[0] = HOST_WIDE_INT_MIN;
1829 quotient[1] = 0;
1830 }
1831 if (remainder)
1832 {
1833 remainder[0] = 0;
1834 *remainder_len = 1;
1835 }
1836 return 2;
1837 }
1838 else
1839 {
1840 if (quotient)
1841 quotient[0] = o0 / o1;
1842 if (remainder)
1843 {
1844 remainder[0] = o0 % o1;
1845 *remainder_len = 1;
1846 }
1847 return 1;
1848 }
1849 }
1850
1851 if (sgn == UNSIGNED
1852 && wi::fits_uhwi_p (dividend)
1853 && wi::fits_uhwi_p (divisor))
1854 {
1855 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1856 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1857 unsigned int quotient_len = 1;
1858
1859 if (quotient)
1860 {
1861 quotient[0] = o0 / o1;
1862 quotient_len = canonize_uhwi (quotient, dividend_prec);
1863 }
1864 if (remainder)
1865 {
1866 remainder[0] = o0 % o1;
1867 *remainder_len = canonize_uhwi (remainder, dividend_prec);
1868 }
1869 return quotient_len;
1870 }
1871
1872 /* Make the divisor and dividend positive and remember what we
1873 did. */
1874 if (sgn == SIGNED)
1875 {
1876 if (wi::neg_p (dividend))
1877 {
1878 neg_dividend = -dividend;
1879 dividend = neg_dividend;
1880 dividend_neg = true;
1881 }
1882 if (wi::neg_p (divisor))
1883 {
1884 neg_divisor = -divisor;
1885 divisor = neg_divisor;
1886 divisor_neg = true;
1887 }
1888 }
1889
1890 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1891 dividend_blocks_needed, dividend_prec, sgn);
1892 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1893 divisor_blocks_needed, divisor_prec, sgn);
1894
1895 m = dividend_blocks_needed;
1896 b_dividend[m] = 0;
1897 while (m > 1 && b_dividend[m - 1] == 0)
1898 m--;
1899
1900 n = divisor_blocks_needed;
1901 while (n > 1 && b_divisor[n - 1] == 0)
1902 n--;
1903
1904 memset (b_quotient, 0, sizeof (b_quotient));
1905
1906 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1907
1908 unsigned int quotient_len = 0;
1909 if (quotient)
1910 {
1911 quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1912       /* The quotient is negative if exactly one of the dividend or
1913	  divisor is negative.  */
1914 if (dividend_neg != divisor_neg)
1915 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1916 quotient_len, dividend_prec,
1917 UNSIGNED, 0);
1918 }
1919
1920 if (remainder)
1921 {
1922 *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1923 /* The remainder is always the same sign as the dividend. */
1924 if (dividend_neg)
1925 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1926 *remainder_len, dividend_prec,
1927 UNSIGNED, 0);
1928 }
1929
1930 return quotient_len;
1931 }
1932
1933 /*
1934 * Shifting, rotating and extraction.
1935 */
1936
1937 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1938 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1939 unsigned int
1940 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1941 unsigned int xlen, unsigned int precision,
1942 unsigned int shift)
1943 {
1944 /* Split the shift into a whole-block shift and a subblock shift. */
1945 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1946 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
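  /* For example, with 64-bit blocks a shift of 68 gives SKIP == 1 and
     SMALL_SHIFT == 4: the lowest output block is zero, the next is the
     lowest input block shifted up by 4, and each further block combines
     the low 60 bits of one input block with the top 4 bits of the input
     block below it.  */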
1947
1948 /* The whole-block shift fills with zeros. */
1949 unsigned int len = BLOCKS_NEEDED (precision);
1950 for (unsigned int i = 0; i < skip; ++i)
1951 val[i] = 0;
1952
1953 /* It's easier to handle the simple block case specially. */
1954 if (small_shift == 0)
1955 for (unsigned int i = skip; i < len; ++i)
1956 val[i] = safe_uhwi (xval, xlen, i - skip);
1957 else
1958 {
1959 /* The first unfilled output block is a left shift of the first
1960 block in XVAL. The other output blocks contain bits from two
1961 consecutive input blocks. */
1962 unsigned HOST_WIDE_INT carry = 0;
1963 for (unsigned int i = skip; i < len; ++i)
1964 {
1965 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1966 val[i] = (x << small_shift) | carry;
1967 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1968 }
1969 }
1970 return canonize (val, len, precision);
1971 }
1972
1973 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1974 number of blocks in VAL. The input has XPRECISION bits and the
1975 output has XPRECISION - SHIFT bits. */
1976 static unsigned int
1977 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1978 unsigned int xlen, unsigned int xprecision,
1979 unsigned int shift)
1980 {
1981 /* Split the shift into a whole-block shift and a subblock shift. */
1982 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1983 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1984
1985 /* Work out how many blocks are needed to store the significant bits
1986 (excluding the upper zeros or signs). */
1987 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1988
1989 /* It's easier to handle the simple block case specially. */
1990 if (small_shift == 0)
1991 for (unsigned int i = 0; i < len; ++i)
1992 val[i] = safe_uhwi (xval, xlen, i + skip);
1993 else
1994 {
1995 /* Each output block but the last is a combination of two input blocks.
1996 The last block is a right shift of the last block in XVAL. */
1997 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1998 for (unsigned int i = 0; i < len; ++i)
1999 {
2000 val[i] = curr >> small_shift;
2001 curr = safe_uhwi (xval, xlen, i + skip + 1);
2002 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
2003 }
2004 }
2005 return len;
2006 }
2007
2008 /* Logically right shift XVAL by SHIFT and store the result in VAL.
2009 Return the number of blocks in VAL. XVAL has XPRECISION bits and
2010 VAL has PRECISION bits. */
2011 unsigned int
2012 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2013 unsigned int xlen, unsigned int xprecision,
2014 unsigned int precision, unsigned int shift)
2015 {
2016 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
2017
2018 /* The value we just created has precision XPRECISION - SHIFT.
2019 Zero-extend it to wider precisions. */
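  /* For example, shifting a 128-bit value right by 70 leaves
     XPRECISION - SHIFT == 58 significant bits, so SMALL_PREC is 58 and
     the top block is zero-extended from bit 58.  When the intermediate
     width is an exact multiple of the block size, a top block that looks
     negative gets a fresh zero block appended instead.  */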
2020 if (precision > xprecision - shift)
2021 {
2022 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2023 if (small_prec)
2024 val[len - 1] = zext_hwi (val[len - 1], small_prec);
2025 else if (val[len - 1] < 0)
2026 {
2027 /* Add a new block with a zero. */
2028 val[len++] = 0;
2029 return len;
2030 }
2031 }
2032 return canonize (val, len, precision);
2033 }
2034
2035 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
2036 Return the number of blocks in VAL. XVAL has XPRECISION bits and
2037 VAL has PRECISION bits. */
2038 unsigned int
2039 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2040 unsigned int xlen, unsigned int xprecision,
2041 unsigned int precision, unsigned int shift)
2042 {
2043 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
2044
2045 /* The value we just created has precision XPRECISION - SHIFT.
2046    Sign-extend it to wider precisions. */
2047 if (precision > xprecision - shift)
2048 {
2049 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2050 if (small_prec)
2051 val[len - 1] = sext_hwi (val[len - 1], small_prec);
2052 }
2053 return canonize (val, len, precision);
2054 }
2055
2056 /* Return the number of leading (upper) zeros in X. */
2057 int
2058 wi::clz (const wide_int_ref &x)
2059 {
2060   /* Calculate how many bits there are above the highest represented block. */
2061 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2062
2063 unsigned HOST_WIDE_INT high = x.uhigh ();
2064 if (count < 0)
2065 /* The upper -COUNT bits of HIGH are not part of the value.
2066 Clear them out. */
2067 high = (high << -count) >> -count;
2068 else if (x.sign_mask () < 0)
2069 /* The upper bit is set, so there are no leading zeros. */
2070 return 0;
2071
2072 /* We don't need to look below HIGH. Either HIGH is nonzero,
2073 or the top bit of the block below is nonzero; clz_hwi is
2074 HOST_BITS_PER_WIDE_INT in the latter case. */
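  /* For example, the value 1 at precision 128 is stored as a single
     block, so COUNT == 128 - 64 == 64; HIGH == 1 gives clz_hwi (1) == 63,
     for a total of 127 leading zeros.  */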
2075 return count + clz_hwi (high);
2076 }
2077
2078 /* Return the number of redundant sign bits in X. (That is, the number
2079 of bits immediately below the sign bit that have the same value as
2080 the sign bit.) */
2081 int
2082 wi::clrsb (const wide_int_ref &x)
2083 {
2084   /* Calculate how many bits there are above the highest represented block. */
2085 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2086
2087 unsigned HOST_WIDE_INT high = x.uhigh ();
2088 unsigned HOST_WIDE_INT mask = -1;
2089 if (count < 0)
2090 {
2091 /* The upper -COUNT bits of HIGH are not part of the value.
2092 Clear them from both MASK and HIGH. */
2093 mask >>= -count;
2094 high &= mask;
2095 }
2096
2097 /* If the top bit is 1, count the number of leading 1s. If the top
2098 bit is zero, count the number of leading zeros. */
2099 if (high > mask / 2)
2100 high ^= mask;
2101
2102 /* There are no sign bits below the top block, so we don't need to look
2103 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2104 HIGH is 0. */
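  /* For example, -16 at precision 32 gives COUNT == -32, MASK == 0xffffffff
     and HIGH == 0xfffffff0; the top bit is set, so HIGH ^= MASK leaves 0xf
     and the result is -32 + clz_hwi (0xf) - 1 == -32 + 60 - 1 == 27,
     the number of redundant copies of the sign bit.  */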
2105 return count + clz_hwi (high) - 1;
2106 }
2107
2108 /* Return the number of trailing (lower) zeros in X. */
2109 int
2110 wi::ctz (const wide_int_ref &x)
2111 {
2112 if (x.len == 1 && x.ulow () == 0)
2113 return x.precision;
2114
2115 /* Having dealt with the zero case, there must be a block with a
2116 nonzero bit. We don't care about the bits above the first 1. */
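  /* For example, for the value 8 the loop below stops at block 0 and the
     result is ctz_hwi (8) == 3.  */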
2117 unsigned int i = 0;
2118 while (x.val[i] == 0)
2119 ++i;
2120 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2121 }
2122
2123 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2124 return -1. */
2125 int
2126 wi::exact_log2 (const wide_int_ref &x)
2127 {
2128 /* Reject cases where there are implicit -1 blocks above HIGH. */
2129 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2130 return -1;
2131
2132 /* Set CRUX to the index of the entry that should be nonzero.
2133 If the top block is zero then the next lowest block (if any)
2134 must have the high bit set. */
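  /* For example, 1 << 63 at precision 128 is stored as the two blocks
     { 0x8000000000000000, 0 }; the top block is zero, so CRUX drops to
     block 0, and ::exact_log2 on that block gives 63.  */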
2135 unsigned int crux = x.len - 1;
2136 if (crux > 0 && x.val[crux] == 0)
2137 crux -= 1;
2138
2139 /* Check that all lower blocks are zero. */
2140 for (unsigned int i = 0; i < crux; ++i)
2141 if (x.val[i] != 0)
2142 return -1;
2143
2144 /* Get a zero-extended form of block CRUX. */
2145 unsigned HOST_WIDE_INT hwi = x.val[crux];
2146 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2147 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2148
2149 /* Now it's down to whether HWI is a power of 2. */
2150 int res = ::exact_log2 (hwi);
2151 if (res >= 0)
2152 res += crux * HOST_BITS_PER_WIDE_INT;
2153 return res;
2154 }
2155
2156 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2157 int
2158 wi::floor_log2 (const wide_int_ref &x)
2159 {
2160 return x.precision - 1 - clz (x);
2161 }
2162
2163 /* Return the index of the first (lowest) set bit in X, counting from 1.
2164 Return 0 if X is 0. */
2165 int
2166 wi::ffs (const wide_int_ref &x)
2167 {
2168 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2169 }
2170
2171 /* Return true if sign-extending X to have precision PRECISION would give
2172 the minimum signed value at that precision. */
2173 bool
2174 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2175 {
2176 return ctz (x) + 1 == int (precision);
2177 }
2178
2179 /* Return true if X represents the minimum signed value. */
2180 bool
2181 wi::only_sign_bit_p (const wide_int_ref &x)
2182 {
2183 return only_sign_bit_p (x, x.precision);
2184 }
2185
2186 /* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2187 down to the previous value that has no bits set outside MASK.
2188 This rounding wraps for signed values if VAL is negative and
2189 the top bit of MASK is clear.
2190
2191 For example, round_down_for_mask (6, 0xf1) would give 1 and
2192 round_down_for_mask (24, 0xf1) would give 17. */
2193
2194 wide_int
2195 wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
2196 {
2197 /* Get the bits in VAL that are outside the mask. */
2198 wide_int extra_bits = wi::bit_and_not (val, mask);
2199 if (extra_bits == 0)
2200 return val;
2201
2202 /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
2203 below that bit. */
2204 unsigned int precision = val.get_precision ();
2205 wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
2206 false, precision);
2207
2208 /* Clear the bits that aren't in MASK, but ensure that all bits
2209 in MASK below the top cleared bit are set. */
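  /* For the second example above, VAL == 24 and MASK == 0xf1 give
     EXTRA_BITS == 8 and LOWER_MASK == 0xf, so the result is
     (24 & 0xf1) | (0xf1 & 0xf) == 0x10 | 0x1 == 17.  */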
2210 return (val & mask) | (mask & lower_mask);
2211 }
2212
2213 /* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2214 up to the next value that has no bits set outside MASK. The rounding
2215 wraps if there are no suitable values greater than VAL.
2216
2217 For example, round_up_for_mask (6, 0xf1) would give 16 and
2218 round_up_for_mask (24, 0xf1) would give 32. */
2219
2220 wide_int
2221 wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
2222 {
2223 /* Get the bits in VAL that are outside the mask. */
2224 wide_int extra_bits = wi::bit_and_not (val, mask);
2225 if (extra_bits == 0)
2226 return val;
2227
2228 /* Get a mask that is all 1s above the top bit in EXTRA_BITS. */
2229 unsigned int precision = val.get_precision ();
2230 wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
2231 true, precision);
2232
2233 /* Get the bits of the mask that are above the top bit in EXTRA_BITS. */
2234 upper_mask &= mask;
2235
2236 /* Conceptually we need to:
2237
2238 - clear bits of VAL outside UPPER_MASK
2239 - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
2240 - propagate the carry through the bits of VAL in UPPER_MASK
2241
2242 If (~VAL & UPPER_MASK) is nonzero, the carry eventually
2243 reaches that bit and the process leaves all lower bits clear.
2244 If (~VAL & UPPER_MASK) is zero then the result is also zero. */
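  /* For the first example above, VAL == 6 and MASK == 0xf1 give
     UPPER_MASK == 0xf0 after the masking step, so TMP == 0xf0 & ~6 == 0xf0
     and (VAL | TMP) & -TMP == 0xf6 & -0xf0 == 0x10 == 16.  */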
2245 wide_int tmp = wi::bit_and_not (upper_mask, val);
2246
2247 return (val | tmp) & -tmp;
2248 }
2249
2250 /*
2251 * Private utilities.
2252 */
2253
2254 void gt_ggc_mx (widest_int *) { }
2255 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2256 void gt_pch_nx (widest_int *) { }
2257
2258 template void wide_int::dump () const;
2259 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2260 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2261 template void offset_int::dump () const;
2262 template void widest_int::dump () const;
2263
2264 /* We could add all the above ::dump variants here, but wide_int and
2265 widest_int should handle the common cases. Besides, you can always
2266 call the dump method directly. */
2267
2268 DEBUG_FUNCTION void
2269 debug (const wide_int &ref)
2270 {
2271 ref.dump ();
2272 }
2273
2274 DEBUG_FUNCTION void
2275 debug (const wide_int *ptr)
2276 {
2277 if (ptr)
2278 debug (*ptr);
2279 else
2280 fprintf (stderr, "<nil>\n");
2281 }
2282
2283 DEBUG_FUNCTION void
2284 debug (const widest_int &ref)
2285 {
2286 ref.dump ();
2287 }
2288
2289 DEBUG_FUNCTION void
2290 debug (const widest_int *ptr)
2291 {
2292 if (ptr)
2293 debug (*ptr);
2294 else
2295 fprintf (stderr, "<nil>\n");
2296 }
2297
2298 #if CHECKING_P
2299
2300 namespace selftest {
2301
2302 /* Selftests for wide ints. We run these multiple times, once per type. */
2303
2304 /* Helper function for building a test value. */
2305
2306 template <class VALUE_TYPE>
2307 static VALUE_TYPE
2308 from_int (int i);
2309
2310 /* Specializations of the from_int helper for each wide-int type. */
2311
2312 /* Specialization for VALUE_TYPE == wide_int. */
2313
2314 template <>
2315 wide_int
2316 from_int (int i)
2317 {
2318 return wi::shwi (i, 32);
2319 }
2320
2321 /* Specialization for VALUE_TYPE == offset_int. */
2322
2323 template <>
2324 offset_int
2325 from_int (int i)
2326 {
2327 return offset_int (i);
2328 }
2329
2330 /* Specialization for VALUE_TYPE == widest_int. */
2331
2332 template <>
2333 widest_int
2334 from_int (int i)
2335 {
2336 return widest_int (i);
2337 }
2338
2339 /* Verify that print_dec (WI, ..., SGN) gives the expected string
2340 representation (using base 10). */
2341
2342 static void
2343 assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
2344 {
2345 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2346 print_dec (wi, buf, sgn);
2347 ASSERT_STREQ (expected, buf);
2348 }
2349
2350 /* Likewise for base 16. */
2351
2352 static void
2353 assert_hexeq (const char *expected, const wide_int_ref &wi)
2354 {
2355 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2356 print_hex (wi, buf);
2357 ASSERT_STREQ (expected, buf);
2358 }
2359
2360 /* Test cases. */
2361
2362 /* Verify that print_dec and print_hex work for VALUE_TYPE. */
2363
2364 template <class VALUE_TYPE>
2365 static void
2366 test_printing ()
2367 {
2368 VALUE_TYPE a = from_int<VALUE_TYPE> (42);
2369 assert_deceq ("42", a, SIGNED);
2370 assert_hexeq ("0x2a", a);
2371 assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
2372 assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
2373 assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
2374 if (WIDE_INT_MAX_PRECISION > 128)
2375 {
2376 assert_hexeq ("0x20000000000000000fffffffffffffffe",
2377 wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
2378 assert_hexeq ("0x200000000000004000123456789abcdef",
2379 wi::lshift (1, 129) + wi::lshift (1, 74)
2380 + wi::lshift (0x1234567, 32) + 0x89abcdef);
2381 }
2382 }
2383
2384 /* Verify that various unary and binary operations work correctly
2385    for VALUE_TYPE, using both function syntax and overloaded
2386    operators. */
2387
2388 template <class VALUE_TYPE>
2389 static void
2390 test_ops ()
2391 {
2392 VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2393 VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2394
2395 /* Using functions. */
2396 assert_deceq ("-7", wi::neg (a), SIGNED);
2397 assert_deceq ("10", wi::add (a, b), SIGNED);
2398 assert_deceq ("4", wi::sub (a, b), SIGNED);
2399 assert_deceq ("-4", wi::sub (b, a), SIGNED);
2400 assert_deceq ("21", wi::mul (a, b), SIGNED);
2401
2402 /* Using operators. */
2403 assert_deceq ("-7", -a, SIGNED);
2404 assert_deceq ("10", a + b, SIGNED);
2405 assert_deceq ("4", a - b, SIGNED);
2406 assert_deceq ("-4", b - a, SIGNED);
2407 assert_deceq ("21", a * b, SIGNED);
2408 }
2409
2410 /* Verify that various comparisons work correctly for VALUE_TYPE. */
2411
2412 template <class VALUE_TYPE>
2413 static void
2414 test_comparisons ()
2415 {
2416 VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2417 VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2418
2419 /* == */
2420 ASSERT_TRUE (wi::eq_p (a, a));
2421 ASSERT_FALSE (wi::eq_p (a, b));
2422
2423 /* != */
2424 ASSERT_TRUE (wi::ne_p (a, b));
2425 ASSERT_FALSE (wi::ne_p (a, a));
2426
2427 /* < */
2428 ASSERT_FALSE (wi::lts_p (a, a));
2429 ASSERT_FALSE (wi::lts_p (a, b));
2430 ASSERT_TRUE (wi::lts_p (b, a));
2431
2432 /* <= */
2433 ASSERT_TRUE (wi::les_p (a, a));
2434 ASSERT_FALSE (wi::les_p (a, b));
2435 ASSERT_TRUE (wi::les_p (b, a));
2436
2437 /* > */
2438 ASSERT_FALSE (wi::gts_p (a, a));
2439 ASSERT_TRUE (wi::gts_p (a, b));
2440 ASSERT_FALSE (wi::gts_p (b, a));
2441
2442 /* >= */
2443 ASSERT_TRUE (wi::ges_p (a, a));
2444 ASSERT_TRUE (wi::ges_p (a, b));
2445 ASSERT_FALSE (wi::ges_p (b, a));
2446
2447 /* comparison */
2448 ASSERT_EQ (-1, wi::cmps (b, a));
2449 ASSERT_EQ (0, wi::cmps (a, a));
2450 ASSERT_EQ (1, wi::cmps (a, b));
2451 }
2452
2453 /* Run all of the selftests, using the given VALUE_TYPE. */
2454
2455 template <class VALUE_TYPE>
2456 static void run_all_wide_int_tests ()
2457 {
2458 test_printing <VALUE_TYPE> ();
2459 test_ops <VALUE_TYPE> ();
2460 test_comparisons <VALUE_TYPE> ();
2461 }
2462
2463 /* Test overflow conditions. */
2464
2465 static void
2466 test_overflow ()
2467 {
2468 static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
2469 static int offsets[] = { 16, 1, 0 };
2470 for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
2471 for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
2472 {
2473 int prec = precs[i];
2474 int offset = offsets[j];
2475 wi::overflow_type overflow;
2476 wide_int sum, diff;
2477
2478 sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
2479 UNSIGNED, &overflow);
2480 ASSERT_EQ (sum, -offset);
2481 ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2482
2483 sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
2484 UNSIGNED, &overflow);
2485 ASSERT_EQ (sum, -offset);
2486 ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2487
2488 diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2489 wi::max_value (prec, UNSIGNED),
2490 UNSIGNED, &overflow);
2491 ASSERT_EQ (diff, -offset);
2492 ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);
2493
2494 diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2495 wi::max_value (prec, UNSIGNED) - 1,
2496 UNSIGNED, &overflow);
2497 ASSERT_EQ (diff, 1 - offset);
2498 ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
2499 }
2500 }
2501
2502 /* Test the round_{down,up}_for_mask functions. */
2503
2504 static void
2505 test_round_for_mask ()
2506 {
2507 unsigned int prec = 18;
2508 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
2509 wi::shwi (0xf1, prec)));
2510 ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
2511 wi::shwi (0xf1, prec)));
2512
2513 ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
2514 wi::shwi (0xf1, prec)));
2515 ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
2516 wi::shwi (0xf1, prec)));
2517
2518 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
2519 wi::shwi (0xf1, prec)));
2520 ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
2521 wi::shwi (0xf1, prec)));
2522
2523 ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
2524 wi::shwi (0x111, prec)));
2525 ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
2526 wi::shwi (0x111, prec)));
2527
2528 ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
2529 wi::shwi (0xfc, prec)));
2530 ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
2531 wi::shwi (0xfc, prec)));
2532
2533 ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
2534 wi::shwi (0xabc, prec)));
2535 ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
2536 wi::shwi (0xabc, prec)));
2537
2538 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
2539 wi::shwi (0xabc, prec)));
2540 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
2541 wi::shwi (0xabc, prec)));
2542
2543 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
2544 wi::shwi (0xabc, prec)));
2545 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
2546 wi::shwi (0xabc, prec)));
2547 }
2548
2549 /* Run all of the selftests within this file, for all value types. */
2550
2551 void
2552 wide_int_cc_tests ()
2553 {
2554 run_all_wide_int_tests <wide_int> ();
2555 run_all_wide_int_tests <offset_int> ();
2556 run_all_wide_int_tests <widest_int> ();
2557 test_overflow ();
2558 test_round_for_mask ();
2559 ASSERT_EQ (wi::mask (128, false, 128),
2560 wi::shifted_mask (0, 128, false, 128));
2561 ASSERT_EQ (wi::mask (128, true, 128),
2562 wi::shifted_mask (0, 128, true, 128));
2563 }
2564
2565 } // namespace selftest
2566 #endif /* CHECKING_P */
2567