/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
   2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "convert.h"
#include "toplev.h"
#include "langhooks.h"
#include "real.h"

/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */
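/* For instance (an illustrative sketch, assuming an LP64 target where
   POINTER_SIZE is 64): converting literal zero yields a null pointer
   constant of the requested type, while converting a 32-bit "int" value
   first widens it to a 64-bit integer and only then reinterprets it as
   a pointer.  */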

tree
convert_to_pointer (tree type, tree expr)
{
  if (TREE_TYPE (expr) == type)
    return expr;

  if (integer_zerop (expr))
    {
      tree t = build_int_cst (type, 0);
      if (TREE_OVERFLOW (expr) || TREE_CONSTANT_OVERFLOW (expr))
        t = force_fit_type (t, 0, TREE_OVERFLOW (expr),
                            TREE_CONSTANT_OVERFLOW (expr));
      return t;
    }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      return fold_build1 (NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      if (TYPE_PRECISION (TREE_TYPE (expr)) != POINTER_SIZE)
        expr = fold_build1 (NOP_EXPR,
                            lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                            expr);
      return fold_build1 (CONVERT_EXPR, type, expr);

    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}

/* Avoid any floating point extensions from EXP.  */
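/* For example, given (double) f where F is a "float" variable, the result
   is F itself; given a "double" REAL_CST whose value is exactly
   representable as "float", the result is the equivalent "float" constant
   (an illustrative sketch of the two cases handled below).  */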
tree
strip_float_extensions (tree exp)
{
  tree sub, expt, subt;

  /* For a floating point constant, look up the narrowest type that can hold
     it properly and handle it like (type)(narrowest_type)constant.
     This way we can optimize for instance a=a*2.0 where "a" is float
     but 2.0 is double constant.  */
  if (TREE_CODE (exp) == REAL_CST)
    {
      REAL_VALUE_TYPE orig;
      tree type = NULL;

      orig = TREE_REAL_CST (exp);
      if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (float_type_node)
          && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
        type = float_type_node;
      else if (TYPE_PRECISION (TREE_TYPE (exp))
               > TYPE_PRECISION (double_type_node)
               && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
        type = double_type_node;
      if (type)
        return build_real (type, real_value_truncate (TYPE_MODE (type), orig));
    }

  if (TREE_CODE (exp) != NOP_EXPR
      && TREE_CODE (exp) != CONVERT_EXPR)
    return exp;

  sub = TREE_OPERAND (exp, 0);
  subt = TREE_TYPE (sub);
  expt = TREE_TYPE (exp);

  if (!FLOAT_TYPE_P (subt))
    return exp;

  if (TYPE_PRECISION (subt) > TYPE_PRECISION (expt))
    return exp;

  return strip_float_extensions (sub);
}


/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, integer, or enumeral;
   in other cases error is called.  */
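/* As an illustration of the narrowing done below: (float) sqrt ((double) f)
   with a "float" F may be rewritten as sqrtf (f), and
   (float) ((double) a + (double) b) with "float" A and B may be computed
   directly in "float" as a + b.  */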

tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  /* Disable until we figure out how to decide whether the functions are
     present in runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
          CASE_MATHFN (ACOS)
          CASE_MATHFN (ACOSH)
          CASE_MATHFN (ASIN)
          CASE_MATHFN (ASINH)
          CASE_MATHFN (ATAN)
          CASE_MATHFN (ATANH)
          CASE_MATHFN (CBRT)
          CASE_MATHFN (COS)
          CASE_MATHFN (COSH)
          CASE_MATHFN (ERF)
          CASE_MATHFN (ERFC)
          CASE_MATHFN (EXP)
          CASE_MATHFN (EXP10)
          CASE_MATHFN (EXP2)
          CASE_MATHFN (EXPM1)
          CASE_MATHFN (FABS)
          CASE_MATHFN (GAMMA)
          CASE_MATHFN (J0)
          CASE_MATHFN (J1)
          CASE_MATHFN (LGAMMA)
          CASE_MATHFN (LOG)
          CASE_MATHFN (LOG10)
          CASE_MATHFN (LOG1P)
          CASE_MATHFN (LOG2)
          CASE_MATHFN (LOGB)
          CASE_MATHFN (POW10)
          CASE_MATHFN (SIN)
          CASE_MATHFN (SINH)
          CASE_MATHFN (SQRT)
          CASE_MATHFN (TAN)
          CASE_MATHFN (TANH)
          CASE_MATHFN (TGAMMA)
          CASE_MATHFN (Y0)
          CASE_MATHFN (Y1)
#undef CASE_MATHFN
            {
              tree arg0 = strip_float_extensions (TREE_VALUE (TREE_OPERAND (expr, 1)));
              tree newtype = type;

              /* We have (outertype)sqrt((innertype)x).  Choose the wider of
                 the two as the safe type for the operation.  */
              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
                newtype = TREE_TYPE (arg0);

              /* Be careful about integer to fp conversions.
                 These may still overflow.  */
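              /* E.g. for (float) sqrt ((double) n) where N is a wide integer,
                 the computation must not be narrowed to "float": the value
                 of N may exceed the range "float" can represent even though
                 it fits in "double".  */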
              if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                  && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                  && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                      || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
                {
                  tree arglist;
                  tree fn = mathfn_built_in (newtype, fcode);

                  if (fn)
                    {
                      arglist = build_tree_list (NULL_TREE,
                                                 fold (convert_to_real (newtype, arg0)));
                      expr = build_function_call_expr (fn, arglist);
                      if (newtype == type)
                        return expr;
                    }
                }
            }
        default:
          break;
        }
    }
  if (optimize
      && (((fcode == BUILT_IN_FLOORL
            || fcode == BUILT_IN_CEILL
            || fcode == BUILT_IN_ROUNDL
            || fcode == BUILT_IN_RINTL
            || fcode == BUILT_IN_TRUNCL
            || fcode == BUILT_IN_NEARBYINTL)
           && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
               || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
          || ((fcode == BUILT_IN_FLOOR
               || fcode == BUILT_IN_CEIL
               || fcode == BUILT_IN_ROUND
               || fcode == BUILT_IN_RINT
               || fcode == BUILT_IN_TRUNC
               || fcode == BUILT_IN_NEARBYINT)
              && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
    {
      tree fn = mathfn_built_in (type, fcode);

      if (fn)
        {
          tree arg
            = strip_float_extensions (TREE_VALUE (TREE_OPERAND (expr, 1)));

          /* Make sure (type)arg0 is an extension, otherwise we could end up
             changing (float)floor(double d) into floorf((float)d), which is
             incorrect because (float)d uses round-to-nearest and can round
             up to the next integer.  */
          if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
            return
              build_function_call_expr (fn,
                                        build_tree_list (NULL_TREE,
                                          fold (convert_to_real (type, arg))));
        }
    }

  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
        /* Convert (float)-x into -(float)x.  This is safe for
           round-to-nearest rounding mode.  */
        case ABS_EXPR:
        case NEGATE_EXPR:
          if (!flag_rounding_math
              && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr)))
            return build1 (TREE_CODE (expr), type,
                           fold (convert_to_real (type,
                                                  TREE_OPERAND (expr, 0))));
          break;
        /* Convert (outertype)((innertype0)a+(innertype1)b)
           into ((newtype)a+(newtype)b) where newtype
           is the widest mode from all of these.  */
        case PLUS_EXPR:
        case MINUS_EXPR:
        case MULT_EXPR:
        case RDIV_EXPR:
          {
            tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
            tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && FLOAT_TYPE_P (TREE_TYPE (arg1)))
              {
                tree newtype = type;

                if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == SDmode)
                  newtype = dfloat32_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == DDmode)
                  newtype = dfloat64_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == TDmode)
                  newtype = dfloat128_type_node;
                if (newtype == dfloat32_type_node
                    || newtype == dfloat64_type_node
                    || newtype == dfloat128_type_node)
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                    break;
                  }

                if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg0);
                if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg1);
                if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype))
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                  }
              }
          }
          break;
        default:
          break;
      }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}

/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float, or
   vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */

tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = TYPE_PRECISION (intype);
  unsigned int outprec = TYPE_PRECISION (type);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E = { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
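  /* E.g. (long) round (d) can safely become lround (d), but for
     (char) round (d) the direct double -> char conversion and the
     double -> long -> char path need not agree when D is out of range,
     hence the check against the precision of "long" below.  */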
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          /* Only convert nearbyint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_RINT):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

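          /* trunc* can be dropped entirely: the FIX_TRUNC_EXPR produced for
             the enclosing conversion already rounds toward zero, so e.g.
             (int) trunc (d) is the same as (int) d.  */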
        CASE_FLT_FN (BUILT_IN_TRUNC):
          {
            tree arglist = TREE_OPERAND (s_expr, 1);
            return convert_to_integer (type, TREE_VALUE (arglist));
          }

        default:
          break;
        }

      if (fn)
        {
          tree arglist = TREE_OPERAND (s_expr, 1);
          tree newexpr = build_function_call_expr (fn, arglist);
          return convert_to_integer (type, newexpr);
        }
    }

  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first,
         and from there widen/truncate to the required type.  */
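      /* E.g. on a 64-bit target a conversion such as (short) p is carried
         out as a conversion of P to an integer of POINTER_SIZE bits first,
         which the fold_convert below then narrows to "short" (an
         illustrative sketch).  */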
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                          expr);
      return fold_convert (type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }

      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;
          tree tem;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          tem = fold_unary (code, type, expr);
          if (tem)
            return tem;

          tem = build1 (code, type, expr);
          TREE_NO_WARNING (tem) = 1;
          return tem;
        }

      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_BITSIZE (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));

      /* Here detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.
         It is desirable in these cases:
         1) when truncating down to full-word from a larger size
         2) when truncating takes no work.
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */

      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */

                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;

        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }

        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex = lang_hooks.types.type_for_size
                    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
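                    /* For instance, (int) (l0 + l1) with "long" L0 and L1 is
                       computed as (int) ((unsigned int) l0 + (unsigned int) l1)
                       when signed overflow is undefined, so the narrower
                       addition cannot introduce overflow that the original
                       "long" addition did not have.  */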
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR or MINUS_EXPR in an unsigned
                           type.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
                             || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR)))
                      typex = lang_hooks.types.unsigned_type (typex);
                    else
                      typex = lang_hooks.types.signed_type (typex);
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;

        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            tree typex;

            /* Don't do unsigned arithmetic where signed was wanted,
               or vice versa.  */
            if (TYPE_UNSIGNED (TREE_TYPE (expr)))
              typex = lang_hooks.types.unsigned_type (type);
            else
              typex = lang_hooks.types.signed_type (type);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }

        case NOP_EXPR:
          /* Don't introduce a
             "can't convert between vector values of different size" error.  */
          if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
              && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
                  != GET_MODE_SIZE (TYPE_MODE (type))))
            break;
          /* If truncating after truncating, might as well do all at once.
             If truncating after extending, we may get rid of wasted work.  */
          return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));

        case COND_EXPR:
          /* It is sometimes worthwhile to push the narrowing down through
             the conditional; it never loses.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              convert (type, TREE_OPERAND (expr, 1)),
                              convert (type, TREE_OPERAND (expr, 2)));

        default:
          break;
        }

      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      return build1 (FIX_TRUNC_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}

/* Convert EXPR to the complex type TYPE in the usual ways.  */
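/* For example, converting a "double" value D to "_Complex double" builds
   COMPLEX_EXPR <d, 0.0>, while converting a "_Complex float" value to
   "_Complex double" converts the real and imaginary parts separately
   (an illustrative sketch of the cases below).  */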

tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}

/* Convert EXPR to the vector type TYPE in the usual ways.  */
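/* For example, a conversion between a 64-bit integer and a 64-bit V8QI
   vector becomes a VIEW_CONVERT_EXPR, i.e. the same bits reinterpreted;
   conversions that change the total size are rejected (an illustrative
   sketch, assuming such 64-bit types exist on the target).  */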

tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can't convert value to a vector");
      return error_mark_node;
    }
}