xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/simplify-rtx.c (revision 212397c69a103ae7e5eafa8731ddfae671d2dee7)
1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987-2013 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
38 
39 /* Simplification and canonicalization of RTL.  */
40 
41 /* Much code operates on (low, high) pairs; the low value is an
42    unsigned wide int, the high value a signed wide int.  We
43    occasionally need to sign extend from low to high as if low were a
44    signed wide int.  */
45 #define HWI_SIGN_EXTEND(low) \
46  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
47 
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 				  unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 					   rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 					    enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 					rtx, rtx, rtx, rtx);
61 
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63    maximally negative number can overflow).  */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
66 {
67   return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
68 }
69 
70 /* Test whether expression, X, is an immediate constant that represents
71    the most significant bit of machine mode MODE.  */
72 
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
75 {
76   unsigned HOST_WIDE_INT val;
77   unsigned int width;
78 
79   if (GET_MODE_CLASS (mode) != MODE_INT)
80     return false;
81 
82   width = GET_MODE_PRECISION (mode);
83   if (width == 0)
84     return false;
85 
86   if (width <= HOST_BITS_PER_WIDE_INT
87       && CONST_INT_P (x))
88     val = INTVAL (x);
89   else if (width <= HOST_BITS_PER_DOUBLE_INT
90 	   && CONST_DOUBLE_AS_INT_P (x)
91 	   && CONST_DOUBLE_LOW (x) == 0)
92     {
93       val = CONST_DOUBLE_HIGH (x);
94       width -= HOST_BITS_PER_WIDE_INT;
95     }
96   else
97     /* FIXME: We don't yet have a representation for wider modes.  */
98     return false;
99 
100   if (width < HOST_BITS_PER_WIDE_INT)
101     val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
103 }
104 
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106    (after masking with the mode mask of MODE).  Returns false if the
107    precision of MODE is too large to handle.  */
108 
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
111 {
112   unsigned int width;
113 
114   if (GET_MODE_CLASS (mode) != MODE_INT)
115     return false;
116 
117   width = GET_MODE_PRECISION (mode);
118   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119     return false;
120 
121   val &= GET_MODE_MASK (mode);
122   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
123 }
124 
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126    Returns false if the precision of MODE is too large to handle.  */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
129 {
130   unsigned int width;
131 
132   if (GET_MODE_CLASS (mode) != MODE_INT)
133     return false;
134 
135   width = GET_MODE_PRECISION (mode);
136   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137     return false;
138 
139   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140   return val != 0;
141 }
142 
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144    Returns false if the precision of MODE is too large to handle.  */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
147 {
148   unsigned int width;
149 
150   if (GET_MODE_CLASS (mode) != MODE_INT)
151     return false;
152 
153   width = GET_MODE_PRECISION (mode);
154   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155     return false;
156 
157   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158   return val == 0;
159 }
160 
161 /* Make a binary operation by properly ordering the operands and
162    seeing if the expression folds.  */
163 
164 rtx
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 		     rtx op1)
167 {
168   rtx tem;
169 
170   /* If this simplifies, do it.  */
171   tem = simplify_binary_operation (code, mode, op0, op1);
172   if (tem)
173     return tem;
174 
175   /* Put complex operands first and constants second if commutative.  */
176   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177       && swap_commutative_operands_p (op0, op1))
178     tem = op0, op0 = op1, op1 = tem;
179 
180   return gen_rtx_fmt_ee (code, mode, op0, op1);
181 }
182 
183 /* If X is a MEM referencing the constant pool, return the real value.
184    Otherwise return X.  */
185 rtx
186 avoid_constant_pool_reference (rtx x)
187 {
188   rtx c, tmp, addr;
189   enum machine_mode cmode;
190   HOST_WIDE_INT offset = 0;
191 
192   switch (GET_CODE (x))
193     {
194     case MEM:
195       break;
196 
197     case FLOAT_EXTEND:
198       /* Handle float extensions of constant pool references.  */
199       tmp = XEXP (x, 0);
200       c = avoid_constant_pool_reference (tmp);
201       if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
202 	{
203 	  REAL_VALUE_TYPE d;
204 
205 	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
207 	}
208       return x;
209 
210     default:
211       return x;
212     }
213 
214   if (GET_MODE (x) == BLKmode)
215     return x;
216 
217   addr = XEXP (x, 0);
218 
219   /* Call target hook to avoid the effects of -fpic etc....  */
220   addr = targetm.delegitimize_address (addr);
221 
222   /* Split the address into a base and integer offset.  */
223   if (GET_CODE (addr) == CONST
224       && GET_CODE (XEXP (addr, 0)) == PLUS
225       && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
226     {
227       offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228       addr = XEXP (XEXP (addr, 0), 0);
229     }
230 
231   if (GET_CODE (addr) == LO_SUM)
232     addr = XEXP (addr, 1);
233 
234   /* If this is a constant pool reference, we can turn it into its
235      constant and hope that simplifications happen.  */
236   if (GET_CODE (addr) == SYMBOL_REF
237       && CONSTANT_POOL_ADDRESS_P (addr))
238     {
239       c = get_pool_constant (addr);
240       cmode = get_pool_mode (addr);
241 
242       /* If we're accessing the constant in a different mode than it was
243          originally stored, attempt to fix that up via subreg simplifications.
244          If that fails we have no choice but to return the original memory.  */
245       if ((offset != 0 || cmode != GET_MODE (x))
246 	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
247         {
248           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249           if (tem && CONSTANT_P (tem))
250             return tem;
251         }
252       else
253         return c;
254     }
255 
256   return x;
257 }
258 
259 /* Simplify a MEM based on its attributes.  This is the default
260    delegitimize_address target hook, and it's recommended that every
261    overrider call it.  */
262 
263 rtx
264 delegitimize_mem_from_attrs (rtx x)
265 {
266   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267      use their base addresses as equivalent.  */
268   if (MEM_P (x)
269       && MEM_EXPR (x)
270       && MEM_OFFSET_KNOWN_P (x))
271     {
272       tree decl = MEM_EXPR (x);
273       enum machine_mode mode = GET_MODE (x);
274       HOST_WIDE_INT offset = 0;
275 
276       switch (TREE_CODE (decl))
277 	{
278 	default:
279 	  decl = NULL;
280 	  break;
281 
282 	case VAR_DECL:
283 	  break;
284 
285 	case ARRAY_REF:
286 	case ARRAY_RANGE_REF:
287 	case COMPONENT_REF:
288 	case BIT_FIELD_REF:
289 	case REALPART_EXPR:
290 	case IMAGPART_EXPR:
291 	case VIEW_CONVERT_EXPR:
292 	  {
293 	    HOST_WIDE_INT bitsize, bitpos;
294 	    tree toffset;
295 	    int unsignedp, volatilep = 0;
296 
297 	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 					&mode, &unsignedp, &volatilep, false);
299 	    if (bitsize != GET_MODE_BITSIZE (mode)
300 		|| (bitpos % BITS_PER_UNIT)
301 		|| (toffset && !host_integerp (toffset, 0)))
302 	      decl = NULL;
303 	    else
304 	      {
305 		offset += bitpos / BITS_PER_UNIT;
306 		if (toffset)
307 		  offset += TREE_INT_CST_LOW (toffset);
308 	      }
309 	    break;
310 	  }
311 	}
312 
313       if (decl
314 	  && mode == GET_MODE (x)
315 	  && TREE_CODE (decl) == VAR_DECL
316 	  && (TREE_STATIC (decl)
317 	      || DECL_THREAD_LOCAL_P (decl))
318 	  && DECL_RTL_SET_P (decl)
319 	  && MEM_P (DECL_RTL (decl)))
320 	{
321 	  rtx newx;
322 
323 	  offset += MEM_OFFSET (x);
324 
325 	  newx = DECL_RTL (decl);
326 
327 	  if (MEM_P (newx))
328 	    {
329 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
330 
331 	      /* Avoid creating a new MEM needlessly if we already had
332 		 the same address.  We do if there's no OFFSET and the
333 		 old address X is identical to NEWX, or if X is of the
334 		 form (plus NEWX OFFSET), or the NEWX is of the form
335 		 (plus Y (const_int Z)) and X is that with the offset
336 		 added: (plus Y (const_int Z+OFFSET)).  */
337 	      if (!((offset == 0
338 		     || (GET_CODE (o) == PLUS
339 			 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 			 && (offset == INTVAL (XEXP (o, 1))
341 			     || (GET_CODE (n) == PLUS
342 				 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 				 && (INTVAL (XEXP (n, 1)) + offset
344 				     == INTVAL (XEXP (o, 1)))
345 				 && (n = XEXP (n, 0))))
346 			 && (o = XEXP (o, 0))))
347 		    && rtx_equal_p (o, n)))
348 		x = adjust_address_nv (newx, mode, offset);
349 	    }
350 	  else if (GET_MODE (x) == GET_MODE (newx)
351 		   && offset == 0)
352 	    x = newx;
353 	}
354     }
355 
356   return x;
357 }
358 
359 /* Make a unary operation by first seeing if it folds and otherwise making
360    the specified operation.  */
361 
362 rtx
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 		    enum machine_mode op_mode)
365 {
366   rtx tem;
367 
368   /* If this simplifies, use it.  */
369   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370     return tem;
371 
372   return gen_rtx_fmt_e (code, mode, op);
373 }
374 
375 /* Likewise for ternary operations.  */
376 
377 rtx
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
380 {
381   rtx tem;
382 
383   /* If this simplifies, use it.  */
384   if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 					      op0, op1, op2)))
386     return tem;
387 
388   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
389 }
390 
391 /* Likewise, for relational operations.
392    CMP_MODE specifies mode comparison is done in.  */
393 
394 rtx
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 			 enum machine_mode cmp_mode, rtx op0, rtx op1)
397 {
398   rtx tem;
399 
400   if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 						 op0, op1)))
402     return tem;
403 
404   return gen_rtx_fmt_ee (code, mode, op0, op1);
405 }
406 
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408    and simplify the result.  If FN is non-NULL, call this callback on each
409    X, if it returns non-NULL, replace X with its return value and simplify the
410    result.  */
411 
412 rtx
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
415 {
416   enum rtx_code code = GET_CODE (x);
417   enum machine_mode mode = GET_MODE (x);
418   enum machine_mode op_mode;
419   const char *fmt;
420   rtx op0, op1, op2, newx, op;
421   rtvec vec, newvec;
422   int i, j;
423 
424   if (__builtin_expect (fn != NULL, 0))
425     {
426       newx = fn (x, old_rtx, data);
427       if (newx)
428 	return newx;
429     }
430   else if (rtx_equal_p (x, old_rtx))
431     return copy_rtx ((rtx) data);
432 
433   switch (GET_RTX_CLASS (code))
434     {
435     case RTX_UNARY:
436       op0 = XEXP (x, 0);
437       op_mode = GET_MODE (op0);
438       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439       if (op0 == XEXP (x, 0))
440 	return x;
441       return simplify_gen_unary (code, mode, op0, op_mode);
442 
443     case RTX_BIN_ARITH:
444     case RTX_COMM_ARITH:
445       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 	return x;
449       return simplify_gen_binary (code, mode, op0, op1);
450 
451     case RTX_COMPARE:
452     case RTX_COMM_COMPARE:
453       op0 = XEXP (x, 0);
454       op1 = XEXP (x, 1);
455       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 	return x;
460       return simplify_gen_relational (code, mode, op_mode, op0, op1);
461 
462     case RTX_TERNARY:
463     case RTX_BITFIELD_OPS:
464       op0 = XEXP (x, 0);
465       op_mode = GET_MODE (op0);
466       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 	return x;
471       if (op_mode == VOIDmode)
472 	op_mode = GET_MODE (op0);
473       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
474 
475     case RTX_EXTRA:
476       if (code == SUBREG)
477 	{
478 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 	  if (op0 == SUBREG_REG (x))
480 	    return x;
481 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 				     GET_MODE (SUBREG_REG (x)),
483 				     SUBREG_BYTE (x));
484 	  return op0 ? op0 : x;
485 	}
486       break;
487 
488     case RTX_OBJ:
489       if (code == MEM)
490 	{
491 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 	  if (op0 == XEXP (x, 0))
493 	    return x;
494 	  return replace_equiv_address_nv (x, op0);
495 	}
496       else if (code == LO_SUM)
497 	{
498 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
500 
501 	  /* (lo_sum (high x) x) -> x  */
502 	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 	    return op1;
504 
505 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 	    return x;
507 	  return gen_rtx_LO_SUM (mode, op0, op1);
508 	}
509       break;
510 
511     default:
512       break;
513     }
514 
515   newx = x;
516   fmt = GET_RTX_FORMAT (code);
517   for (i = 0; fmt[i]; i++)
518     switch (fmt[i])
519       {
520       case 'E':
521 	vec = XVEC (x, i);
522 	newvec = XVEC (newx, i);
523 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
524 	  {
525 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 					  old_rtx, fn, data);
527 	    if (op != RTVEC_ELT (vec, j))
528 	      {
529 		if (newvec == vec)
530 		  {
531 		    newvec = shallow_copy_rtvec (vec);
532 		    if (x == newx)
533 		      newx = shallow_copy_rtx (x);
534 		    XVEC (newx, i) = newvec;
535 		  }
536 		RTVEC_ELT (newvec, j) = op;
537 	      }
538 	  }
539 	break;
540 
541       case 'e':
542 	if (XEXP (x, i))
543 	  {
544 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 	    if (op != XEXP (x, i))
546 	      {
547 		if (x == newx)
548 		  newx = shallow_copy_rtx (x);
549 		XEXP (newx, i) = op;
550 	      }
551 	  }
552 	break;
553       }
554   return newx;
555 }
556 
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558    resulting RTX.  Return a new RTX which is as simplified as possible.  */
559 
560 rtx
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
562 {
563   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
564 }
565 
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567    Only handle cases where the truncated value is inherently an rvalue.
568 
569    RTL provides two ways of truncating a value:
570 
571    1. a lowpart subreg.  This form is only a truncation when both
572       the outer and inner modes (here MODE and OP_MODE respectively)
573       are scalar integers, and only then when the subreg is used as
574       an rvalue.
575 
576       It is only valid to form such truncating subregs if the
577       truncation requires no action by the target.  The onus for
578       proving this is on the creator of the subreg -- e.g. the
579       caller to simplify_subreg or simplify_gen_subreg -- and typically
580       involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
581 
582    2. a TRUNCATE.  This form handles both scalar and compound integers.
583 
584    The first form is preferred where valid.  However, the TRUNCATE
585    handling in simplify_unary_operation turns the second form into the
586    first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587    so it is generally safe to form rvalue truncations using:
588 
589       simplify_gen_unary (TRUNCATE, ...)
590 
591    and leave simplify_unary_operation to work out which representation
592    should be used.
593 
594    Because of the proof requirements on (1), simplify_truncation must
595    also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596    regardless of whether the outer truncation came from a SUBREG or a
597    TRUNCATE.  For example, if the caller has proven that an SImode
598    truncation of:
599 
600       (and:DI X Y)
601 
602    is a no-op and can be represented as a subreg, it does not follow
603    that SImode truncations of X and Y are also no-ops.  On a target
604    like 64-bit MIPS that requires SImode values to be stored in
605    sign-extended form, an SImode truncation of:
606 
607       (and:DI (reg:DI X) (const_int 63))
608 
609    is trivially a no-op because only the lower 6 bits can be set.
610    However, X is still an arbitrary 64-bit number and so we cannot
611    assume that truncating it too is a no-op.  */
612 
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 		     enum machine_mode op_mode)
616 {
617   unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618   unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619   gcc_assert (precision <= op_precision);
620 
621   /* Optimize truncations of zero and sign extended values.  */
622   if (GET_CODE (op) == ZERO_EXTEND
623       || GET_CODE (op) == SIGN_EXTEND)
624     {
625       /* There are three possibilities.  If MODE is the same as the
626 	 origmode, we can omit both the extension and the subreg.
627 	 If MODE is not larger than the origmode, we can apply the
628 	 truncation without the extension.  Finally, if the outermode
629 	 is larger than the origmode, we can just extend to the appropriate
630 	 mode.  */
631       enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632       if (mode == origmode)
633 	return XEXP (op, 0);
634       else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 	return simplify_gen_unary (TRUNCATE, mode,
636 				   XEXP (op, 0), origmode);
637       else
638 	return simplify_gen_unary (GET_CODE (op), mode,
639 				   XEXP (op, 0), origmode);
640     }
641 
642   /* If the machine can perform operations in the truncated mode, distribute
643      the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
644      (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
645   if (1
646 #ifdef WORD_REGISTER_OPERATIONS
647       && precision >= BITS_PER_WORD
648 #endif
649       && (GET_CODE (op) == PLUS
650 	  || GET_CODE (op) == MINUS
651 	  || GET_CODE (op) == MULT))
652     {
653       rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
654       if (op0)
655 	{
656 	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
657 	  if (op1)
658 	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
659 	}
660     }
661 
662   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
663      to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
664      the outer subreg is effectively a truncation to the original mode.  */
665   if ((GET_CODE (op) == LSHIFTRT
666        || GET_CODE (op) == ASHIFTRT)
667       /* Ensure that OP_MODE is at least twice as wide as MODE
668 	 to avoid the possibility that an outer LSHIFTRT shifts by more
669 	 than the sign extension's sign_bit_copies and introduces zeros
670 	 into the high bits of the result.  */
671       && 2 * precision <= op_precision
672       && CONST_INT_P (XEXP (op, 1))
673       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
674       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
675       && UINTVAL (XEXP (op, 1)) < precision)
676     return simplify_gen_binary (ASHIFTRT, mode,
677 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
678 
679   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
680      to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
681      the outer subreg is effectively a truncation to the original mode.  */
682   if ((GET_CODE (op) == LSHIFTRT
683        || GET_CODE (op) == ASHIFTRT)
684       && CONST_INT_P (XEXP (op, 1))
685       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
686       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
687       && UINTVAL (XEXP (op, 1)) < precision)
688     return simplify_gen_binary (LSHIFTRT, mode,
689 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
690 
691   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
692      to (ashift:QI (x:QI) C), where C is a suitable small constant and
693      the outer subreg is effectively a truncation to the original mode.  */
694   if (GET_CODE (op) == ASHIFT
695       && CONST_INT_P (XEXP (op, 1))
696       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
697 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
698       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
699       && UINTVAL (XEXP (op, 1)) < precision)
700     return simplify_gen_binary (ASHIFT, mode,
701 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
702 
703   /* Recognize a word extraction from a multi-word subreg.  */
704   if ((GET_CODE (op) == LSHIFTRT
705        || GET_CODE (op) == ASHIFTRT)
706       && SCALAR_INT_MODE_P (mode)
707       && SCALAR_INT_MODE_P (op_mode)
708       && precision >= BITS_PER_WORD
709       && 2 * precision <= op_precision
710       && CONST_INT_P (XEXP (op, 1))
711       && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
712       && UINTVAL (XEXP (op, 1)) < op_precision)
713     {
714       int byte = subreg_lowpart_offset (mode, op_mode);
715       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
716       return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
717 				  (WORDS_BIG_ENDIAN
718 				   ? byte - shifted_bytes
719 				   : byte + shifted_bytes));
720     }
721 
722   /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
723      and try replacing the TRUNCATE and shift with it.  Don't do this
724      if the MEM has a mode-dependent address.  */
725   if ((GET_CODE (op) == LSHIFTRT
726        || GET_CODE (op) == ASHIFTRT)
727       && SCALAR_INT_MODE_P (op_mode)
728       && MEM_P (XEXP (op, 0))
729       && CONST_INT_P (XEXP (op, 1))
730       && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
731       && INTVAL (XEXP (op, 1)) > 0
732       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
733       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
734 				     MEM_ADDR_SPACE (XEXP (op, 0)))
735       && ! MEM_VOLATILE_P (XEXP (op, 0))
736       && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
737 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
738     {
739       int byte = subreg_lowpart_offset (mode, op_mode);
740       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
741       return adjust_address_nv (XEXP (op, 0), mode,
742 				(WORDS_BIG_ENDIAN
743 				 ? byte - shifted_bytes
744 				 : byte + shifted_bytes));
745     }
746 
747   /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
748      (OP:SI foo:SI) if OP is NEG or ABS.  */
749   if ((GET_CODE (op) == ABS
750        || GET_CODE (op) == NEG)
751       && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
752 	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
753       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
754     return simplify_gen_unary (GET_CODE (op), mode,
755 			       XEXP (XEXP (op, 0), 0), mode);
756 
757   /* (truncate:A (subreg:B (truncate:C X) 0)) is
758      (truncate:A X).  */
759   if (GET_CODE (op) == SUBREG
760       && SCALAR_INT_MODE_P (mode)
761       && SCALAR_INT_MODE_P (op_mode)
762       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
763       && GET_CODE (SUBREG_REG (op)) == TRUNCATE
764       && subreg_lowpart_p (op))
765     {
766       rtx inner = XEXP (SUBREG_REG (op), 0);
767       if (GET_MODE_PRECISION (mode)
768 	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
769 	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
770       else
771 	/* If subreg above is paradoxical and C is narrower
772 	   than A, return (subreg:A (truncate:C X) 0).  */
773 	return simplify_gen_subreg (mode, SUBREG_REG (op),
774 				    GET_MODE (SUBREG_REG (op)), 0);
775     }
776 
777   /* (truncate:A (truncate:B X)) is (truncate:A X).  */
778   if (GET_CODE (op) == TRUNCATE)
779     return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
780 			       GET_MODE (XEXP (op, 0)));
781 
782   return NULL_RTX;
783 }
784 
785 /* Try to simplify a unary operation CODE whose output mode is to be
786    MODE with input operand OP whose mode was originally OP_MODE.
787    Return zero if no simplification can be made.  */
788 rtx
789 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
790 			  rtx op, enum machine_mode op_mode)
791 {
792   rtx trueop, tem;
793 
794   trueop = avoid_constant_pool_reference (op);
795 
796   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
797   if (tem)
798     return tem;
799 
800   return simplify_unary_operation_1 (code, mode, op);
801 }
802 
803 /* Perform some simplifications we can do even if the operands
804    aren't constant.  */
805 static rtx
806 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
807 {
808   enum rtx_code reversed;
809   rtx temp;
810 
811   switch (code)
812     {
813     case NOT:
814       /* (not (not X)) == X.  */
815       if (GET_CODE (op) == NOT)
816 	return XEXP (op, 0);
817 
818       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
819 	 comparison is all ones.   */
820       if (COMPARISON_P (op)
821 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
822 	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
823 	return simplify_gen_relational (reversed, mode, VOIDmode,
824 					XEXP (op, 0), XEXP (op, 1));
825 
826       /* (not (plus X -1)) can become (neg X).  */
827       if (GET_CODE (op) == PLUS
828 	  && XEXP (op, 1) == constm1_rtx)
829 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
830 
831       /* Similarly, (not (neg X)) is (plus X -1).  */
832       if (GET_CODE (op) == NEG)
833 	return plus_constant (mode, XEXP (op, 0), -1);
834 
835       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
836       if (GET_CODE (op) == XOR
837 	  && CONST_INT_P (XEXP (op, 1))
838 	  && (temp = simplify_unary_operation (NOT, mode,
839 					       XEXP (op, 1), mode)) != 0)
840 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
841 
842       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
843       if (GET_CODE (op) == PLUS
844 	  && CONST_INT_P (XEXP (op, 1))
845 	  && mode_signbit_p (mode, XEXP (op, 1))
846 	  && (temp = simplify_unary_operation (NOT, mode,
847 					       XEXP (op, 1), mode)) != 0)
848 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
849 
850 
851       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
852 	 operands other than 1, but that is not valid.  We could do a
853 	 similar simplification for (not (lshiftrt C X)) where C is
854 	 just the sign bit, but this doesn't seem common enough to
855 	 bother with.  */
856       if (GET_CODE (op) == ASHIFT
857 	  && XEXP (op, 0) == const1_rtx)
858 	{
859 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
860 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
861 	}
862 
863       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
864 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
865 	 so we can perform the above simplification.  */
866 
867       if (STORE_FLAG_VALUE == -1
868 	  && GET_CODE (op) == ASHIFTRT
869 	  && GET_CODE (XEXP (op, 1))
870 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
871 	return simplify_gen_relational (GE, mode, VOIDmode,
872 					XEXP (op, 0), const0_rtx);
873 
874 
875       if (GET_CODE (op) == SUBREG
876 	  && subreg_lowpart_p (op)
877 	  && (GET_MODE_SIZE (GET_MODE (op))
878 	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
879 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
880 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
881 	{
882 	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
883 	  rtx x;
884 
885 	  x = gen_rtx_ROTATE (inner_mode,
886 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
887 						  inner_mode),
888 			      XEXP (SUBREG_REG (op), 1));
889 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
890 	  if (temp)
891 	    return temp;
892 	}
893 
894       /* Apply De Morgan's laws to reduce number of patterns for machines
895 	 with negating logical insns (and-not, nand, etc.).  If result has
896 	 only one NOT, put it first, since that is how the patterns are
897 	 coded.  */
898 
899       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
900 	{
901 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
902 	  enum machine_mode op_mode;
903 
904 	  op_mode = GET_MODE (in1);
905 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
906 
907 	  op_mode = GET_MODE (in2);
908 	  if (op_mode == VOIDmode)
909 	    op_mode = mode;
910 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
911 
912 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
913 	    {
914 	      rtx tem = in2;
915 	      in2 = in1; in1 = tem;
916 	    }
917 
918 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
919 				 mode, in1, in2);
920 	}
921       break;
922 
923     case NEG:
924       /* (neg (neg X)) == X.  */
925       if (GET_CODE (op) == NEG)
926 	return XEXP (op, 0);
927 
928       /* (neg (plus X 1)) can become (not X).  */
929       if (GET_CODE (op) == PLUS
930 	  && XEXP (op, 1) == const1_rtx)
931 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
932 
933       /* Similarly, (neg (not X)) is (plus X 1).  */
934       if (GET_CODE (op) == NOT)
935 	return plus_constant (mode, XEXP (op, 0), 1);
936 
937       /* (neg (minus X Y)) can become (minus Y X).  This transformation
938 	 isn't safe for modes with signed zeros, since if X and Y are
939 	 both +0, (minus Y X) is the same as (minus X Y).  If the
940 	 rounding mode is towards +infinity (or -infinity) then the two
941 	 expressions will be rounded differently.  */
942       if (GET_CODE (op) == MINUS
943 	  && !HONOR_SIGNED_ZEROS (mode)
944 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
945 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
946 
947       if (GET_CODE (op) == PLUS
948 	  && !HONOR_SIGNED_ZEROS (mode)
949 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
950 	{
951 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
952 	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
953 	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
954 	    {
955 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
956 	      if (temp)
957 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
958 	    }
959 
960 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
961 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
962 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
963 	}
964 
965       /* (neg (mult A B)) becomes (mult A (neg B)).
966 	 This works even for floating-point values.  */
967       if (GET_CODE (op) == MULT
968 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
969 	{
970 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
971 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
972 	}
973 
974       /* NEG commutes with ASHIFT since it is multiplication.  Only do
975 	 this if we can then eliminate the NEG (e.g., if the operand
976 	 is a constant).  */
977       if (GET_CODE (op) == ASHIFT)
978 	{
979 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
980 	  if (temp)
981 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
982 	}
983 
984       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
985 	 C is equal to the width of MODE minus 1.  */
986       if (GET_CODE (op) == ASHIFTRT
987 	  && CONST_INT_P (XEXP (op, 1))
988 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
989 	return simplify_gen_binary (LSHIFTRT, mode,
990 				    XEXP (op, 0), XEXP (op, 1));
991 
992       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
993 	 C is equal to the width of MODE minus 1.  */
994       if (GET_CODE (op) == LSHIFTRT
995 	  && CONST_INT_P (XEXP (op, 1))
996 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
997 	return simplify_gen_binary (ASHIFTRT, mode,
998 				    XEXP (op, 0), XEXP (op, 1));
999 
1000       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
1001       if (GET_CODE (op) == XOR
1002 	  && XEXP (op, 1) == const1_rtx
1003 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
1004 	return plus_constant (mode, XEXP (op, 0), -1);
1005 
1006       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
1007       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
1008       if (GET_CODE (op) == LT
1009 	  && XEXP (op, 1) == const0_rtx
1010 	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1011 	{
1012 	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
1013 	  int isize = GET_MODE_PRECISION (inner);
1014 	  if (STORE_FLAG_VALUE == 1)
1015 	    {
1016 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1017 					  GEN_INT (isize - 1));
1018 	      if (mode == inner)
1019 		return temp;
1020 	      if (GET_MODE_PRECISION (mode) > isize)
1021 		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1022 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1023 	    }
1024 	  else if (STORE_FLAG_VALUE == -1)
1025 	    {
1026 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1027 					  GEN_INT (isize - 1));
1028 	      if (mode == inner)
1029 		return temp;
1030 	      if (GET_MODE_PRECISION (mode) > isize)
1031 		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1032 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1033 	    }
1034 	}
1035       break;
1036 
1037     case TRUNCATE:
1038       /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1039 	 with the umulXi3_highpart patterns.  */
1040       if (GET_CODE (op) == LSHIFTRT
1041 	  && GET_CODE (XEXP (op, 0)) == MULT)
1042 	break;
1043 
1044       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1045 	{
1046 	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1047 	    {
1048 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1049 	      if (temp)
1050 		return temp;
1051 	    }
1052 	  /* We can't handle truncation to a partial integer mode here
1053 	     because we don't know the real bitsize of the partial
1054 	     integer mode.  */
1055 	  break;
1056 	}
1057 
1058       if (GET_MODE (op) != VOIDmode)
1059 	{
1060 	  temp = simplify_truncation (mode, op, GET_MODE (op));
1061 	  if (temp)
1062 	    return temp;
1063 	}
1064 
1065       /* If we know that the value is already truncated, we can
1066 	 replace the TRUNCATE with a SUBREG.  */
1067       if (GET_MODE_NUNITS (mode) == 1
1068 	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1069 	      || truncated_to_mode (mode, op)))
1070 	{
1071 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1072 	  if (temp)
1073 	    return temp;
1074 	}
1075 
1076       /* A truncate of a comparison can be replaced with a subreg if
1077          STORE_FLAG_VALUE permits.  This is like the previous test,
1078          but it works even if the comparison is done in a mode larger
1079          than HOST_BITS_PER_WIDE_INT.  */
1080       if (HWI_COMPUTABLE_MODE_P (mode)
1081 	  && COMPARISON_P (op)
1082 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1083 	{
1084 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1085 	  if (temp)
1086 	    return temp;
1087 	}
1088 
1089       /* A truncate of a memory is just loading the low part of the memory
1090 	 if we are not changing the meaning of the address. */
1091       if (GET_CODE (op) == MEM
1092 	  && !VECTOR_MODE_P (mode)
1093 	  && !MEM_VOLATILE_P (op)
1094 	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1095 	{
1096 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1097 	  if (temp)
1098 	    return temp;
1099 	}
1100 
1101       break;
1102 
1103     case FLOAT_TRUNCATE:
1104       if (DECIMAL_FLOAT_MODE_P (mode))
1105 	break;
1106 
1107       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
1108       if (GET_CODE (op) == FLOAT_EXTEND
1109 	  && GET_MODE (XEXP (op, 0)) == mode)
1110 	return XEXP (op, 0);
1111 
1112       /* (float_truncate:SF (float_truncate:DF foo:XF))
1113          = (float_truncate:SF foo:XF).
1114 	 This may eliminate double rounding, so it is unsafe.
1115 
1116          (float_truncate:SF (float_extend:XF foo:DF))
1117          = (float_truncate:SF foo:DF).
1118 
1119          (float_truncate:DF (float_extend:XF foo:SF))
1120          = (float_extend:SF foo:DF).  */
1121       if ((GET_CODE (op) == FLOAT_TRUNCATE
1122 	   && flag_unsafe_math_optimizations)
1123 	  || GET_CODE (op) == FLOAT_EXTEND)
1124 	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1125 							    0)))
1126 				   > GET_MODE_SIZE (mode)
1127 				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1128 				   mode,
1129 				   XEXP (op, 0), mode);
1130 
1131       /*  (float_truncate (float x)) is (float x)  */
1132       if (GET_CODE (op) == FLOAT
1133 	  && (flag_unsafe_math_optimizations
1134 	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1135 		  && ((unsigned)significand_size (GET_MODE (op))
1136 		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1137 			  - num_sign_bit_copies (XEXP (op, 0),
1138 						 GET_MODE (XEXP (op, 0))))))))
1139 	return simplify_gen_unary (FLOAT, mode,
1140 				   XEXP (op, 0),
1141 				   GET_MODE (XEXP (op, 0)));
1142 
1143       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1144 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
1145       if ((GET_CODE (op) == ABS
1146 	   || GET_CODE (op) == NEG)
1147 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1148 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1149 	return simplify_gen_unary (GET_CODE (op), mode,
1150 				   XEXP (XEXP (op, 0), 0), mode);
1151 
1152       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1153 	 is (float_truncate:SF x).  */
1154       if (GET_CODE (op) == SUBREG
1155 	  && subreg_lowpart_p (op)
1156 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1157 	return SUBREG_REG (op);
1158       break;
1159 
1160     case FLOAT_EXTEND:
1161       if (DECIMAL_FLOAT_MODE_P (mode))
1162 	break;
1163 
1164       /*  (float_extend (float_extend x)) is (float_extend x)
1165 
1166 	  (float_extend (float x)) is (float x) assuming that double
1167 	  rounding can't happen.
1168           */
1169       if (GET_CODE (op) == FLOAT_EXTEND
1170 	  || (GET_CODE (op) == FLOAT
1171 	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1172 	      && ((unsigned)significand_size (GET_MODE (op))
1173 		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1174 		      - num_sign_bit_copies (XEXP (op, 0),
1175 					     GET_MODE (XEXP (op, 0)))))))
1176 	return simplify_gen_unary (GET_CODE (op), mode,
1177 				   XEXP (op, 0),
1178 				   GET_MODE (XEXP (op, 0)));
1179 
1180       break;
1181 
1182     case ABS:
1183       /* (abs (neg <foo>)) -> (abs <foo>) */
1184       if (GET_CODE (op) == NEG)
1185 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1186 				   GET_MODE (XEXP (op, 0)));
1187 
1188       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1189          do nothing.  */
1190       if (GET_MODE (op) == VOIDmode)
1191 	break;
1192 
1193       /* If operand is something known to be positive, ignore the ABS.  */
1194       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1195 	  || val_signbit_known_clear_p (GET_MODE (op),
1196 					nonzero_bits (op, GET_MODE (op))))
1197 	return op;
1198 
1199       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
1200       if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1201 	return gen_rtx_NEG (mode, op);
1202 
1203       break;
1204 
1205     case FFS:
1206       /* (ffs (*_extend <X>)) = (ffs <X>) */
1207       if (GET_CODE (op) == SIGN_EXTEND
1208 	  || GET_CODE (op) == ZERO_EXTEND)
1209 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1210 				   GET_MODE (XEXP (op, 0)));
1211       break;
1212 
1213     case POPCOUNT:
1214       switch (GET_CODE (op))
1215 	{
1216 	case BSWAP:
1217 	case ZERO_EXTEND:
1218 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
1219 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1220 				     GET_MODE (XEXP (op, 0)));
1221 
1222 	case ROTATE:
1223 	case ROTATERT:
1224 	  /* Rotations don't affect popcount.  */
1225 	  if (!side_effects_p (XEXP (op, 1)))
1226 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1227 				       GET_MODE (XEXP (op, 0)));
1228 	  break;
1229 
1230 	default:
1231 	  break;
1232 	}
1233       break;
1234 
1235     case PARITY:
1236       switch (GET_CODE (op))
1237 	{
1238 	case NOT:
1239 	case BSWAP:
1240 	case ZERO_EXTEND:
1241 	case SIGN_EXTEND:
1242 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1243 				     GET_MODE (XEXP (op, 0)));
1244 
1245 	case ROTATE:
1246 	case ROTATERT:
1247 	  /* Rotations don't affect parity.  */
1248 	  if (!side_effects_p (XEXP (op, 1)))
1249 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1250 				       GET_MODE (XEXP (op, 0)));
1251 	  break;
1252 
1253 	default:
1254 	  break;
1255 	}
1256       break;
1257 
1258     case BSWAP:
1259       /* (bswap (bswap x)) -> x.  */
1260       if (GET_CODE (op) == BSWAP)
1261 	return XEXP (op, 0);
1262       break;
1263 
1264     case FLOAT:
1265       /* (float (sign_extend <X>)) = (float <X>).  */
1266       if (GET_CODE (op) == SIGN_EXTEND)
1267 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1268 				   GET_MODE (XEXP (op, 0)));
1269       break;
1270 
1271     case SIGN_EXTEND:
1272       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1273 	 becomes just the MINUS if its mode is MODE.  This allows
1274 	 folding switch statements on machines using casesi (such as
1275 	 the VAX).  */
1276       if (GET_CODE (op) == TRUNCATE
1277 	  && GET_MODE (XEXP (op, 0)) == mode
1278 	  && GET_CODE (XEXP (op, 0)) == MINUS
1279 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1280 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1281 	return XEXP (op, 0);
1282 
1283       /* Extending a widening multiplication should be canonicalized to
1284 	 a wider widening multiplication.  */
1285       if (GET_CODE (op) == MULT)
1286 	{
1287 	  rtx lhs = XEXP (op, 0);
1288 	  rtx rhs = XEXP (op, 1);
1289 	  enum rtx_code lcode = GET_CODE (lhs);
1290 	  enum rtx_code rcode = GET_CODE (rhs);
1291 
1292 	  /* Widening multiplies usually extend both operands, but sometimes
1293 	     they use a shift to extract a portion of a register.  */
1294 	  if ((lcode == SIGN_EXTEND
1295 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1296 	      && (rcode == SIGN_EXTEND
1297 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1298 	    {
1299 	      enum machine_mode lmode = GET_MODE (lhs);
1300 	      enum machine_mode rmode = GET_MODE (rhs);
1301 	      int bits;
1302 
1303 	      if (lcode == ASHIFTRT)
1304 		/* Number of bits not shifted off the end.  */
1305 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1306 	      else /* lcode == SIGN_EXTEND */
1307 		/* Size of inner mode.  */
1308 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1309 
1310 	      if (rcode == ASHIFTRT)
1311 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1312 	      else /* rcode == SIGN_EXTEND */
1313 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1314 
1315 	      /* We can only widen multiplies if the result is mathematiclly
1316 		 equivalent.  I.e. if overflow was impossible.  */
1317 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1318 		return simplify_gen_binary
1319 			 (MULT, mode,
1320 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1321 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1322 	    }
1323 	}
1324 
1325       /* Check for a sign extension of a subreg of a promoted
1326 	 variable, where the promotion is sign-extended, and the
1327 	 target mode is the same as the variable's promotion.  */
1328       if (GET_CODE (op) == SUBREG
1329 	  && SUBREG_PROMOTED_VAR_P (op)
1330 	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1331 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1332 	{
1333 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1334 	  if (temp)
1335 	    return temp;
1336 	}
1337 
1338       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1339 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1340       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1341 	{
1342 	  gcc_assert (GET_MODE_BITSIZE (mode)
1343 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1344 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1345 				     GET_MODE (XEXP (op, 0)));
1346 	}
1347 
1348       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1349 	 is (sign_extend:M (subreg:O <X>)) if there is mode with
1350 	 GET_MODE_BITSIZE (N) - I bits.
1351 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1352 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1353       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1354 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1355 	  && CONST_INT_P (XEXP (op, 1))
1356 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1357 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1358 	{
1359 	  enum machine_mode tmode
1360 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1361 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1362 	  gcc_assert (GET_MODE_BITSIZE (mode)
1363 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1364 	  if (tmode != BLKmode)
1365 	    {
1366 	      rtx inner =
1367 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1368 	      if (inner)
1369 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1370 					   ? SIGN_EXTEND : ZERO_EXTEND,
1371 					   mode, inner, tmode);
1372 	    }
1373 	}
1374 
1375 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1376       /* As we do not know which address space the pointer is referring to,
1377 	 we can do this only if the target does not support different pointer
1378 	 or address modes depending on the address space.  */
1379       if (target_default_pointer_address_modes_p ()
1380 	  && ! POINTERS_EXTEND_UNSIGNED
1381 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1382 	  && (CONSTANT_P (op)
1383 	      || (GET_CODE (op) == SUBREG
1384 		  && REG_P (SUBREG_REG (op))
1385 		  && REG_POINTER (SUBREG_REG (op))
1386 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1387 	return convert_memory_address (Pmode, op);
1388 #endif
1389       break;
1390 
1391     case ZERO_EXTEND:
1392       /* Check for a zero extension of a subreg of a promoted
1393 	 variable, where the promotion is zero-extended, and the
1394 	 target mode is the same as the variable's promotion.  */
1395       if (GET_CODE (op) == SUBREG
1396 	  && SUBREG_PROMOTED_VAR_P (op)
1397 	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1398 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1399 	{
1400 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1401 	  if (temp)
1402 	    return temp;
1403 	}
1404 
1405       /* Extending a widening multiplication should be canonicalized to
1406 	 a wider widening multiplication.  */
1407       if (GET_CODE (op) == MULT)
1408 	{
1409 	  rtx lhs = XEXP (op, 0);
1410 	  rtx rhs = XEXP (op, 1);
1411 	  enum rtx_code lcode = GET_CODE (lhs);
1412 	  enum rtx_code rcode = GET_CODE (rhs);
1413 
1414 	  /* Widening multiplies usually extend both operands, but sometimes
1415 	     they use a shift to extract a portion of a register.  */
1416 	  if ((lcode == ZERO_EXTEND
1417 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1418 	      && (rcode == ZERO_EXTEND
1419 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1420 	    {
1421 	      enum machine_mode lmode = GET_MODE (lhs);
1422 	      enum machine_mode rmode = GET_MODE (rhs);
1423 	      int bits;
1424 
1425 	      if (lcode == LSHIFTRT)
1426 		/* Number of bits not shifted off the end.  */
1427 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1428 	      else /* lcode == ZERO_EXTEND */
1429 		/* Size of inner mode.  */
1430 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1431 
1432 	      if (rcode == LSHIFTRT)
1433 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1434 	      else /* rcode == ZERO_EXTEND */
1435 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1436 
1437 	      /* We can only widen multiplies if the result is mathematiclly
1438 		 equivalent.  I.e. if overflow was impossible.  */
1439 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1440 		return simplify_gen_binary
1441 			 (MULT, mode,
1442 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1443 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1444 	    }
1445 	}
1446 
1447       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1448       if (GET_CODE (op) == ZERO_EXTEND)
1449 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1450 				   GET_MODE (XEXP (op, 0)));
1451 
1452       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1453 	 is (zero_extend:M (subreg:O <X>)) if there is mode with
1454 	 GET_MODE_BITSIZE (N) - I bits.  */
1455       if (GET_CODE (op) == LSHIFTRT
1456 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1457 	  && CONST_INT_P (XEXP (op, 1))
1458 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1459 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1460 	{
1461 	  enum machine_mode tmode
1462 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1463 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1464 	  if (tmode != BLKmode)
1465 	    {
1466 	      rtx inner =
1467 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1468 	      if (inner)
1469 		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1470 	    }
1471 	}
1472 
1473 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1474       /* As we do not know which address space the pointer is referring to,
1475 	 we can do this only if the target does not support different pointer
1476 	 or address modes depending on the address space.  */
1477       if (target_default_pointer_address_modes_p ()
1478 	  && POINTERS_EXTEND_UNSIGNED > 0
1479 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1480 	  && (CONSTANT_P (op)
1481 	      || (GET_CODE (op) == SUBREG
1482 		  && REG_P (SUBREG_REG (op))
1483 		  && REG_POINTER (SUBREG_REG (op))
1484 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1485 	return convert_memory_address (Pmode, op);
1486 #endif
1487       break;
1488 
1489     default:
1490       break;
1491     }
1492 
1493   return 0;
1494 }
1495 
1496 /* Try to compute the value of a unary operation CODE whose output mode is to
1497    be MODE with input operand OP whose mode was originally OP_MODE.
1498    Return zero if the value cannot be computed.  */
1499 rtx
1500 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1501 				rtx op, enum machine_mode op_mode)
1502 {
1503   unsigned int width = GET_MODE_PRECISION (mode);
1504   unsigned int op_width = GET_MODE_PRECISION (op_mode);
1505 
1506   if (code == VEC_DUPLICATE)
1507     {
1508       gcc_assert (VECTOR_MODE_P (mode));
1509       if (GET_MODE (op) != VOIDmode)
1510       {
1511 	if (!VECTOR_MODE_P (GET_MODE (op)))
1512 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1513 	else
1514 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1515 						(GET_MODE (op)));
1516       }
1517       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1518 	  || GET_CODE (op) == CONST_VECTOR)
1519 	{
1520           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1521           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1522 	  rtvec v = rtvec_alloc (n_elts);
1523 	  unsigned int i;
1524 
1525 	  if (GET_CODE (op) != CONST_VECTOR)
1526 	    for (i = 0; i < n_elts; i++)
1527 	      RTVEC_ELT (v, i) = op;
1528 	  else
1529 	    {
1530 	      enum machine_mode inmode = GET_MODE (op);
1531               int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1532               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1533 
1534 	      gcc_assert (in_n_elts < n_elts);
1535 	      gcc_assert ((n_elts % in_n_elts) == 0);
1536 	      for (i = 0; i < n_elts; i++)
1537 	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1538 	    }
1539 	  return gen_rtx_CONST_VECTOR (mode, v);
1540 	}
1541     }
1542 
1543   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1544     {
1545       int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1546       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1547       enum machine_mode opmode = GET_MODE (op);
1548       int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1549       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1550       rtvec v = rtvec_alloc (n_elts);
1551       unsigned int i;
1552 
1553       gcc_assert (op_n_elts == n_elts);
1554       for (i = 0; i < n_elts; i++)
1555 	{
1556 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1557 					    CONST_VECTOR_ELT (op, i),
1558 					    GET_MODE_INNER (opmode));
1559 	  if (!x)
1560 	    return 0;
1561 	  RTVEC_ELT (v, i) = x;
1562 	}
1563       return gen_rtx_CONST_VECTOR (mode, v);
1564     }
1565 
1566   /* The order of these tests is critical so that, for example, we don't
1567      check the wrong mode (input vs. output) for a conversion operation,
1568      such as FIX.  At some point, this should be simplified.  */
1569 
1570   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1571     {
1572       HOST_WIDE_INT hv, lv;
1573       REAL_VALUE_TYPE d;
1574 
1575       if (CONST_INT_P (op))
1576 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1577       else
1578 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1579 
1580       REAL_VALUE_FROM_INT (d, lv, hv, mode);
1581       d = real_value_truncate (mode, d);
1582       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1583     }
1584   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1585     {
1586       HOST_WIDE_INT hv, lv;
1587       REAL_VALUE_TYPE d;
1588 
1589       if (CONST_INT_P (op))
1590 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1591       else
1592 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1593 
1594       if (op_mode == VOIDmode
1595 	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1596 	/* We should never get a negative number.  */
1597 	gcc_assert (hv >= 0);
1598       else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1599 	hv = 0, lv &= GET_MODE_MASK (op_mode);
1600 
1601       REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1602       d = real_value_truncate (mode, d);
1603       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1604     }
1605 
1606   if (CONST_INT_P (op)
1607       && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1608     {
1609       HOST_WIDE_INT arg0 = INTVAL (op);
1610       HOST_WIDE_INT val;
1611 
1612       switch (code)
1613 	{
1614 	case NOT:
1615 	  val = ~ arg0;
1616 	  break;
1617 
1618 	case NEG:
1619 	  val = - arg0;
1620 	  break;
1621 
1622 	case ABS:
1623 	  val = (arg0 >= 0 ? arg0 : - arg0);
1624 	  break;
1625 
1626 	case FFS:
1627 	  arg0 &= GET_MODE_MASK (mode);
1628 	  val = ffs_hwi (arg0);
1629 	  break;
1630 
1631 	case CLZ:
1632 	  arg0 &= GET_MODE_MASK (mode);
1633 	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1634 	    ;
1635 	  else
1636 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1637 	  break;
1638 
1639 	case CLRSB:
1640 	  arg0 &= GET_MODE_MASK (mode);
1641 	  if (arg0 == 0)
1642 	    val = GET_MODE_PRECISION (mode) - 1;
1643 	  else if (arg0 >= 0)
1644 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1645 	  else if (arg0 < 0)
1646 	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1647 	  break;
1648 
1649 	case CTZ:
1650 	  arg0 &= GET_MODE_MASK (mode);
1651 	  if (arg0 == 0)
1652 	    {
1653 	      /* Even if the value at zero is undefined, we have to come
1654 		 up with some replacement.  Seems good enough.  */
1655 	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1656 		val = GET_MODE_PRECISION (mode);
1657 	    }
1658 	  else
1659 	    val = ctz_hwi (arg0);
1660 	  break;
1661 
1662 	case POPCOUNT:
1663 	  arg0 &= GET_MODE_MASK (mode);
1664 	  val = 0;
1665 	  while (arg0)
1666 	    val++, arg0 &= arg0 - 1;
1667 	  break;
1668 
1669 	case PARITY:
1670 	  arg0 &= GET_MODE_MASK (mode);
1671 	  val = 0;
1672 	  while (arg0)
1673 	    val++, arg0 &= arg0 - 1;
1674 	  val &= 1;
1675 	  break;
1676 
1677 	case BSWAP:
1678 	  {
1679 	    unsigned int s;
1680 
1681 	    val = 0;
1682 	    for (s = 0; s < width; s += 8)
1683 	      {
1684 		unsigned int d = width - s - 8;
1685 		unsigned HOST_WIDE_INT byte;
1686 		byte = (arg0 >> s) & 0xff;
1687 		val |= byte << d;
1688 	      }
1689 	  }
1690 	  break;
1691 
1692 	case TRUNCATE:
1693 	  val = arg0;
1694 	  break;
1695 
1696 	case ZERO_EXTEND:
1697 	  /* When zero-extending a CONST_INT, we need to know its
1698              original mode.  */
1699 	  gcc_assert (op_mode != VOIDmode);
1700 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1701 	    {
1702 	      /* If we were really extending the mode,
1703 		 we would have to distinguish between zero-extension
1704 		 and sign-extension.  */
1705 	      gcc_assert (width == op_width);
1706 	      val = arg0;
1707 	    }
1708 	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1709 	    val = arg0 & GET_MODE_MASK (op_mode);
1710 	  else
1711 	    return 0;
1712 	  break;
1713 
1714 	case SIGN_EXTEND:
1715 	  if (op_mode == VOIDmode)
1716 	    op_mode = mode;
1717 	  op_width = GET_MODE_PRECISION (op_mode);
1718 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1719 	    {
1720 	      /* If we were really extending the mode,
1721 		 we would have to distinguish between zero-extension
1722 		 and sign-extension.  */
1723 	      gcc_assert (width == op_width);
1724 	      val = arg0;
1725 	    }
1726 	  else if (op_width < HOST_BITS_PER_WIDE_INT)
1727 	    {
1728 	      val = arg0 & GET_MODE_MASK (op_mode);
1729 	      if (val_signbit_known_set_p (op_mode, val))
1730 		val |= ~GET_MODE_MASK (op_mode);
1731 	    }
1732 	  else
1733 	    return 0;
1734 	  break;
1735 
1736 	case SQRT:
1737 	case FLOAT_EXTEND:
1738 	case FLOAT_TRUNCATE:
1739 	case SS_TRUNCATE:
1740 	case US_TRUNCATE:
1741 	case SS_NEG:
1742 	case US_NEG:
1743 	case SS_ABS:
1744 	  return 0;
1745 
1746 	default:
1747 	  gcc_unreachable ();
1748 	}
1749 
1750       return gen_int_mode (val, mode);
1751     }
1752 
1753   /* We can do some operations on integer CONST_DOUBLEs.  Also allow
1754      for a DImode operation on a CONST_INT.  */
1755   else if (width <= HOST_BITS_PER_DOUBLE_INT
1756 	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1757     {
1758       double_int first, value;
1759 
1760       if (CONST_DOUBLE_AS_INT_P (op))
1761 	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1762 				       CONST_DOUBLE_LOW (op));
1763       else
1764 	first = double_int::from_shwi (INTVAL (op));
1765 
1766       switch (code)
1767 	{
1768 	case NOT:
1769 	  value = ~first;
1770 	  break;
1771 
1772 	case NEG:
1773 	  value = -first;
1774 	  break;
1775 
1776 	case ABS:
1777 	  if (first.is_negative ())
1778 	    value = -first;
1779 	  else
1780 	    value = first;
1781 	  break;
1782 
1783 	case FFS:
1784 	  value.high = 0;
1785 	  if (first.low != 0)
1786 	    value.low = ffs_hwi (first.low);
1787 	  else if (first.high != 0)
1788 	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1789 	  else
1790 	    value.low = 0;
1791 	  break;
1792 
1793 	case CLZ:
1794 	  value.high = 0;
1795 	  if (first.high != 0)
1796 	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1797 	              - HOST_BITS_PER_WIDE_INT;
1798 	  else if (first.low != 0)
1799 	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1800 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1801 	    value.low = GET_MODE_PRECISION (mode);
1802 	  break;
1803 
1804 	case CTZ:
1805 	  value.high = 0;
1806 	  if (first.low != 0)
1807 	    value.low = ctz_hwi (first.low);
1808 	  else if (first.high != 0)
1809 	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1810 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1811 	    value.low = GET_MODE_PRECISION (mode);
1812 	  break;
1813 
1814 	case POPCOUNT:
1815 	  value = double_int_zero;
1816 	  while (first.low)
1817 	    {
1818 	      value.low++;
1819 	      first.low &= first.low - 1;
1820 	    }
1821 	  while (first.high)
1822 	    {
1823 	      value.low++;
1824 	      first.high &= first.high - 1;
1825 	    }
1826 	  break;
1827 
1828 	case PARITY:
1829 	  value = double_int_zero;
1830 	  while (first.low)
1831 	    {
1832 	      value.low++;
1833 	      first.low &= first.low - 1;
1834 	    }
1835 	  while (first.high)
1836 	    {
1837 	      value.low++;
1838 	      first.high &= first.high - 1;
1839 	    }
1840 	  value.low &= 1;
1841 	  break;
1842 
1843 	case BSWAP:
1844 	  {
1845 	    unsigned int s;
1846 
1847 	    value = double_int_zero;
1848 	    for (s = 0; s < width; s += 8)
1849 	      {
1850 		unsigned int d = width - s - 8;
1851 		unsigned HOST_WIDE_INT byte;
1852 
1853 		if (s < HOST_BITS_PER_WIDE_INT)
1854 		  byte = (first.low >> s) & 0xff;
1855 		else
1856 		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1857 
1858 		if (d < HOST_BITS_PER_WIDE_INT)
1859 		  value.low |= byte << d;
1860 		else
1861 		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1862 	      }
1863 	  }
1864 	  break;
1865 
1866 	case TRUNCATE:
1867 	  /* This is just a change-of-mode, so do nothing.  */
1868 	  value = first;
1869 	  break;
1870 
1871 	case ZERO_EXTEND:
1872 	  gcc_assert (op_mode != VOIDmode);
1873 
1874 	  if (op_width > HOST_BITS_PER_WIDE_INT)
1875 	    return 0;
1876 
1877 	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1878 	  break;
1879 
1880 	case SIGN_EXTEND:
1881 	  if (op_mode == VOIDmode
1882 	      || op_width > HOST_BITS_PER_WIDE_INT)
1883 	    return 0;
1884 	  else
1885 	    {
1886 	      value.low = first.low & GET_MODE_MASK (op_mode);
1887 	      if (val_signbit_known_set_p (op_mode, value.low))
1888 		value.low |= ~GET_MODE_MASK (op_mode);
1889 
1890 	      value.high = HWI_SIGN_EXTEND (value.low);
1891 	    }
1892 	  break;
1893 
1894 	case SQRT:
1895 	  return 0;
1896 
1897 	default:
1898 	  return 0;
1899 	}
1900 
1901       return immed_double_int_const (value, mode);
1902     }
1903 
1904   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1905 	   && SCALAR_FLOAT_MODE_P (mode)
1906 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1907     {
1908       REAL_VALUE_TYPE d, t;
1909       REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1910 
1911       switch (code)
1912 	{
1913 	case SQRT:
1914 	  if (HONOR_SNANS (mode) && real_isnan (&d))
1915 	    return 0;
1916 	  real_sqrt (&t, mode, &d);
1917 	  d = t;
1918 	  break;
1919 	case ABS:
1920 	  d = real_value_abs (&d);
1921 	  break;
1922 	case NEG:
1923 	  d = real_value_negate (&d);
1924 	  break;
1925 	case FLOAT_TRUNCATE:
1926 	  d = real_value_truncate (mode, d);
1927 	  break;
1928 	case FLOAT_EXTEND:
1929 	  /* All this does is change the mode, unless changing
1930 	     mode class.  */
1931 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1932 	    real_convert (&d, mode, &d);
1933 	  break;
1934 	case FIX:
1935 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1936 	  break;
1937 	case NOT:
1938 	  {
1939 	    long tmp[4];
1940 	    int i;
1941 
1942 	    real_to_target (tmp, &d, GET_MODE (op));
1943 	    for (i = 0; i < 4; i++)
1944 	      tmp[i] = ~tmp[i];
1945 	    real_from_target (&d, tmp, mode);
1946 	    break;
1947 	  }
1948 	default:
1949 	  gcc_unreachable ();
1950 	}
1951       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1952     }
1953 
1954   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1955 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1956 	   && GET_MODE_CLASS (mode) == MODE_INT
1957 	   && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1958     {
1959       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1960 	 operators are intentionally left unspecified (to ease implementation
1961 	 by target backends), for consistency, this routine implements the
1962 	 same semantics for constant folding as used by the middle-end.  */
1963 
1964       /* This was formerly used only for non-IEEE float.
1965 	 eggert@twinsun.com says it is safe for IEEE also.  */
1966       HOST_WIDE_INT xh, xl, th, tl;
1967       REAL_VALUE_TYPE x, t;
1968       REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1969       switch (code)
1970 	{
1971 	case FIX:
1972 	  if (REAL_VALUE_ISNAN (x))
1973 	    return const0_rtx;
1974 
1975 	  /* Test against the signed upper bound.  */
1976 	  if (width > HOST_BITS_PER_WIDE_INT)
1977 	    {
1978 	      th = ((unsigned HOST_WIDE_INT) 1
1979 		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1980 	      tl = -1;
1981 	    }
1982 	  else
1983 	    {
1984 	      th = 0;
1985 	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1986 	    }
1987 	  real_from_integer (&t, VOIDmode, tl, th, 0);
1988 	  if (REAL_VALUES_LESS (t, x))
1989 	    {
1990 	      xh = th;
1991 	      xl = tl;
1992 	      break;
1993 	    }
1994 
1995 	  /* Test against the signed lower bound.  */
1996 	  if (width > HOST_BITS_PER_WIDE_INT)
1997 	    {
1998 	      th = (unsigned HOST_WIDE_INT) (-1)
1999 		   << (width - HOST_BITS_PER_WIDE_INT - 1);
2000 	      tl = 0;
2001 	    }
2002 	  else
2003 	    {
2004 	      th = -1;
2005 	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
2006 	    }
2007 	  real_from_integer (&t, VOIDmode, tl, th, 0);
2008 	  if (REAL_VALUES_LESS (x, t))
2009 	    {
2010 	      xh = th;
2011 	      xl = tl;
2012 	      break;
2013 	    }
2014 	  REAL_VALUE_TO_INT (&xl, &xh, x);
2015 	  break;
2016 
2017 	case UNSIGNED_FIX:
2018 	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2019 	    return const0_rtx;
2020 
2021 	  /* Test against the unsigned upper bound.  */
2022 	  if (width == HOST_BITS_PER_DOUBLE_INT)
2023 	    {
2024 	      th = -1;
2025 	      tl = -1;
2026 	    }
2027 	  else if (width >= HOST_BITS_PER_WIDE_INT)
2028 	    {
2029 	      th = ((unsigned HOST_WIDE_INT) 1
2030 		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2031 	      tl = -1;
2032 	    }
2033 	  else
2034 	    {
2035 	      th = 0;
2036 	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2037 	    }
2038 	  real_from_integer (&t, VOIDmode, tl, th, 1);
2039 	  if (REAL_VALUES_LESS (t, x))
2040 	    {
2041 	      xh = th;
2042 	      xl = tl;
2043 	      break;
2044 	    }
2045 
2046 	  REAL_VALUE_TO_INT (&xl, &xh, x);
2047 	  break;
2048 
2049 	default:
2050 	  gcc_unreachable ();
2051 	}
2052       return immed_double_const (xl, xh, mode);
2053     }
2054 
2055   return NULL_RTX;
2056 }
2057 
2058 /* Subroutine of simplify_binary_operation to simplify a commutative,
2059    associative binary operation CODE with result mode MODE, operating
2060    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2061    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
2062    canonicalization is possible.  */
2063 
2064 static rtx
2065 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2066 				rtx op0, rtx op1)
2067 {
2068   rtx tem;
2069 
2070   /* Linearize the operator to the left.  */
2071   if (GET_CODE (op1) == code)
2072     {
2073       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
2074       if (GET_CODE (op0) == code)
2075 	{
2076 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2077 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2078 	}
2079 
2080       /* "a op (b op c)" becomes "(b op c) op a".  */
2081       if (! swap_commutative_operands_p (op1, op0))
2082 	return simplify_gen_binary (code, mode, op1, op0);
2083 
2084       tem = op0;
2085       op0 = op1;
2086       op1 = tem;
2087     }
2088 
2089   if (GET_CODE (op0) == code)
2090     {
2091       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
2092       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2093 	{
2094 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2095 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2096 	}
2097 
2098       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
2099       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2100       if (tem != 0)
2101         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2102 
2103       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
2104       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2105       if (tem != 0)
2106         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2107     }
2108 
2109   return 0;
2110 }
2111 
2112 
2113 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2114    and OP1.  Return 0 if no simplification is possible.
2115 
2116    Don't use this for relational operations such as EQ or LT.
2117    Use simplify_relational_operation instead.  */
2118 rtx
2119 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2120 			   rtx op0, rtx op1)
2121 {
2122   rtx trueop0, trueop1;
2123   rtx tem;
2124 
2125   /* Relational operations don't work here.  We must know the mode
2126      of the operands in order to do the comparison correctly.
2127      Assuming a full word can give incorrect results.
2128      Consider comparing 128 with -128 in QImode.  */
2129   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2130   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2131 
2132   /* Make sure the constant is second.  */
2133   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2134       && swap_commutative_operands_p (op0, op1))
2135     {
2136       tem = op0, op0 = op1, op1 = tem;
2137     }
2138 
2139   trueop0 = avoid_constant_pool_reference (op0);
2140   trueop1 = avoid_constant_pool_reference (op1);
2141 
2142   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2143   if (tem)
2144     return tem;
2145   return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2146 }
2147 
2148 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2149    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2150    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2151    actual constants.  */
2152 
2153 static rtx
2154 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2155 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2156 {
2157   rtx tem, reversed, opleft, opright;
2158   HOST_WIDE_INT val;
2159   unsigned int width = GET_MODE_PRECISION (mode);
2160 
2161   /* Even if we can't compute a constant result,
2162      there are some cases worth simplifying.  */
2163 
2164   switch (code)
2165     {
2166     case PLUS:
2167       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2168 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2169 	 when x is -0 and the rounding mode is not towards -infinity,
2170 	 since (-0) + 0 is then 0.  */
2171       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2172 	return op0;
2173 
2174       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2175 	 transformations are safe even for IEEE.  */
2176       if (GET_CODE (op0) == NEG)
2177 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2178       else if (GET_CODE (op1) == NEG)
2179 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2180 
2181       /* (~a) + 1 -> -a */
2182       if (INTEGRAL_MODE_P (mode)
2183 	  && GET_CODE (op0) == NOT
2184 	  && trueop1 == const1_rtx)
2185 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2186 
2187       /* Handle both-operands-constant cases.  We can only add
2188 	 CONST_INTs to constants since the sum of relocatable symbols
2189 	 can't be handled by most assemblers.  Don't add CONST_INT
2190 	 to CONST_INT since overflow won't be computed properly if wider
2191 	 than HOST_BITS_PER_WIDE_INT.  */
2192 
2193       if ((GET_CODE (op0) == CONST
2194 	   || GET_CODE (op0) == SYMBOL_REF
2195 	   || GET_CODE (op0) == LABEL_REF)
2196 	  && CONST_INT_P (op1))
2197 	return plus_constant (mode, op0, INTVAL (op1));
2198       else if ((GET_CODE (op1) == CONST
2199 		|| GET_CODE (op1) == SYMBOL_REF
2200 		|| GET_CODE (op1) == LABEL_REF)
2201 	       && CONST_INT_P (op0))
2202 	return plus_constant (mode, op1, INTVAL (op0));
2203 
2204       /* See if this is something like X * C - X or vice versa or
2205 	 if the multiplication is written as a shift.  If so, we can
2206 	 distribute and make a new multiply, shift, or maybe just
2207 	 have X (if C is 2 in the example above).  But don't make
2208 	 something more expensive than we had before.  */
2209 
2210       if (SCALAR_INT_MODE_P (mode))
2211 	{
2212 	  double_int coeff0, coeff1;
2213 	  rtx lhs = op0, rhs = op1;
2214 
2215 	  coeff0 = double_int_one;
2216 	  coeff1 = double_int_one;
2217 
2218 	  if (GET_CODE (lhs) == NEG)
2219 	    {
2220 	      coeff0 = double_int_minus_one;
2221 	      lhs = XEXP (lhs, 0);
2222 	    }
2223 	  else if (GET_CODE (lhs) == MULT
2224 		   && CONST_INT_P (XEXP (lhs, 1)))
2225 	    {
2226 	      coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2227 	      lhs = XEXP (lhs, 0);
2228 	    }
2229 	  else if (GET_CODE (lhs) == ASHIFT
2230 		   && CONST_INT_P (XEXP (lhs, 1))
2231                    && INTVAL (XEXP (lhs, 1)) >= 0
2232 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2233 	    {
2234 	      coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2235 	      lhs = XEXP (lhs, 0);
2236 	    }
2237 
2238 	  if (GET_CODE (rhs) == NEG)
2239 	    {
2240 	      coeff1 = double_int_minus_one;
2241 	      rhs = XEXP (rhs, 0);
2242 	    }
2243 	  else if (GET_CODE (rhs) == MULT
2244 		   && CONST_INT_P (XEXP (rhs, 1)))
2245 	    {
2246 	      coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2247 	      rhs = XEXP (rhs, 0);
2248 	    }
2249 	  else if (GET_CODE (rhs) == ASHIFT
2250 		   && CONST_INT_P (XEXP (rhs, 1))
2251 		   && INTVAL (XEXP (rhs, 1)) >= 0
2252 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2253 	    {
2254 	      coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2255 	      rhs = XEXP (rhs, 0);
2256 	    }
2257 
2258 	  if (rtx_equal_p (lhs, rhs))
2259 	    {
2260 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
2261 	      rtx coeff;
2262 	      double_int val;
2263 	      bool speed = optimize_function_for_speed_p (cfun);
2264 
2265 	      val = coeff0 + coeff1;
2266 	      coeff = immed_double_int_const (val, mode);
2267 
2268 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2269 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2270 		? tem : 0;
2271 	    }
2272 	}
2273 
2274       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
2275       if (CONST_SCALAR_INT_P (op1)
2276 	  && GET_CODE (op0) == XOR
2277 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2278 	  && mode_signbit_p (mode, op1))
2279 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2280 				    simplify_gen_binary (XOR, mode, op1,
2281 							 XEXP (op0, 1)));
2282 
2283       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2284       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2285 	  && GET_CODE (op0) == MULT
2286 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2287 	{
2288 	  rtx in1, in2;
2289 
2290 	  in1 = XEXP (XEXP (op0, 0), 0);
2291 	  in2 = XEXP (op0, 1);
2292 	  return simplify_gen_binary (MINUS, mode, op1,
2293 				      simplify_gen_binary (MULT, mode,
2294 							   in1, in2));
2295 	}
2296 
2297       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2298 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2299 	 is 1.  */
2300       if (COMPARISON_P (op0)
2301 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2302 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2303 	  && (reversed = reversed_comparison (op0, mode)))
2304 	return
2305 	  simplify_gen_unary (NEG, mode, reversed, mode);
2306 
2307       /* If one of the operands is a PLUS or a MINUS, see if we can
2308 	 simplify this by the associative law.
2309 	 Don't use the associative law for floating point.
2310 	 The inaccuracy makes it nonassociative,
2311 	 and subtle programs can break if operations are associated.  */
2312 
2313       if (INTEGRAL_MODE_P (mode)
2314 	  && (plus_minus_operand_p (op0)
2315 	      || plus_minus_operand_p (op1))
2316 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2317 	return tem;
2318 
2319       /* Reassociate floating point addition only when the user
2320 	 specifies associative math operations.  */
2321       if (FLOAT_MODE_P (mode)
2322 	  && flag_associative_math)
2323 	{
2324 	  tem = simplify_associative_operation (code, mode, op0, op1);
2325 	  if (tem)
2326 	    return tem;
2327 	}
2328       break;
2329 
2330     case COMPARE:
2331       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2332       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2333 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2334 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2335 	{
2336 	  rtx xop00 = XEXP (op0, 0);
2337 	  rtx xop10 = XEXP (op1, 0);
2338 
2339 #ifdef HAVE_cc0
2340 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2341 #else
2342 	    if (REG_P (xop00) && REG_P (xop10)
2343 		&& GET_MODE (xop00) == GET_MODE (xop10)
2344 		&& REGNO (xop00) == REGNO (xop10)
2345 		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2346 		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2347 #endif
2348 	      return xop00;
2349 	}
2350       break;
2351 
2352     case MINUS:
2353       /* We can't assume x-x is 0 even with non-IEEE floating point,
2354 	 but since it is zero except in very strange circumstances, we
2355 	 will treat it as zero with -ffinite-math-only.  */
2356       if (rtx_equal_p (trueop0, trueop1)
2357 	  && ! side_effects_p (op0)
2358 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2359 	return CONST0_RTX (mode);
2360 
2361       /* Change subtraction from zero into negation.  (0 - x) is the
2362 	 same as -x when x is NaN, infinite, or finite and nonzero.
2363 	 But if the mode has signed zeros, and does not round towards
2364 	 -infinity, then 0 - 0 is 0, not -0.  */
2365       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2366 	return simplify_gen_unary (NEG, mode, op1, mode);
2367 
2368       /* (-1 - a) is ~a.  */
2369       if (trueop0 == constm1_rtx)
2370 	return simplify_gen_unary (NOT, mode, op1, mode);
2371 
2372       /* Subtracting 0 has no effect unless the mode has signed zeros
2373 	 and supports rounding towards -infinity.  In such a case,
2374 	 0 - 0 is -0.  */
2375       if (!(HONOR_SIGNED_ZEROS (mode)
2376 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2377 	  && trueop1 == CONST0_RTX (mode))
2378 	return op0;
2379 
2380       /* See if this is something like X * C - X or vice versa or
2381 	 if the multiplication is written as a shift.  If so, we can
2382 	 distribute and make a new multiply, shift, or maybe just
2383 	 have X (if C is 2 in the example above).  But don't make
2384 	 something more expensive than we had before.  */
2385 
2386       if (SCALAR_INT_MODE_P (mode))
2387 	{
2388 	  double_int coeff0, negcoeff1;
2389 	  rtx lhs = op0, rhs = op1;
2390 
2391 	  coeff0 = double_int_one;
2392 	  negcoeff1 = double_int_minus_one;
2393 
2394 	  if (GET_CODE (lhs) == NEG)
2395 	    {
2396 	      coeff0 = double_int_minus_one;
2397 	      lhs = XEXP (lhs, 0);
2398 	    }
2399 	  else if (GET_CODE (lhs) == MULT
2400 		   && CONST_INT_P (XEXP (lhs, 1)))
2401 	    {
2402 	      coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2403 	      lhs = XEXP (lhs, 0);
2404 	    }
2405 	  else if (GET_CODE (lhs) == ASHIFT
2406 		   && CONST_INT_P (XEXP (lhs, 1))
2407 		   && INTVAL (XEXP (lhs, 1)) >= 0
2408 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2409 	    {
2410 	      coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2411 	      lhs = XEXP (lhs, 0);
2412 	    }
2413 
2414 	  if (GET_CODE (rhs) == NEG)
2415 	    {
2416 	      negcoeff1 = double_int_one;
2417 	      rhs = XEXP (rhs, 0);
2418 	    }
2419 	  else if (GET_CODE (rhs) == MULT
2420 		   && CONST_INT_P (XEXP (rhs, 1)))
2421 	    {
2422 	      negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2423 	      rhs = XEXP (rhs, 0);
2424 	    }
2425 	  else if (GET_CODE (rhs) == ASHIFT
2426 		   && CONST_INT_P (XEXP (rhs, 1))
2427 		   && INTVAL (XEXP (rhs, 1)) >= 0
2428 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2429 	    {
2430 	      negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2431 	      negcoeff1 = -negcoeff1;
2432 	      rhs = XEXP (rhs, 0);
2433 	    }
2434 
2435 	  if (rtx_equal_p (lhs, rhs))
2436 	    {
2437 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2438 	      rtx coeff;
2439 	      double_int val;
2440 	      bool speed = optimize_function_for_speed_p (cfun);
2441 
2442 	      val = coeff0 + negcoeff1;
2443 	      coeff = immed_double_int_const (val, mode);
2444 
2445 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2446 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2447 		? tem : 0;
2448 	    }
2449 	}
2450 
2451       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2452       if (GET_CODE (op1) == NEG)
2453 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2454 
2455       /* (-x - c) may be simplified as (-c - x).  */
2456       if (GET_CODE (op0) == NEG
2457 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2458 	{
2459 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2460 	  if (tem)
2461 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2462 	}
2463 
2464       /* Don't let a relocatable value get a negative coeff.  */
2465       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2466 	return simplify_gen_binary (PLUS, mode,
2467 				    op0,
2468 				    neg_const_int (mode, op1));
2469 
2470       /* (x - (x & y)) -> (x & ~y) */
2471       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2472 	{
2473 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2474 	    {
2475 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2476 					GET_MODE (XEXP (op1, 1)));
2477 	      return simplify_gen_binary (AND, mode, op0, tem);
2478 	    }
2479 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2480 	    {
2481 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2482 					GET_MODE (XEXP (op1, 0)));
2483 	      return simplify_gen_binary (AND, mode, op0, tem);
2484 	    }
2485 	}
2486 
2487       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2488 	 by reversing the comparison code if valid.  */
2489       if (STORE_FLAG_VALUE == 1
2490 	  && trueop0 == const1_rtx
2491 	  && COMPARISON_P (op1)
2492 	  && (reversed = reversed_comparison (op1, mode)))
2493 	return reversed;
2494 
2495       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2496       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2497 	  && GET_CODE (op1) == MULT
2498 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2499 	{
2500 	  rtx in1, in2;
2501 
2502 	  in1 = XEXP (XEXP (op1, 0), 0);
2503 	  in2 = XEXP (op1, 1);
2504 	  return simplify_gen_binary (PLUS, mode,
2505 				      simplify_gen_binary (MULT, mode,
2506 							   in1, in2),
2507 				      op0);
2508 	}
2509 
2510       /* Canonicalize (minus (neg A) (mult B C)) to
2511 	 (minus (mult (neg B) C) A).  */
2512       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2513 	  && GET_CODE (op1) == MULT
2514 	  && GET_CODE (op0) == NEG)
2515 	{
2516 	  rtx in1, in2;
2517 
2518 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2519 	  in2 = XEXP (op1, 1);
2520 	  return simplify_gen_binary (MINUS, mode,
2521 				      simplify_gen_binary (MULT, mode,
2522 							   in1, in2),
2523 				      XEXP (op0, 0));
2524 	}
2525 
2526       /* If one of the operands is a PLUS or a MINUS, see if we can
2527 	 simplify this by the associative law.  This will, for example,
2528          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2529 	 Don't use the associative law for floating point.
2530 	 The inaccuracy makes it nonassociative,
2531 	 and subtle programs can break if operations are associated.  */
2532 
2533       if (INTEGRAL_MODE_P (mode)
2534 	  && (plus_minus_operand_p (op0)
2535 	      || plus_minus_operand_p (op1))
2536 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2537 	return tem;
2538       break;
2539 
2540     case MULT:
2541       if (trueop1 == constm1_rtx)
2542 	return simplify_gen_unary (NEG, mode, op0, mode);
2543 
2544       if (GET_CODE (op0) == NEG)
2545 	{
2546 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2547 	  /* If op1 is a MULT as well and simplify_unary_operation
2548 	     just moved the NEG to the second operand, simplify_gen_binary
2549 	     below could through simplify_associative_operation move
2550 	     the NEG around again and recurse endlessly.  */
2551 	  if (temp
2552 	      && GET_CODE (op1) == MULT
2553 	      && GET_CODE (temp) == MULT
2554 	      && XEXP (op1, 0) == XEXP (temp, 0)
2555 	      && GET_CODE (XEXP (temp, 1)) == NEG
2556 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2557 	    temp = NULL_RTX;
2558 	  if (temp)
2559 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2560 	}
2561       if (GET_CODE (op1) == NEG)
2562 	{
2563 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2564 	  /* If op0 is a MULT as well and simplify_unary_operation
2565 	     just moved the NEG to the second operand, simplify_gen_binary
2566 	     below could through simplify_associative_operation move
2567 	     the NEG around again and recurse endlessly.  */
2568 	  if (temp
2569 	      && GET_CODE (op0) == MULT
2570 	      && GET_CODE (temp) == MULT
2571 	      && XEXP (op0, 0) == XEXP (temp, 0)
2572 	      && GET_CODE (XEXP (temp, 1)) == NEG
2573 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2574 	    temp = NULL_RTX;
2575 	  if (temp)
2576 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2577 	}
2578 
2579       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2580 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2581 	 when the mode has signed zeros, since multiplying a negative
2582 	 number by 0 will give -0, not 0.  */
2583       if (!HONOR_NANS (mode)
2584 	  && !HONOR_SIGNED_ZEROS (mode)
2585 	  && trueop1 == CONST0_RTX (mode)
2586 	  && ! side_effects_p (op0))
2587 	return op1;
2588 
2589       /* In IEEE floating point, x*1 is not equivalent to x for
2590 	 signalling NaNs.  */
2591       if (!HONOR_SNANS (mode)
2592 	  && trueop1 == CONST1_RTX (mode))
2593 	return op0;
2594 
2595       /* Convert multiply by constant power of two into shift unless
2596 	 we are still generating RTL.  This test is a kludge.  */
2597       if (CONST_INT_P (trueop1)
2598 	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2599 	  /* If the mode is larger than the host word size, and the
2600 	     uppermost bit is set, then this isn't a power of two due
2601 	     to implicit sign extension.  */
2602 	  && (width <= HOST_BITS_PER_WIDE_INT
2603 	      || val != HOST_BITS_PER_WIDE_INT - 1))
2604 	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2605 
2606       /* Likewise for multipliers wider than a word.  */
2607       if (CONST_DOUBLE_AS_INT_P (trueop1)
2608 	  && GET_MODE (op0) == mode
2609 	  && CONST_DOUBLE_LOW (trueop1) == 0
2610 	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2611 	  && (val < HOST_BITS_PER_DOUBLE_INT - 1
2612 	      || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2613 	return simplify_gen_binary (ASHIFT, mode, op0,
2614 				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2615 
2616       /* x*2 is x+x and x*(-1) is -x */
2617       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2618 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2619 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2620 	  && GET_MODE (op0) == mode)
2621 	{
2622 	  REAL_VALUE_TYPE d;
2623 	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2624 
2625 	  if (REAL_VALUES_EQUAL (d, dconst2))
2626 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2627 
2628 	  if (!HONOR_SNANS (mode)
2629 	      && REAL_VALUES_EQUAL (d, dconstm1))
2630 	    return simplify_gen_unary (NEG, mode, op0, mode);
2631 	}
2632 
2633       /* Optimize -x * -x as x * x.  */
2634       if (FLOAT_MODE_P (mode)
2635 	  && GET_CODE (op0) == NEG
2636 	  && GET_CODE (op1) == NEG
2637 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2638 	  && !side_effects_p (XEXP (op0, 0)))
2639 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2640 
2641       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2642       if (SCALAR_FLOAT_MODE_P (mode)
2643 	  && GET_CODE (op0) == ABS
2644 	  && GET_CODE (op1) == ABS
2645 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2646 	  && !side_effects_p (XEXP (op0, 0)))
2647 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2648 
2649       /* Reassociate multiplication, but for floating point MULTs
2650 	 only when the user specifies unsafe math optimizations.  */
2651       if (! FLOAT_MODE_P (mode)
2652 	  || flag_unsafe_math_optimizations)
2653 	{
2654 	  tem = simplify_associative_operation (code, mode, op0, op1);
2655 	  if (tem)
2656 	    return tem;
2657 	}
2658       break;
2659 
2660     case IOR:
2661       if (trueop1 == CONST0_RTX (mode))
2662 	return op0;
2663       if (INTEGRAL_MODE_P (mode)
2664 	  && trueop1 == CONSTM1_RTX (mode)
2665 	  && !side_effects_p (op0))
2666 	return op1;
2667       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2668 	return op0;
2669       /* A | (~A) -> -1 */
2670       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2671 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2672 	  && ! side_effects_p (op0)
2673 	  && SCALAR_INT_MODE_P (mode))
2674 	return constm1_rtx;
2675 
2676       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2677       if (CONST_INT_P (op1)
2678 	  && HWI_COMPUTABLE_MODE_P (mode)
2679 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2680 	  && !side_effects_p (op0))
2681 	return op1;
2682 
2683       /* Canonicalize (X & C1) | C2.  */
2684       if (GET_CODE (op0) == AND
2685 	  && CONST_INT_P (trueop1)
2686 	  && CONST_INT_P (XEXP (op0, 1)))
2687 	{
2688 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2689 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2690 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2691 
2692 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
2693 	  if ((c1 & c2) == c1
2694 	      && !side_effects_p (XEXP (op0, 0)))
2695 	    return trueop1;
2696 
2697 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2698 	  if (((c1|c2) & mask) == mask)
2699 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2700 
2701 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
2702 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2703 	    {
2704 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2705 					 gen_int_mode (c1 & ~c2, mode));
2706 	      return simplify_gen_binary (IOR, mode, tem, op1);
2707 	    }
2708 	}
2709 
2710       /* Convert (A & B) | A to A.  */
2711       if (GET_CODE (op0) == AND
2712 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2713 	      || rtx_equal_p (XEXP (op0, 1), op1))
2714 	  && ! side_effects_p (XEXP (op0, 0))
2715 	  && ! side_effects_p (XEXP (op0, 1)))
2716 	return op1;
2717 
2718       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2719          mode size to (rotate A CX).  */
2720 
2721       if (GET_CODE (op1) == ASHIFT
2722           || GET_CODE (op1) == SUBREG)
2723         {
2724 	  opleft = op1;
2725 	  opright = op0;
2726 	}
2727       else
2728         {
2729 	  opright = op1;
2730 	  opleft = op0;
2731 	}
2732 
2733       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2734           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2735           && CONST_INT_P (XEXP (opleft, 1))
2736           && CONST_INT_P (XEXP (opright, 1))
2737           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2738               == GET_MODE_PRECISION (mode)))
2739         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2740 
2741       /* Same, but for ashift that has been "simplified" to a wider mode
2742         by simplify_shift_const.  */
2743 
2744       if (GET_CODE (opleft) == SUBREG
2745           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2746           && GET_CODE (opright) == LSHIFTRT
2747           && GET_CODE (XEXP (opright, 0)) == SUBREG
2748           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2749           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2750           && (GET_MODE_SIZE (GET_MODE (opleft))
2751               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2752           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2753                           SUBREG_REG (XEXP (opright, 0)))
2754           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2755           && CONST_INT_P (XEXP (opright, 1))
2756           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2757               == GET_MODE_PRECISION (mode)))
2758         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2759                                XEXP (SUBREG_REG (opleft), 1));
2760 
2761       /* If we have (ior (and (X C1) C2)), simplify this by making
2762 	 C1 as small as possible if C1 actually changes.  */
2763       if (CONST_INT_P (op1)
2764 	  && (HWI_COMPUTABLE_MODE_P (mode)
2765 	      || INTVAL (op1) > 0)
2766 	  && GET_CODE (op0) == AND
2767 	  && CONST_INT_P (XEXP (op0, 1))
2768 	  && CONST_INT_P (op1)
2769 	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2770 	return simplify_gen_binary (IOR, mode,
2771 				    simplify_gen_binary
2772 					  (AND, mode, XEXP (op0, 0),
2773 					   GEN_INT (UINTVAL (XEXP (op0, 1))
2774 						    & ~UINTVAL (op1))),
2775 				    op1);
2776 
2777       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2778          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2779 	 the PLUS does not affect any of the bits in OP1: then we can do
2780 	 the IOR as a PLUS and we can associate.  This is valid if OP1
2781          can be safely shifted left C bits.  */
2782       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2783           && GET_CODE (XEXP (op0, 0)) == PLUS
2784           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2785           && CONST_INT_P (XEXP (op0, 1))
2786           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2787         {
2788           int count = INTVAL (XEXP (op0, 1));
2789           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2790 
2791           if (mask >> count == INTVAL (trueop1)
2792 	      && trunc_int_for_mode (mask, mode) == mask
2793               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2794 	    return simplify_gen_binary (ASHIFTRT, mode,
2795 					plus_constant (mode, XEXP (op0, 0),
2796 						       mask),
2797 					XEXP (op0, 1));
2798         }
2799 
2800       tem = simplify_associative_operation (code, mode, op0, op1);
2801       if (tem)
2802 	return tem;
2803       break;
2804 
2805     case XOR:
2806       if (trueop1 == CONST0_RTX (mode))
2807 	return op0;
2808       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2809 	return simplify_gen_unary (NOT, mode, op0, mode);
2810       if (rtx_equal_p (trueop0, trueop1)
2811 	  && ! side_effects_p (op0)
2812 	  && GET_MODE_CLASS (mode) != MODE_CC)
2813 	 return CONST0_RTX (mode);
2814 
2815       /* Canonicalize XOR of the most significant bit to PLUS.  */
2816       if (CONST_SCALAR_INT_P (op1)
2817 	  && mode_signbit_p (mode, op1))
2818 	return simplify_gen_binary (PLUS, mode, op0, op1);
2819       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2820       if (CONST_SCALAR_INT_P (op1)
2821 	  && GET_CODE (op0) == PLUS
2822 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2823 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2824 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2825 				    simplify_gen_binary (XOR, mode, op1,
2826 							 XEXP (op0, 1)));
2827 
2828       /* If we are XORing two things that have no bits in common,
2829 	 convert them into an IOR.  This helps to detect rotation encoded
2830 	 using those methods and possibly other simplifications.  */
2831 
2832       if (HWI_COMPUTABLE_MODE_P (mode)
2833 	  && (nonzero_bits (op0, mode)
2834 	      & nonzero_bits (op1, mode)) == 0)
2835 	return (simplify_gen_binary (IOR, mode, op0, op1));
2836 
2837       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2838 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2839 	 (NOT y).  */
2840       {
2841 	int num_negated = 0;
2842 
2843 	if (GET_CODE (op0) == NOT)
2844 	  num_negated++, op0 = XEXP (op0, 0);
2845 	if (GET_CODE (op1) == NOT)
2846 	  num_negated++, op1 = XEXP (op1, 0);
2847 
2848 	if (num_negated == 2)
2849 	  return simplify_gen_binary (XOR, mode, op0, op1);
2850 	else if (num_negated == 1)
2851 	  return simplify_gen_unary (NOT, mode,
2852 				     simplify_gen_binary (XOR, mode, op0, op1),
2853 				     mode);
2854       }
2855 
2856       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2857 	 correspond to a machine insn or result in further simplifications
2858 	 if B is a constant.  */
2859 
2860       if (GET_CODE (op0) == AND
2861 	  && rtx_equal_p (XEXP (op0, 1), op1)
2862 	  && ! side_effects_p (op1))
2863 	return simplify_gen_binary (AND, mode,
2864 				    simplify_gen_unary (NOT, mode,
2865 							XEXP (op0, 0), mode),
2866 				    op1);
2867 
2868       else if (GET_CODE (op0) == AND
2869 	       && rtx_equal_p (XEXP (op0, 0), op1)
2870 	       && ! side_effects_p (op1))
2871 	return simplify_gen_binary (AND, mode,
2872 				    simplify_gen_unary (NOT, mode,
2873 							XEXP (op0, 1), mode),
2874 				    op1);
2875 
2876       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2877 	 we can transform like this:
2878             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2879                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2880                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2881 	 Attempt a few simplifications when B and C are both constants.  */
2882       if (GET_CODE (op0) == AND
2883 	  && CONST_INT_P (op1)
2884 	  && CONST_INT_P (XEXP (op0, 1)))
2885 	{
2886 	  rtx a = XEXP (op0, 0);
2887 	  rtx b = XEXP (op0, 1);
2888 	  rtx c = op1;
2889 	  HOST_WIDE_INT bval = INTVAL (b);
2890 	  HOST_WIDE_INT cval = INTVAL (c);
2891 
2892 	  rtx na_c
2893 	    = simplify_binary_operation (AND, mode,
2894 					 simplify_gen_unary (NOT, mode, a, mode),
2895 					 c);
2896 	  if ((~cval & bval) == 0)
2897 	    {
2898 	      /* Try to simplify ~A&C | ~B&C.  */
2899 	      if (na_c != NULL_RTX)
2900 		return simplify_gen_binary (IOR, mode, na_c,
2901 					    GEN_INT (~bval & cval));
2902 	    }
2903 	  else
2904 	    {
2905 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2906 	      if (na_c == const0_rtx)
2907 		{
2908 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2909 						    GEN_INT (~cval & bval));
2910 		  return simplify_gen_binary (IOR, mode, a_nc_b,
2911 					      GEN_INT (~bval & cval));
2912 		}
2913 	    }
2914 	}
2915 
2916       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2917 	 comparison if STORE_FLAG_VALUE is 1.  */
2918       if (STORE_FLAG_VALUE == 1
2919 	  && trueop1 == const1_rtx
2920 	  && COMPARISON_P (op0)
2921 	  && (reversed = reversed_comparison (op0, mode)))
2922 	return reversed;
2923 
2924       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2925 	 is (lt foo (const_int 0)), so we can perform the above
2926 	 simplification if STORE_FLAG_VALUE is 1.  */
2927 
2928       if (STORE_FLAG_VALUE == 1
2929 	  && trueop1 == const1_rtx
2930 	  && GET_CODE (op0) == LSHIFTRT
2931 	  && CONST_INT_P (XEXP (op0, 1))
2932 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2933 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2934 
2935       /* (xor (comparison foo bar) (const_int sign-bit))
2936 	 when STORE_FLAG_VALUE is the sign bit.  */
2937       if (val_signbit_p (mode, STORE_FLAG_VALUE)
2938 	  && trueop1 == const_true_rtx
2939 	  && COMPARISON_P (op0)
2940 	  && (reversed = reversed_comparison (op0, mode)))
2941 	return reversed;
2942 
2943       tem = simplify_associative_operation (code, mode, op0, op1);
2944       if (tem)
2945 	return tem;
2946       break;
2947 
2948     case AND:
2949       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2950 	return trueop1;
2951       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2952 	return op0;
2953       if (HWI_COMPUTABLE_MODE_P (mode))
2954 	{
2955 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2956 	  HOST_WIDE_INT nzop1;
2957 	  if (CONST_INT_P (trueop1))
2958 	    {
2959 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
2960 	      /* If we are turning off bits already known off in OP0, we need
2961 		 not do an AND.  */
2962 	      if ((nzop0 & ~val1) == 0)
2963 		return op0;
2964 	    }
2965 	  nzop1 = nonzero_bits (trueop1, mode);
2966 	  /* If we are clearing all the nonzero bits, the result is zero.  */
2967 	  if ((nzop1 & nzop0) == 0
2968 	      && !side_effects_p (op0) && !side_effects_p (op1))
2969 	    return CONST0_RTX (mode);
2970 	}
2971       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2972 	  && GET_MODE_CLASS (mode) != MODE_CC)
2973 	return op0;
2974       /* A & (~A) -> 0 */
2975       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2976 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2977 	  && ! side_effects_p (op0)
2978 	  && GET_MODE_CLASS (mode) != MODE_CC)
2979 	return CONST0_RTX (mode);
2980 
2981       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2982 	 there are no nonzero bits of C outside of X's mode.  */
2983       if ((GET_CODE (op0) == SIGN_EXTEND
2984 	   || GET_CODE (op0) == ZERO_EXTEND)
2985 	  && CONST_INT_P (trueop1)
2986 	  && HWI_COMPUTABLE_MODE_P (mode)
2987 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2988 	      & UINTVAL (trueop1)) == 0)
2989 	{
2990 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2991 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2992 				     gen_int_mode (INTVAL (trueop1),
2993 						   imode));
2994 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2995 	}
2996 
2997       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2998 	 we might be able to further simplify the AND with X and potentially
2999 	 remove the truncation altogether.  */
3000       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3001 	{
3002 	  rtx x = XEXP (op0, 0);
3003 	  enum machine_mode xmode = GET_MODE (x);
3004 	  tem = simplify_gen_binary (AND, xmode, x,
3005 				     gen_int_mode (INTVAL (trueop1), xmode));
3006 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3007 	}
3008 
3009       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
3010       if (GET_CODE (op0) == IOR
3011 	  && CONST_INT_P (trueop1)
3012 	  && CONST_INT_P (XEXP (op0, 1)))
3013 	{
3014 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3015 	  return simplify_gen_binary (IOR, mode,
3016 				      simplify_gen_binary (AND, mode,
3017 							   XEXP (op0, 0), op1),
3018 				      gen_int_mode (tmp, mode));
3019 	}
3020 
3021       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3022 	 insn (and may simplify more).  */
3023       if (GET_CODE (op0) == XOR
3024 	  && rtx_equal_p (XEXP (op0, 0), op1)
3025 	  && ! side_effects_p (op1))
3026 	return simplify_gen_binary (AND, mode,
3027 				    simplify_gen_unary (NOT, mode,
3028 							XEXP (op0, 1), mode),
3029 				    op1);
3030 
3031       if (GET_CODE (op0) == XOR
3032 	  && rtx_equal_p (XEXP (op0, 1), op1)
3033 	  && ! side_effects_p (op1))
3034 	return simplify_gen_binary (AND, mode,
3035 				    simplify_gen_unary (NOT, mode,
3036 							XEXP (op0, 0), mode),
3037 				    op1);
3038 
3039       /* Similarly for (~(A ^ B)) & A.  */
3040       if (GET_CODE (op0) == NOT
3041 	  && GET_CODE (XEXP (op0, 0)) == XOR
3042 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3043 	  && ! side_effects_p (op1))
3044 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3045 
3046       if (GET_CODE (op0) == NOT
3047 	  && GET_CODE (XEXP (op0, 0)) == XOR
3048 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3049 	  && ! side_effects_p (op1))
3050 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3051 
3052       /* Convert (A | B) & A to A.  */
3053       if (GET_CODE (op0) == IOR
3054 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3055 	      || rtx_equal_p (XEXP (op0, 1), op1))
3056 	  && ! side_effects_p (XEXP (op0, 0))
3057 	  && ! side_effects_p (XEXP (op0, 1)))
3058 	return op1;
3059 
3060       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3061 	 ((A & N) + B) & M -> (A + B) & M
3062 	 Similarly if (N & M) == 0,
3063 	 ((A | N) + B) & M -> (A + B) & M
3064 	 and for - instead of + and/or ^ instead of |.
3065          Also, if (N & M) == 0, then
3066 	 (A +- N) & M -> A & M.  */
3067       if (CONST_INT_P (trueop1)
3068 	  && HWI_COMPUTABLE_MODE_P (mode)
3069 	  && ~UINTVAL (trueop1)
3070 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3071 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3072 	{
3073 	  rtx pmop[2];
3074 	  int which;
3075 
3076 	  pmop[0] = XEXP (op0, 0);
3077 	  pmop[1] = XEXP (op0, 1);
3078 
3079 	  if (CONST_INT_P (pmop[1])
3080 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3081 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
3082 
3083 	  for (which = 0; which < 2; which++)
3084 	    {
3085 	      tem = pmop[which];
3086 	      switch (GET_CODE (tem))
3087 		{
3088 		case AND:
3089 		  if (CONST_INT_P (XEXP (tem, 1))
3090 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3091 		      == UINTVAL (trueop1))
3092 		    pmop[which] = XEXP (tem, 0);
3093 		  break;
3094 		case IOR:
3095 		case XOR:
3096 		  if (CONST_INT_P (XEXP (tem, 1))
3097 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3098 		    pmop[which] = XEXP (tem, 0);
3099 		  break;
3100 		default:
3101 		  break;
3102 		}
3103 	    }
3104 
3105 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3106 	    {
3107 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3108 					 pmop[0], pmop[1]);
3109 	      return simplify_gen_binary (code, mode, tem, op1);
3110 	    }
3111 	}
3112 
3113       /* (and X (ior (not X) Y) -> (and X Y) */
3114       if (GET_CODE (op1) == IOR
3115 	  && GET_CODE (XEXP (op1, 0)) == NOT
3116 	  && op0 == XEXP (XEXP (op1, 0), 0))
3117        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3118 
3119       /* (and (ior (not X) Y) X) -> (and X Y) */
3120       if (GET_CODE (op0) == IOR
3121 	  && GET_CODE (XEXP (op0, 0)) == NOT
3122 	  && op1 == XEXP (XEXP (op0, 0), 0))
3123 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3124 
3125       tem = simplify_associative_operation (code, mode, op0, op1);
3126       if (tem)
3127 	return tem;
3128       break;
3129 
3130     case UDIV:
3131       /* 0/x is 0 (or x&0 if x has side-effects).  */
3132       if (trueop0 == CONST0_RTX (mode))
3133 	{
3134 	  if (side_effects_p (op1))
3135 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3136 	  return trueop0;
3137 	}
3138       /* x/1 is x.  */
3139       if (trueop1 == CONST1_RTX (mode))
3140 	{
3141 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3142 	  if (tem)
3143 	    return tem;
3144 	}
3145       /* Convert divide by power of two into shift.  */
3146       if (CONST_INT_P (trueop1)
3147 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3148 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3149       break;
3150 
3151     case DIV:
3152       /* Handle floating point and integers separately.  */
3153       if (SCALAR_FLOAT_MODE_P (mode))
3154 	{
3155 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3156 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3157 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3158 	     zeros, since dividing 0 by a negative number gives -0.0  */
3159 	  if (trueop0 == CONST0_RTX (mode)
3160 	      && !HONOR_NANS (mode)
3161 	      && !HONOR_SIGNED_ZEROS (mode)
3162 	      && ! side_effects_p (op1))
3163 	    return op0;
3164 	  /* x/1.0 is x.  */
3165 	  if (trueop1 == CONST1_RTX (mode)
3166 	      && !HONOR_SNANS (mode))
3167 	    return op0;
3168 
3169 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3170 	      && trueop1 != CONST0_RTX (mode))
3171 	    {
3172 	      REAL_VALUE_TYPE d;
3173 	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3174 
3175 	      /* x/-1.0 is -x.  */
3176 	      if (REAL_VALUES_EQUAL (d, dconstm1)
3177 		  && !HONOR_SNANS (mode))
3178 		return simplify_gen_unary (NEG, mode, op0, mode);
3179 
3180 	      /* Change FP division by a constant into multiplication.
3181 		 Only do this with -freciprocal-math.  */
3182 	      if (flag_reciprocal_math
3183 		  && !REAL_VALUES_EQUAL (d, dconst0))
3184 		{
3185 		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3186 		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3187 		  return simplify_gen_binary (MULT, mode, op0, tem);
3188 		}
3189 	    }
3190 	}
3191       else if (SCALAR_INT_MODE_P (mode))
3192 	{
3193 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3194 	  if (trueop0 == CONST0_RTX (mode)
3195 	      && !cfun->can_throw_non_call_exceptions)
3196 	    {
3197 	      if (side_effects_p (op1))
3198 		return simplify_gen_binary (AND, mode, op1, trueop0);
3199 	      return trueop0;
3200 	    }
3201 	  /* x/1 is x.  */
3202 	  if (trueop1 == CONST1_RTX (mode))
3203 	    {
3204 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3205 	      if (tem)
3206 		return tem;
3207 	    }
3208 	  /* x/-1 is -x.  */
3209 	  if (trueop1 == constm1_rtx)
3210 	    {
3211 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3212 	      if (x)
3213 		return simplify_gen_unary (NEG, mode, x, mode);
3214 	    }
3215 	}
3216       break;
3217 
3218     case UMOD:
3219       /* 0%x is 0 (or x&0 if x has side-effects).  */
3220       if (trueop0 == CONST0_RTX (mode))
3221 	{
3222 	  if (side_effects_p (op1))
3223 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3224 	  return trueop0;
3225 	}
3226       /* x%1 is 0 (of x&0 if x has side-effects).  */
3227       if (trueop1 == CONST1_RTX (mode))
3228 	{
3229 	  if (side_effects_p (op0))
3230 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3231 	  return CONST0_RTX (mode);
3232 	}
3233       /* Implement modulus by power of two as AND.  */
3234       if (CONST_INT_P (trueop1)
3235 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3236 	return simplify_gen_binary (AND, mode, op0,
3237 				    GEN_INT (INTVAL (op1) - 1));
3238       break;
3239 
3240     case MOD:
3241       /* 0%x is 0 (or x&0 if x has side-effects).  */
3242       if (trueop0 == CONST0_RTX (mode))
3243 	{
3244 	  if (side_effects_p (op1))
3245 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3246 	  return trueop0;
3247 	}
3248       /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
3249       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3250 	{
3251 	  if (side_effects_p (op0))
3252 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3253 	  return CONST0_RTX (mode);
3254 	}
3255       break;
3256 
3257     case ROTATERT:
3258     case ROTATE:
3259     case ASHIFTRT:
3260       if (trueop1 == CONST0_RTX (mode))
3261 	return op0;
3262       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3263 	return op0;
3264       /* Rotating ~0 always results in ~0.  */
3265       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3266 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3267 	  && ! side_effects_p (op1))
3268 	return op0;
3269     canonicalize_shift:
3270       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3271 	{
3272 	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3273 	  if (val != INTVAL (op1))
3274 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3275 	}
3276       break;
3277 
3278     case ASHIFT:
3279     case SS_ASHIFT:
3280     case US_ASHIFT:
3281       if (trueop1 == CONST0_RTX (mode))
3282 	return op0;
3283       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3284 	return op0;
3285       goto canonicalize_shift;
3286 
3287     case LSHIFTRT:
3288       if (trueop1 == CONST0_RTX (mode))
3289 	return op0;
3290       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3291 	return op0;
3292       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
3293       if (GET_CODE (op0) == CLZ
3294 	  && CONST_INT_P (trueop1)
3295 	  && STORE_FLAG_VALUE == 1
3296 	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3297 	{
3298 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3299 	  unsigned HOST_WIDE_INT zero_val = 0;
3300 
3301 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3302 	      && zero_val == GET_MODE_PRECISION (imode)
3303 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3304 	    return simplify_gen_relational (EQ, mode, imode,
3305 					    XEXP (op0, 0), const0_rtx);
3306 	}
3307       goto canonicalize_shift;
3308 
3309     case SMIN:
3310       if (width <= HOST_BITS_PER_WIDE_INT
3311 	  && mode_signbit_p (mode, trueop1)
3312 	  && ! side_effects_p (op0))
3313 	return op1;
3314       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3315 	return op0;
3316       tem = simplify_associative_operation (code, mode, op0, op1);
3317       if (tem)
3318 	return tem;
3319       break;
3320 
3321     case SMAX:
3322       if (width <= HOST_BITS_PER_WIDE_INT
3323 	  && CONST_INT_P (trueop1)
3324 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3325 	  && ! side_effects_p (op0))
3326 	return op1;
3327       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3328 	return op0;
3329       tem = simplify_associative_operation (code, mode, op0, op1);
3330       if (tem)
3331 	return tem;
3332       break;
3333 
3334     case UMIN:
3335       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3336 	return op1;
3337       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3338 	return op0;
3339       tem = simplify_associative_operation (code, mode, op0, op1);
3340       if (tem)
3341 	return tem;
3342       break;
3343 
3344     case UMAX:
3345       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3346 	return op1;
3347       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3348 	return op0;
3349       tem = simplify_associative_operation (code, mode, op0, op1);
3350       if (tem)
3351 	return tem;
3352       break;
3353 
3354     case SS_PLUS:
3355     case US_PLUS:
3356     case SS_MINUS:
3357     case US_MINUS:
3358     case SS_MULT:
3359     case US_MULT:
3360     case SS_DIV:
3361     case US_DIV:
3362       /* ??? There are simplifications that can be done.  */
3363       return 0;
3364 
3365     case VEC_SELECT:
3366       if (!VECTOR_MODE_P (mode))
3367 	{
3368 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3369 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3370 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3371 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3372 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3373 
3374 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3375 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3376 						      (trueop1, 0, 0)));
3377 
3378 	  /* Extract a scalar element from a nested VEC_SELECT expression
3379 	     (with optional nested VEC_CONCAT expression).  Some targets
3380 	     (i386) extract scalar element from a vector using chain of
3381 	     nested VEC_SELECT expressions.  When input operand is a memory
3382 	     operand, this operation can be simplified to a simple scalar
3383 	     load from an offseted memory address.  */
3384 	  if (GET_CODE (trueop0) == VEC_SELECT)
3385 	    {
3386 	      rtx op0 = XEXP (trueop0, 0);
3387 	      rtx op1 = XEXP (trueop0, 1);
3388 
3389 	      enum machine_mode opmode = GET_MODE (op0);
3390 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3391 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3392 
3393 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3394 	      int elem;
3395 
3396 	      rtvec vec;
3397 	      rtx tmp_op, tmp;
3398 
3399 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3400 	      gcc_assert (i < n_elts);
3401 
3402 	      /* Select element, pointed by nested selector.  */
3403 	      elem = INTVAL (XVECEXP (op1, 0, i));
3404 
3405 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3406 	      if (GET_CODE (op0) == VEC_CONCAT)
3407 		{
3408 		  rtx op00 = XEXP (op0, 0);
3409 		  rtx op01 = XEXP (op0, 1);
3410 
3411 		  enum machine_mode mode00, mode01;
3412 		  int n_elts00, n_elts01;
3413 
3414 		  mode00 = GET_MODE (op00);
3415 		  mode01 = GET_MODE (op01);
3416 
3417 		  /* Find out number of elements of each operand.  */
3418 		  if (VECTOR_MODE_P (mode00))
3419 		    {
3420 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3421 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3422 		    }
3423 		  else
3424 		    n_elts00 = 1;
3425 
3426 		  if (VECTOR_MODE_P (mode01))
3427 		    {
3428 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3429 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3430 		    }
3431 		  else
3432 		    n_elts01 = 1;
3433 
3434 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3435 
3436 		  /* Select correct operand of VEC_CONCAT
3437 		     and adjust selector. */
3438 		  if (elem < n_elts01)
3439 		    tmp_op = op00;
3440 		  else
3441 		    {
3442 		      tmp_op = op01;
3443 		      elem -= n_elts00;
3444 		    }
3445 		}
3446 	      else
3447 		tmp_op = op0;
3448 
3449 	      vec = rtvec_alloc (1);
3450 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3451 
3452 	      tmp = gen_rtx_fmt_ee (code, mode,
3453 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3454 	      return tmp;
3455 	    }
3456 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
3457 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
3458 	    return XEXP (trueop0, 0);
3459 	}
3460       else
3461 	{
3462 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3463 	  gcc_assert (GET_MODE_INNER (mode)
3464 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3465 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3466 
3467 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3468 	    {
3469 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3470 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3471 	      rtvec v = rtvec_alloc (n_elts);
3472 	      unsigned int i;
3473 
3474 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3475 	      for (i = 0; i < n_elts; i++)
3476 		{
3477 		  rtx x = XVECEXP (trueop1, 0, i);
3478 
3479 		  gcc_assert (CONST_INT_P (x));
3480 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3481 						       INTVAL (x));
3482 		}
3483 
3484 	      return gen_rtx_CONST_VECTOR (mode, v);
3485 	    }
3486 
3487 	  /* Recognize the identity.  */
3488 	  if (GET_MODE (trueop0) == mode)
3489 	    {
3490 	      bool maybe_ident = true;
3491 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3492 		{
3493 		  rtx j = XVECEXP (trueop1, 0, i);
3494 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
3495 		    {
3496 		      maybe_ident = false;
3497 		      break;
3498 		    }
3499 		}
3500 	      if (maybe_ident)
3501 		return trueop0;
3502 	    }
3503 
3504 	  /* If we build {a,b} then permute it, build the result directly.  */
3505 	  if (XVECLEN (trueop1, 0) == 2
3506 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3507 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3508 	      && GET_CODE (trueop0) == VEC_CONCAT
3509 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3510 	      && GET_MODE (XEXP (trueop0, 0)) == mode
3511 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3512 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
3513 	    {
3514 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3515 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3516 	      rtx subop0, subop1;
3517 
3518 	      gcc_assert (i0 < 4 && i1 < 4);
3519 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3520 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3521 
3522 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3523 	    }
3524 
3525 	  if (XVECLEN (trueop1, 0) == 2
3526 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3527 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3528 	      && GET_CODE (trueop0) == VEC_CONCAT
3529 	      && GET_MODE (trueop0) == mode)
3530 	    {
3531 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3532 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3533 	      rtx subop0, subop1;
3534 
3535 	      gcc_assert (i0 < 2 && i1 < 2);
3536 	      subop0 = XEXP (trueop0, i0);
3537 	      subop1 = XEXP (trueop0, i1);
3538 
3539 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3540 	    }
3541 	}
3542 
3543       if (XVECLEN (trueop1, 0) == 1
3544 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3545 	  && GET_CODE (trueop0) == VEC_CONCAT)
3546 	{
3547 	  rtx vec = trueop0;
3548 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3549 
3550 	  /* Try to find the element in the VEC_CONCAT.  */
3551 	  while (GET_MODE (vec) != mode
3552 		 && GET_CODE (vec) == VEC_CONCAT)
3553 	    {
3554 	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3555 	      if (offset < vec_size)
3556 		vec = XEXP (vec, 0);
3557 	      else
3558 		{
3559 		  offset -= vec_size;
3560 		  vec = XEXP (vec, 1);
3561 		}
3562 	      vec = avoid_constant_pool_reference (vec);
3563 	    }
3564 
3565 	  if (GET_MODE (vec) == mode)
3566 	    return vec;
3567 	}
3568 
3569       return 0;
3570     case VEC_CONCAT:
3571       {
3572 	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3573 				      ? GET_MODE (trueop0)
3574 				      : GET_MODE_INNER (mode));
3575 	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3576 				      ? GET_MODE (trueop1)
3577 				      : GET_MODE_INNER (mode));
3578 
3579 	gcc_assert (VECTOR_MODE_P (mode));
3580 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3581 		    == GET_MODE_SIZE (mode));
3582 
3583 	if (VECTOR_MODE_P (op0_mode))
3584 	  gcc_assert (GET_MODE_INNER (mode)
3585 		      == GET_MODE_INNER (op0_mode));
3586 	else
3587 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3588 
3589 	if (VECTOR_MODE_P (op1_mode))
3590 	  gcc_assert (GET_MODE_INNER (mode)
3591 		      == GET_MODE_INNER (op1_mode));
3592 	else
3593 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3594 
3595 	if ((GET_CODE (trueop0) == CONST_VECTOR
3596 	     || CONST_SCALAR_INT_P (trueop0)
3597 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3598 	    && (GET_CODE (trueop1) == CONST_VECTOR
3599 		|| CONST_SCALAR_INT_P (trueop1)
3600 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3601 	  {
3602 	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3603 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3604 	    rtvec v = rtvec_alloc (n_elts);
3605 	    unsigned int i;
3606 	    unsigned in_n_elts = 1;
3607 
3608 	    if (VECTOR_MODE_P (op0_mode))
3609 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3610 	    for (i = 0; i < n_elts; i++)
3611 	      {
3612 		if (i < in_n_elts)
3613 		  {
3614 		    if (!VECTOR_MODE_P (op0_mode))
3615 		      RTVEC_ELT (v, i) = trueop0;
3616 		    else
3617 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3618 		  }
3619 		else
3620 		  {
3621 		    if (!VECTOR_MODE_P (op1_mode))
3622 		      RTVEC_ELT (v, i) = trueop1;
3623 		    else
3624 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3625 							   i - in_n_elts);
3626 		  }
3627 	      }
3628 
3629 	    return gen_rtx_CONST_VECTOR (mode, v);
3630 	  }
3631 
3632 	/* Try to merge VEC_SELECTs from the same vector into a single one.  */
3633 	if (GET_CODE (trueop0) == VEC_SELECT
3634 	    && GET_CODE (trueop1) == VEC_SELECT
3635 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3636 	  {
3637 	    rtx par0 = XEXP (trueop0, 1);
3638 	    rtx par1 = XEXP (trueop1, 1);
3639 	    int len0 = XVECLEN (par0, 0);
3640 	    int len1 = XVECLEN (par1, 0);
3641 	    rtvec vec = rtvec_alloc (len0 + len1);
3642 	    for (int i = 0; i < len0; i++)
3643 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3644 	    for (int i = 0; i < len1; i++)
3645 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3646 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3647 					gen_rtx_PARALLEL (VOIDmode, vec));
3648 	  }
3649       }
3650       return 0;
3651 
3652     default:
3653       gcc_unreachable ();
3654     }
3655 
3656   return 0;
3657 }
3658 
3659 rtx
3660 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3661 				 rtx op0, rtx op1)
3662 {
3663   HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3664   HOST_WIDE_INT val;
3665   unsigned int width = GET_MODE_PRECISION (mode);
3666 
3667   if (VECTOR_MODE_P (mode)
3668       && code != VEC_CONCAT
3669       && GET_CODE (op0) == CONST_VECTOR
3670       && GET_CODE (op1) == CONST_VECTOR)
3671     {
3672       unsigned n_elts = GET_MODE_NUNITS (mode);
3673       enum machine_mode op0mode = GET_MODE (op0);
3674       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3675       enum machine_mode op1mode = GET_MODE (op1);
3676       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3677       rtvec v = rtvec_alloc (n_elts);
3678       unsigned int i;
3679 
3680       gcc_assert (op0_n_elts == n_elts);
3681       gcc_assert (op1_n_elts == n_elts);
3682       for (i = 0; i < n_elts; i++)
3683 	{
3684 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3685 					     CONST_VECTOR_ELT (op0, i),
3686 					     CONST_VECTOR_ELT (op1, i));
3687 	  if (!x)
3688 	    return 0;
3689 	  RTVEC_ELT (v, i) = x;
3690 	}
3691 
3692       return gen_rtx_CONST_VECTOR (mode, v);
3693     }
3694 
3695   if (VECTOR_MODE_P (mode)
3696       && code == VEC_CONCAT
3697       && (CONST_SCALAR_INT_P (op0)
3698 	  || GET_CODE (op0) == CONST_FIXED
3699 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
3700       && (CONST_SCALAR_INT_P (op1)
3701 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
3702 	  || GET_CODE (op1) == CONST_FIXED))
3703     {
3704       unsigned n_elts = GET_MODE_NUNITS (mode);
3705       rtvec v = rtvec_alloc (n_elts);
3706 
3707       gcc_assert (n_elts >= 2);
3708       if (n_elts == 2)
3709 	{
3710 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3711 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3712 
3713 	  RTVEC_ELT (v, 0) = op0;
3714 	  RTVEC_ELT (v, 1) = op1;
3715 	}
3716       else
3717 	{
3718 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3719 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3720 	  unsigned i;
3721 
3722 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3723 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3724 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3725 
3726 	  for (i = 0; i < op0_n_elts; ++i)
3727 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3728 	  for (i = 0; i < op1_n_elts; ++i)
3729 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3730 	}
3731 
3732       return gen_rtx_CONST_VECTOR (mode, v);
3733     }
3734 
3735   if (SCALAR_FLOAT_MODE_P (mode)
3736       && CONST_DOUBLE_AS_FLOAT_P (op0)
3737       && CONST_DOUBLE_AS_FLOAT_P (op1)
3738       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3739     {
3740       if (code == AND
3741 	  || code == IOR
3742 	  || code == XOR)
3743 	{
3744 	  long tmp0[4];
3745 	  long tmp1[4];
3746 	  REAL_VALUE_TYPE r;
3747 	  int i;
3748 
3749 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3750 			  GET_MODE (op0));
3751 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3752 			  GET_MODE (op1));
3753 	  for (i = 0; i < 4; i++)
3754 	    {
3755 	      switch (code)
3756 	      {
3757 	      case AND:
3758 		tmp0[i] &= tmp1[i];
3759 		break;
3760 	      case IOR:
3761 		tmp0[i] |= tmp1[i];
3762 		break;
3763 	      case XOR:
3764 		tmp0[i] ^= tmp1[i];
3765 		break;
3766 	      default:
3767 		gcc_unreachable ();
3768 	      }
3769 	    }
3770 	   real_from_target (&r, tmp0, mode);
3771 	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3772 	}
3773       else
3774 	{
3775 	  REAL_VALUE_TYPE f0, f1, value, result;
3776 	  bool inexact;
3777 
3778 	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3779 	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3780 	  real_convert (&f0, mode, &f0);
3781 	  real_convert (&f1, mode, &f1);
3782 
3783 	  if (HONOR_SNANS (mode)
3784 	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3785 	    return 0;
3786 
3787 	  if (code == DIV
3788 	      && REAL_VALUES_EQUAL (f1, dconst0)
3789 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3790 	    return 0;
3791 
3792 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3793 	      && flag_trapping_math
3794 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3795 	    {
3796 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3797 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3798 
3799 	      switch (code)
3800 		{
3801 		case PLUS:
3802 		  /* Inf + -Inf = NaN plus exception.  */
3803 		  if (s0 != s1)
3804 		    return 0;
3805 		  break;
3806 		case MINUS:
3807 		  /* Inf - Inf = NaN plus exception.  */
3808 		  if (s0 == s1)
3809 		    return 0;
3810 		  break;
3811 		case DIV:
3812 		  /* Inf / Inf = NaN plus exception.  */
3813 		  return 0;
3814 		default:
3815 		  break;
3816 		}
3817 	    }
3818 
3819 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3820 	      && flag_trapping_math
3821 	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3822 		  || (REAL_VALUE_ISINF (f1)
3823 		      && REAL_VALUES_EQUAL (f0, dconst0))))
3824 	    /* Inf * 0 = NaN plus exception.  */
3825 	    return 0;
3826 
3827 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3828 				     &f0, &f1);
3829 	  real_convert (&result, mode, &value);
3830 
3831 	  /* Don't constant fold this floating point operation if
3832 	     the result has overflowed and flag_trapping_math.  */
3833 
3834 	  if (flag_trapping_math
3835 	      && MODE_HAS_INFINITIES (mode)
3836 	      && REAL_VALUE_ISINF (result)
3837 	      && !REAL_VALUE_ISINF (f0)
3838 	      && !REAL_VALUE_ISINF (f1))
3839 	    /* Overflow plus exception.  */
3840 	    return 0;
3841 
3842 	  /* Don't constant fold this floating point operation if the
3843 	     result may dependent upon the run-time rounding mode and
3844 	     flag_rounding_math is set, or if GCC's software emulation
3845 	     is unable to accurately represent the result.  */
3846 
3847 	  if ((flag_rounding_math
3848 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3849 	      && (inexact || !real_identical (&result, &value)))
3850 	    return NULL_RTX;
3851 
3852 	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3853 	}
3854     }
3855 
3856   /* We can fold some multi-word operations.  */
3857   if (GET_MODE_CLASS (mode) == MODE_INT
3858       && width == HOST_BITS_PER_DOUBLE_INT
3859       && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3860       && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3861     {
3862       double_int o0, o1, res, tmp;
3863       bool overflow;
3864 
3865       o0 = rtx_to_double_int (op0);
3866       o1 = rtx_to_double_int (op1);
3867 
3868       switch (code)
3869 	{
3870 	case MINUS:
3871 	  /* A - B == A + (-B).  */
3872 	  o1 = -o1;
3873 
3874 	  /* Fall through....  */
3875 
3876 	case PLUS:
3877 	  res = o0 + o1;
3878 	  break;
3879 
3880 	case MULT:
3881 	  res = o0 * o1;
3882 	  break;
3883 
3884 	case DIV:
3885           res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3886 					 &tmp, &overflow);
3887 	  if (overflow)
3888 	    return 0;
3889 	  break;
3890 
3891 	case MOD:
3892           tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3893 					 &res, &overflow);
3894 	  if (overflow)
3895 	    return 0;
3896 	  break;
3897 
3898 	case UDIV:
3899           res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3900 					 &tmp, &overflow);
3901 	  if (overflow)
3902 	    return 0;
3903 	  break;
3904 
3905 	case UMOD:
3906           tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3907 					 &res, &overflow);
3908 	  if (overflow)
3909 	    return 0;
3910 	  break;
3911 
3912 	case AND:
3913 	  res = o0 & o1;
3914 	  break;
3915 
3916 	case IOR:
3917 	  res = o0 | o1;
3918 	  break;
3919 
3920 	case XOR:
3921 	  res = o0 ^ o1;
3922 	  break;
3923 
3924 	case SMIN:
3925 	  res = o0.smin (o1);
3926 	  break;
3927 
3928 	case SMAX:
3929 	  res = o0.smax (o1);
3930 	  break;
3931 
3932 	case UMIN:
3933 	  res = o0.umin (o1);
3934 	  break;
3935 
3936 	case UMAX:
3937 	  res = o0.umax (o1);
3938 	  break;
3939 
3940 	case LSHIFTRT:   case ASHIFTRT:
3941 	case ASHIFT:
3942 	case ROTATE:     case ROTATERT:
3943 	  {
3944 	    unsigned HOST_WIDE_INT cnt;
3945 
3946 	    if (SHIFT_COUNT_TRUNCATED)
3947 	      {
3948 		o1.high = 0;
3949 		o1.low &= GET_MODE_PRECISION (mode) - 1;
3950 	      }
3951 
3952 	    if (!o1.fits_uhwi ()
3953 	        || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3954 	      return 0;
3955 
3956 	    cnt = o1.to_uhwi ();
3957 	    unsigned short prec = GET_MODE_PRECISION (mode);
3958 
3959 	    if (code == LSHIFTRT || code == ASHIFTRT)
3960 	      res = o0.rshift (cnt, prec, code == ASHIFTRT);
3961 	    else if (code == ASHIFT)
3962 	      res = o0.alshift (cnt, prec);
3963 	    else if (code == ROTATE)
3964 	      res = o0.lrotate (cnt, prec);
3965 	    else /* code == ROTATERT */
3966 	      res = o0.rrotate (cnt, prec);
3967 	  }
3968 	  break;
3969 
3970 	default:
3971 	  return 0;
3972 	}
3973 
3974       return immed_double_int_const (res, mode);
3975     }
3976 
3977   if (CONST_INT_P (op0) && CONST_INT_P (op1)
3978       && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3979     {
3980       /* Get the integer argument values in two forms:
3981          zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
3982 
3983       arg0 = INTVAL (op0);
3984       arg1 = INTVAL (op1);
3985 
3986       if (width < HOST_BITS_PER_WIDE_INT)
3987         {
3988           arg0 &= GET_MODE_MASK (mode);
3989           arg1 &= GET_MODE_MASK (mode);
3990 
3991           arg0s = arg0;
3992 	  if (val_signbit_known_set_p (mode, arg0s))
3993 	    arg0s |= ~GET_MODE_MASK (mode);
3994 
3995           arg1s = arg1;
3996 	  if (val_signbit_known_set_p (mode, arg1s))
3997 	    arg1s |= ~GET_MODE_MASK (mode);
3998 	}
3999       else
4000 	{
4001 	  arg0s = arg0;
4002 	  arg1s = arg1;
4003 	}
4004 
4005       /* Compute the value of the arithmetic.  */
4006 
4007       switch (code)
4008 	{
4009 	case PLUS:
4010 	  val = arg0s + arg1s;
4011 	  break;
4012 
4013 	case MINUS:
4014 	  val = arg0s - arg1s;
4015 	  break;
4016 
4017 	case MULT:
4018 	  val = arg0s * arg1s;
4019 	  break;
4020 
4021 	case DIV:
4022 	  if (arg1s == 0
4023 	      || ((unsigned HOST_WIDE_INT) arg0s
4024 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4025 		  && arg1s == -1))
4026 	    return 0;
4027 	  val = arg0s / arg1s;
4028 	  break;
4029 
4030 	case MOD:
4031 	  if (arg1s == 0
4032 	      || ((unsigned HOST_WIDE_INT) arg0s
4033 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4034 		  && arg1s == -1))
4035 	    return 0;
4036 	  val = arg0s % arg1s;
4037 	  break;
4038 
4039 	case UDIV:
4040 	  if (arg1 == 0
4041 	      || ((unsigned HOST_WIDE_INT) arg0s
4042 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4043 		  && arg1s == -1))
4044 	    return 0;
4045 	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4046 	  break;
4047 
4048 	case UMOD:
4049 	  if (arg1 == 0
4050 	      || ((unsigned HOST_WIDE_INT) arg0s
4051 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4052 		  && arg1s == -1))
4053 	    return 0;
4054 	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4055 	  break;
4056 
4057 	case AND:
4058 	  val = arg0 & arg1;
4059 	  break;
4060 
4061 	case IOR:
4062 	  val = arg0 | arg1;
4063 	  break;
4064 
4065 	case XOR:
4066 	  val = arg0 ^ arg1;
4067 	  break;
4068 
4069 	case LSHIFTRT:
4070 	case ASHIFT:
4071 	case ASHIFTRT:
4072 	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4073 	     the value is in range.  We can't return any old value for
4074 	     out-of-range arguments because either the middle-end (via
4075 	     shift_truncation_mask) or the back-end might be relying on
4076 	     target-specific knowledge.  Nor can we rely on
4077 	     shift_truncation_mask, since the shift might not be part of an
4078 	     ashlM3, lshrM3 or ashrM3 instruction.  */
4079 	  if (SHIFT_COUNT_TRUNCATED)
4080 	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4081 	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4082 	    return 0;
4083 
4084 	  val = (code == ASHIFT
4085 		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4086 		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4087 
4088 	  /* Sign-extend the result for arithmetic right shifts.  */
4089 	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4090 	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4091 	  break;
4092 
4093 	case ROTATERT:
4094 	  if (arg1 < 0)
4095 	    return 0;
4096 
4097 	  arg1 %= width;
4098 	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4099 		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4100 	  break;
4101 
4102 	case ROTATE:
4103 	  if (arg1 < 0)
4104 	    return 0;
4105 
4106 	  arg1 %= width;
4107 	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4108 		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4109 	  break;
4110 
4111 	case COMPARE:
4112 	  /* Do nothing here.  */
4113 	  return 0;
4114 
4115 	case SMIN:
4116 	  val = arg0s <= arg1s ? arg0s : arg1s;
4117 	  break;
4118 
4119 	case UMIN:
4120 	  val = ((unsigned HOST_WIDE_INT) arg0
4121 		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4122 	  break;
4123 
4124 	case SMAX:
4125 	  val = arg0s > arg1s ? arg0s : arg1s;
4126 	  break;
4127 
4128 	case UMAX:
4129 	  val = ((unsigned HOST_WIDE_INT) arg0
4130 		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4131 	  break;
4132 
4133 	case SS_PLUS:
4134 	case US_PLUS:
4135 	case SS_MINUS:
4136 	case US_MINUS:
4137 	case SS_MULT:
4138 	case US_MULT:
4139 	case SS_DIV:
4140 	case US_DIV:
4141 	case SS_ASHIFT:
4142 	case US_ASHIFT:
4143 	  /* ??? There are simplifications that can be done.  */
4144 	  return 0;
4145 
4146 	default:
4147 	  gcc_unreachable ();
4148 	}
4149 
4150       return gen_int_mode (val, mode);
4151     }
4152 
4153   return NULL_RTX;
4154 }
4155 
4156 
4157 
4158 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4159    PLUS or MINUS.
4160 
4161    Rather than test for specific case, we do this by a brute-force method
4162    and do all possible simplifications until no more changes occur.  Then
4163    we rebuild the operation.  */
4164 
4165 struct simplify_plus_minus_op_data
4166 {
4167   rtx op;
4168   short neg;
4169 };
4170 
4171 static bool
4172 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4173 {
4174   int result;
4175 
4176   result = (commutative_operand_precedence (y)
4177 	    - commutative_operand_precedence (x));
4178   if (result)
4179     return result > 0;
4180 
4181   /* Group together equal REGs to do more simplification.  */
4182   if (REG_P (x) && REG_P (y))
4183     return REGNO (x) > REGNO (y);
4184   else
4185     return false;
4186 }
4187 
4188 static rtx
4189 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4190 		     rtx op1)
4191 {
4192   struct simplify_plus_minus_op_data ops[8];
4193   rtx result, tem;
4194   int n_ops = 2, input_ops = 2;
4195   int changed, n_constants = 0, canonicalized = 0;
4196   int i, j;
4197 
4198   memset (ops, 0, sizeof ops);
4199 
4200   /* Set up the two operands and then expand them until nothing has been
4201      changed.  If we run out of room in our array, give up; this should
4202      almost never happen.  */
4203 
4204   ops[0].op = op0;
4205   ops[0].neg = 0;
4206   ops[1].op = op1;
4207   ops[1].neg = (code == MINUS);
4208 
4209   do
4210     {
4211       changed = 0;
4212 
4213       for (i = 0; i < n_ops; i++)
4214 	{
4215 	  rtx this_op = ops[i].op;
4216 	  int this_neg = ops[i].neg;
4217 	  enum rtx_code this_code = GET_CODE (this_op);
4218 
4219 	  switch (this_code)
4220 	    {
4221 	    case PLUS:
4222 	    case MINUS:
4223 	      if (n_ops == 7)
4224 		return NULL_RTX;
4225 
4226 	      ops[n_ops].op = XEXP (this_op, 1);
4227 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4228 	      n_ops++;
4229 
4230 	      ops[i].op = XEXP (this_op, 0);
4231 	      input_ops++;
4232 	      changed = 1;
4233 	      canonicalized |= this_neg;
4234 	      break;
4235 
4236 	    case NEG:
4237 	      ops[i].op = XEXP (this_op, 0);
4238 	      ops[i].neg = ! this_neg;
4239 	      changed = 1;
4240 	      canonicalized = 1;
4241 	      break;
4242 
4243 	    case CONST:
4244 	      if (n_ops < 7
4245 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
4246 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4247 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4248 		{
4249 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
4250 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4251 		  ops[n_ops].neg = this_neg;
4252 		  n_ops++;
4253 		  changed = 1;
4254 	          canonicalized = 1;
4255 		}
4256 	      break;
4257 
4258 	    case NOT:
4259 	      /* ~a -> (-a - 1) */
4260 	      if (n_ops != 7)
4261 		{
4262 		  ops[n_ops].op = CONSTM1_RTX (mode);
4263 		  ops[n_ops++].neg = this_neg;
4264 		  ops[i].op = XEXP (this_op, 0);
4265 		  ops[i].neg = !this_neg;
4266 		  changed = 1;
4267 	          canonicalized = 1;
4268 		}
4269 	      break;
4270 
4271 	    case CONST_INT:
4272 	      n_constants++;
4273 	      if (this_neg)
4274 		{
4275 		  ops[i].op = neg_const_int (mode, this_op);
4276 		  ops[i].neg = 0;
4277 		  changed = 1;
4278 	          canonicalized = 1;
4279 		}
4280 	      break;
4281 
4282 	    default:
4283 	      break;
4284 	    }
4285 	}
4286     }
4287   while (changed);
4288 
4289   if (n_constants > 1)
4290     canonicalized = 1;
4291 
4292   gcc_assert (n_ops >= 2);
4293 
4294   /* If we only have two operands, we can avoid the loops.  */
4295   if (n_ops == 2)
4296     {
4297       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4298       rtx lhs, rhs;
4299 
4300       /* Get the two operands.  Be careful with the order, especially for
4301 	 the cases where code == MINUS.  */
4302       if (ops[0].neg && ops[1].neg)
4303 	{
4304 	  lhs = gen_rtx_NEG (mode, ops[0].op);
4305 	  rhs = ops[1].op;
4306 	}
4307       else if (ops[0].neg)
4308 	{
4309 	  lhs = ops[1].op;
4310 	  rhs = ops[0].op;
4311 	}
4312       else
4313 	{
4314 	  lhs = ops[0].op;
4315 	  rhs = ops[1].op;
4316 	}
4317 
4318       return simplify_const_binary_operation (code, mode, lhs, rhs);
4319     }
4320 
4321   /* Now simplify each pair of operands until nothing changes.  */
4322   do
4323     {
4324       /* Insertion sort is good enough for an eight-element array.  */
4325       for (i = 1; i < n_ops; i++)
4326         {
4327           struct simplify_plus_minus_op_data save;
4328           j = i - 1;
4329           if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4330 	    continue;
4331 
4332           canonicalized = 1;
4333           save = ops[i];
4334           do
4335 	    ops[j + 1] = ops[j];
4336           while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4337           ops[j + 1] = save;
4338         }
4339 
4340       changed = 0;
4341       for (i = n_ops - 1; i > 0; i--)
4342 	for (j = i - 1; j >= 0; j--)
4343 	  {
4344 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4345 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4346 
4347 	    if (lhs != 0 && rhs != 0)
4348 	      {
4349 		enum rtx_code ncode = PLUS;
4350 
4351 		if (lneg != rneg)
4352 		  {
4353 		    ncode = MINUS;
4354 		    if (lneg)
4355 		      tem = lhs, lhs = rhs, rhs = tem;
4356 		  }
4357 		else if (swap_commutative_operands_p (lhs, rhs))
4358 		  tem = lhs, lhs = rhs, rhs = tem;
4359 
4360 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4361 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4362 		  {
4363 		    rtx tem_lhs, tem_rhs;
4364 
4365 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4366 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4367 		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4368 
4369 		    if (tem && !CONSTANT_P (tem))
4370 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4371 		  }
4372 		else
4373 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4374 
4375 		/* Reject "simplifications" that just wrap the two
4376 		   arguments in a CONST.  Failure to do so can result
4377 		   in infinite recursion with simplify_binary_operation
4378 		   when it calls us to simplify CONST operations.  */
4379 		if (tem
4380 		    && ! (GET_CODE (tem) == CONST
4381 			  && GET_CODE (XEXP (tem, 0)) == ncode
4382 			  && XEXP (XEXP (tem, 0), 0) == lhs
4383 			  && XEXP (XEXP (tem, 0), 1) == rhs))
4384 		  {
4385 		    lneg &= rneg;
4386 		    if (GET_CODE (tem) == NEG)
4387 		      tem = XEXP (tem, 0), lneg = !lneg;
4388 		    if (CONST_INT_P (tem) && lneg)
4389 		      tem = neg_const_int (mode, tem), lneg = 0;
4390 
4391 		    ops[i].op = tem;
4392 		    ops[i].neg = lneg;
4393 		    ops[j].op = NULL_RTX;
4394 		    changed = 1;
4395 		    canonicalized = 1;
4396 		  }
4397 	      }
4398 	  }
4399 
4400       /* If nothing changed, fail.  */
4401       if (!canonicalized)
4402         return NULL_RTX;
4403 
4404       /* Pack all the operands to the lower-numbered entries.  */
4405       for (i = 0, j = 0; j < n_ops; j++)
4406         if (ops[j].op)
4407           {
4408 	    ops[i] = ops[j];
4409 	    i++;
4410           }
4411       n_ops = i;
4412     }
4413   while (changed);
4414 
4415   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4416   if (n_ops == 2
4417       && CONST_INT_P (ops[1].op)
4418       && CONSTANT_P (ops[0].op)
4419       && ops[0].neg)
4420     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4421 
4422   /* We suppressed creation of trivial CONST expressions in the
4423      combination loop to avoid recursion.  Create one manually now.
4424      The combination loop should have ensured that there is exactly
4425      one CONST_INT, and the sort will have ensured that it is last
4426      in the array and that any other constant will be next-to-last.  */
4427 
4428   if (n_ops > 1
4429       && CONST_INT_P (ops[n_ops - 1].op)
4430       && CONSTANT_P (ops[n_ops - 2].op))
4431     {
4432       rtx value = ops[n_ops - 1].op;
4433       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4434 	value = neg_const_int (mode, value);
4435       ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4436 					 INTVAL (value));
4437       n_ops--;
4438     }
4439 
4440   /* Put a non-negated operand first, if possible.  */
4441 
4442   for (i = 0; i < n_ops && ops[i].neg; i++)
4443     continue;
4444   if (i == n_ops)
4445     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4446   else if (i != 0)
4447     {
4448       tem = ops[0].op;
4449       ops[0] = ops[i];
4450       ops[i].op = tem;
4451       ops[i].neg = 1;
4452     }
4453 
4454   /* Now make the result by performing the requested operations.  */
4455   result = ops[0].op;
4456   for (i = 1; i < n_ops; i++)
4457     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4458 			     mode, result, ops[i].op);
4459 
4460   return result;
4461 }
4462 
4463 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4464 static bool
4465 plus_minus_operand_p (const_rtx x)
4466 {
4467   return GET_CODE (x) == PLUS
4468          || GET_CODE (x) == MINUS
4469 	 || (GET_CODE (x) == CONST
4470 	     && GET_CODE (XEXP (x, 0)) == PLUS
4471 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4472 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4473 }
4474 
4475 /* Like simplify_binary_operation except used for relational operators.
4476    MODE is the mode of the result. If MODE is VOIDmode, both operands must
4477    not also be VOIDmode.
4478 
4479    CMP_MODE specifies in which mode the comparison is done in, so it is
4480    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4481    the operands or, if both are VOIDmode, the operands are compared in
4482    "infinite precision".  */
4483 rtx
4484 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4485 			       enum machine_mode cmp_mode, rtx op0, rtx op1)
4486 {
4487   rtx tem, trueop0, trueop1;
4488 
4489   if (cmp_mode == VOIDmode)
4490     cmp_mode = GET_MODE (op0);
4491   if (cmp_mode == VOIDmode)
4492     cmp_mode = GET_MODE (op1);
4493 
4494   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4495   if (tem)
4496     {
4497       if (SCALAR_FLOAT_MODE_P (mode))
4498 	{
4499           if (tem == const0_rtx)
4500             return CONST0_RTX (mode);
4501 #ifdef FLOAT_STORE_FLAG_VALUE
4502 	  {
4503 	    REAL_VALUE_TYPE val;
4504 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4505 	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4506 	  }
4507 #else
4508 	  return NULL_RTX;
4509 #endif
4510 	}
4511       if (VECTOR_MODE_P (mode))
4512 	{
4513 	  if (tem == const0_rtx)
4514 	    return CONST0_RTX (mode);
4515 #ifdef VECTOR_STORE_FLAG_VALUE
4516 	  {
4517 	    int i, units;
4518 	    rtvec v;
4519 
4520 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4521 	    if (val == NULL_RTX)
4522 	      return NULL_RTX;
4523 	    if (val == const1_rtx)
4524 	      return CONST1_RTX (mode);
4525 
4526 	    units = GET_MODE_NUNITS (mode);
4527 	    v = rtvec_alloc (units);
4528 	    for (i = 0; i < units; i++)
4529 	      RTVEC_ELT (v, i) = val;
4530 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
4531 	  }
4532 #else
4533 	  return NULL_RTX;
4534 #endif
4535 	}
4536 
4537       return tem;
4538     }
4539 
4540   /* For the following tests, ensure const0_rtx is op1.  */
4541   if (swap_commutative_operands_p (op0, op1)
4542       || (op0 == const0_rtx && op1 != const0_rtx))
4543     tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4544 
4545   /* If op0 is a compare, extract the comparison arguments from it.  */
4546   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4547     return simplify_gen_relational (code, mode, VOIDmode,
4548 				    XEXP (op0, 0), XEXP (op0, 1));
4549 
4550   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4551       || CC0_P (op0))
4552     return NULL_RTX;
4553 
4554   trueop0 = avoid_constant_pool_reference (op0);
4555   trueop1 = avoid_constant_pool_reference (op1);
4556   return simplify_relational_operation_1 (code, mode, cmp_mode,
4557 		  			  trueop0, trueop1);
4558 }
4559 
4560 /* This part of simplify_relational_operation is only used when CMP_MODE
4561    is not in class MODE_CC (i.e. it is a real comparison).
4562 
4563    MODE is the mode of the result, while CMP_MODE specifies in which
4564    mode the comparison is done in, so it is the mode of the operands.  */
4565 
4566 static rtx
4567 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4568 				 enum machine_mode cmp_mode, rtx op0, rtx op1)
4569 {
4570   enum rtx_code op0code = GET_CODE (op0);
4571 
4572   if (op1 == const0_rtx && COMPARISON_P (op0))
4573     {
4574       /* If op0 is a comparison, extract the comparison arguments
4575          from it.  */
4576       if (code == NE)
4577 	{
4578 	  if (GET_MODE (op0) == mode)
4579 	    return simplify_rtx (op0);
4580 	  else
4581 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4582 					    XEXP (op0, 0), XEXP (op0, 1));
4583 	}
4584       else if (code == EQ)
4585 	{
4586 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4587 	  if (new_code != UNKNOWN)
4588 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4589 					    XEXP (op0, 0), XEXP (op0, 1));
4590 	}
4591     }
4592 
4593   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4594      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
4595   if ((code == LTU || code == GEU)
4596       && GET_CODE (op0) == PLUS
4597       && CONST_INT_P (XEXP (op0, 1))
4598       && (rtx_equal_p (op1, XEXP (op0, 0))
4599 	  || rtx_equal_p (op1, XEXP (op0, 1)))
4600       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4601       && XEXP (op0, 1) != const0_rtx)
4602     {
4603       rtx new_cmp
4604 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4605       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4606 				      cmp_mode, XEXP (op0, 0), new_cmp);
4607     }
4608 
4609   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4610   if ((code == LTU || code == GEU)
4611       && GET_CODE (op0) == PLUS
4612       && rtx_equal_p (op1, XEXP (op0, 1))
4613       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4614       && !rtx_equal_p (op1, XEXP (op0, 0)))
4615     return simplify_gen_relational (code, mode, cmp_mode, op0,
4616 				    copy_rtx (XEXP (op0, 0)));
4617 
4618   if (op1 == const0_rtx)
4619     {
4620       /* Canonicalize (GTU x 0) as (NE x 0).  */
4621       if (code == GTU)
4622         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4623       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4624       if (code == LEU)
4625         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4626     }
4627   else if (op1 == const1_rtx)
4628     {
4629       switch (code)
4630         {
4631         case GE:
4632 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4633 	  return simplify_gen_relational (GT, mode, cmp_mode,
4634 					  op0, const0_rtx);
4635 	case GEU:
4636 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4637 	  return simplify_gen_relational (NE, mode, cmp_mode,
4638 					  op0, const0_rtx);
4639 	case LT:
4640 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4641 	  return simplify_gen_relational (LE, mode, cmp_mode,
4642 					  op0, const0_rtx);
4643 	case LTU:
4644 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4645 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4646 					  op0, const0_rtx);
4647 	default:
4648 	  break;
4649 	}
4650     }
4651   else if (op1 == constm1_rtx)
4652     {
4653       /* Canonicalize (LE x -1) as (LT x 0).  */
4654       if (code == LE)
4655         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4656       /* Canonicalize (GT x -1) as (GE x 0).  */
4657       if (code == GT)
4658         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4659     }
4660 
4661   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
4662   if ((code == EQ || code == NE)
4663       && (op0code == PLUS || op0code == MINUS)
4664       && CONSTANT_P (op1)
4665       && CONSTANT_P (XEXP (op0, 1))
4666       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4667     {
4668       rtx x = XEXP (op0, 0);
4669       rtx c = XEXP (op0, 1);
4670       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4671       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4672 
4673       /* Detect an infinite recursive condition, where we oscillate at this
4674 	 simplification case between:
4675 	    A + B == C  <--->  C - B == A,
4676 	 where A, B, and C are all constants with non-simplifiable expressions,
4677 	 usually SYMBOL_REFs.  */
4678       if (GET_CODE (tem) == invcode
4679 	  && CONSTANT_P (x)
4680 	  && rtx_equal_p (c, XEXP (tem, 1)))
4681 	return NULL_RTX;
4682 
4683       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4684     }
4685 
4686   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4687      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4688   if (code == NE
4689       && op1 == const0_rtx
4690       && GET_MODE_CLASS (mode) == MODE_INT
4691       && cmp_mode != VOIDmode
4692       /* ??? Work-around BImode bugs in the ia64 backend.  */
4693       && mode != BImode
4694       && cmp_mode != BImode
4695       && nonzero_bits (op0, cmp_mode) == 1
4696       && STORE_FLAG_VALUE == 1)
4697     return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4698 	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4699 	   : lowpart_subreg (mode, op0, cmp_mode);
4700 
4701   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4702   if ((code == EQ || code == NE)
4703       && op1 == const0_rtx
4704       && op0code == XOR)
4705     return simplify_gen_relational (code, mode, cmp_mode,
4706 				    XEXP (op0, 0), XEXP (op0, 1));
4707 
4708   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4709   if ((code == EQ || code == NE)
4710       && op0code == XOR
4711       && rtx_equal_p (XEXP (op0, 0), op1)
4712       && !side_effects_p (XEXP (op0, 0)))
4713     return simplify_gen_relational (code, mode, cmp_mode,
4714 				    XEXP (op0, 1), const0_rtx);
4715 
4716   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4717   if ((code == EQ || code == NE)
4718       && op0code == XOR
4719       && rtx_equal_p (XEXP (op0, 1), op1)
4720       && !side_effects_p (XEXP (op0, 1)))
4721     return simplify_gen_relational (code, mode, cmp_mode,
4722 				    XEXP (op0, 0), const0_rtx);
4723 
4724   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
4725   if ((code == EQ || code == NE)
4726       && op0code == XOR
4727       && CONST_SCALAR_INT_P (op1)
4728       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4729     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4730 				    simplify_gen_binary (XOR, cmp_mode,
4731 							 XEXP (op0, 1), op1));
4732 
4733   if (op0code == POPCOUNT && op1 == const0_rtx)
4734     switch (code)
4735       {
4736       case EQ:
4737       case LE:
4738       case LEU:
4739 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4740 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4741 					XEXP (op0, 0), const0_rtx);
4742 
4743       case NE:
4744       case GT:
4745       case GTU:
4746 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4747 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4748 					XEXP (op0, 0), const0_rtx);
4749 
4750       default:
4751 	break;
4752       }
4753 
4754   return NULL_RTX;
4755 }
4756 
4757 enum
4758 {
4759   CMP_EQ = 1,
4760   CMP_LT = 2,
4761   CMP_GT = 4,
4762   CMP_LTU = 8,
4763   CMP_GTU = 16
4764 };
4765 
4766 
4767 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4768    KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4769    For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4770    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4771    For floating-point comparisons, assume that the operands were ordered.  */
4772 
4773 static rtx
4774 comparison_result (enum rtx_code code, int known_results)
4775 {
4776   switch (code)
4777     {
4778     case EQ:
4779     case UNEQ:
4780       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4781     case NE:
4782     case LTGT:
4783       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4784 
4785     case LT:
4786     case UNLT:
4787       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4788     case GE:
4789     case UNGE:
4790       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4791 
4792     case GT:
4793     case UNGT:
4794       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4795     case LE:
4796     case UNLE:
4797       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4798 
4799     case LTU:
4800       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4801     case GEU:
4802       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4803 
4804     case GTU:
4805       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4806     case LEU:
4807       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4808 
4809     case ORDERED:
4810       return const_true_rtx;
4811     case UNORDERED:
4812       return const0_rtx;
4813     default:
4814       gcc_unreachable ();
4815     }
4816 }
4817 
4818 /* Check if the given comparison (done in the given MODE) is actually a
4819    tautology or a contradiction.
4820    If no simplification is possible, this function returns zero.
4821    Otherwise, it returns either const_true_rtx or const0_rtx.  */
4822 
4823 rtx
4824 simplify_const_relational_operation (enum rtx_code code,
4825 				     enum machine_mode mode,
4826 				     rtx op0, rtx op1)
4827 {
4828   rtx tem;
4829   rtx trueop0;
4830   rtx trueop1;
4831 
4832   gcc_assert (mode != VOIDmode
4833 	      || (GET_MODE (op0) == VOIDmode
4834 		  && GET_MODE (op1) == VOIDmode));
4835 
4836   /* If op0 is a compare, extract the comparison arguments from it.  */
4837   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4838     {
4839       op1 = XEXP (op0, 1);
4840       op0 = XEXP (op0, 0);
4841 
4842       if (GET_MODE (op0) != VOIDmode)
4843 	mode = GET_MODE (op0);
4844       else if (GET_MODE (op1) != VOIDmode)
4845 	mode = GET_MODE (op1);
4846       else
4847 	return 0;
4848     }
4849 
4850   /* We can't simplify MODE_CC values since we don't know what the
4851      actual comparison is.  */
4852   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4853     return 0;
4854 
4855   /* Make sure the constant is second.  */
4856   if (swap_commutative_operands_p (op0, op1))
4857     {
4858       tem = op0, op0 = op1, op1 = tem;
4859       code = swap_condition (code);
4860     }
4861 
4862   trueop0 = avoid_constant_pool_reference (op0);
4863   trueop1 = avoid_constant_pool_reference (op1);
4864 
4865   /* For integer comparisons of A and B maybe we can simplify A - B and can
4866      then simplify a comparison of that with zero.  If A and B are both either
4867      a register or a CONST_INT, this can't help; testing for these cases will
4868      prevent infinite recursion here and speed things up.
4869 
4870      We can only do this for EQ and NE comparisons as otherwise we may
4871      lose or introduce overflow which we cannot disregard as undefined as
4872      we do not know the signedness of the operation on either the left or
4873      the right hand side of the comparison.  */
4874 
4875   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4876       && (code == EQ || code == NE)
4877       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4878 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
4879       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4880       /* We cannot do this if tem is a nonzero address.  */
4881       && ! nonzero_address_p (tem))
4882     return simplify_const_relational_operation (signed_condition (code),
4883 						mode, tem, const0_rtx);
4884 
4885   if (! HONOR_NANS (mode) && code == ORDERED)
4886     return const_true_rtx;
4887 
4888   if (! HONOR_NANS (mode) && code == UNORDERED)
4889     return const0_rtx;
4890 
4891   /* For modes without NaNs, if the two operands are equal, we know the
4892      result except if they have side-effects.  Even with NaNs we know
4893      the result of unordered comparisons and, if signaling NaNs are
4894      irrelevant, also the result of LT/GT/LTGT.  */
4895   if ((! HONOR_NANS (GET_MODE (trueop0))
4896        || code == UNEQ || code == UNLE || code == UNGE
4897        || ((code == LT || code == GT || code == LTGT)
4898 	   && ! HONOR_SNANS (GET_MODE (trueop0))))
4899       && rtx_equal_p (trueop0, trueop1)
4900       && ! side_effects_p (trueop0))
4901     return comparison_result (code, CMP_EQ);
4902 
4903   /* If the operands are floating-point constants, see if we can fold
4904      the result.  */
4905   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4906       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4907       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4908     {
4909       REAL_VALUE_TYPE d0, d1;
4910 
4911       REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4912       REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4913 
4914       /* Comparisons are unordered iff at least one of the values is NaN.  */
4915       if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4916 	switch (code)
4917 	  {
4918 	  case UNEQ:
4919 	  case UNLT:
4920 	  case UNGT:
4921 	  case UNLE:
4922 	  case UNGE:
4923 	  case NE:
4924 	  case UNORDERED:
4925 	    return const_true_rtx;
4926 	  case EQ:
4927 	  case LT:
4928 	  case GT:
4929 	  case LE:
4930 	  case GE:
4931 	  case LTGT:
4932 	  case ORDERED:
4933 	    return const0_rtx;
4934 	  default:
4935 	    return 0;
4936 	  }
4937 
4938       return comparison_result (code,
4939 				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4940 				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4941     }
4942 
4943   /* Otherwise, see if the operands are both integers.  */
4944   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4945        && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4946        && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4947     {
4948       int width = GET_MODE_PRECISION (mode);
4949       HOST_WIDE_INT l0s, h0s, l1s, h1s;
4950       unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4951 
4952       /* Get the two words comprising each integer constant.  */
4953       if (CONST_DOUBLE_AS_INT_P (trueop0))
4954 	{
4955 	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4956 	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4957 	}
4958       else
4959 	{
4960 	  l0u = l0s = INTVAL (trueop0);
4961 	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
4962 	}
4963 
4964       if (CONST_DOUBLE_AS_INT_P (trueop1))
4965 	{
4966 	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4967 	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4968 	}
4969       else
4970 	{
4971 	  l1u = l1s = INTVAL (trueop1);
4972 	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
4973 	}
4974 
4975       /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4976 	 we have to sign or zero-extend the values.  */
4977       if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4978 	{
4979 	  l0u &= GET_MODE_MASK (mode);
4980 	  l1u &= GET_MODE_MASK (mode);
4981 
4982 	  if (val_signbit_known_set_p (mode, l0s))
4983 	    l0s |= ~GET_MODE_MASK (mode);
4984 
4985 	  if (val_signbit_known_set_p (mode, l1s))
4986 	    l1s |= ~GET_MODE_MASK (mode);
4987 	}
4988       if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4989 	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4990 
4991       if (h0u == h1u && l0u == l1u)
4992 	return comparison_result (code, CMP_EQ);
4993       else
4994 	{
4995 	  int cr;
4996 	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4997 	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4998 	  return comparison_result (code, cr);
4999 	}
5000     }
5001 
5002   /* Optimize comparisons with upper and lower bounds.  */
5003   if (HWI_COMPUTABLE_MODE_P (mode)
5004       && CONST_INT_P (trueop1))
5005     {
5006       int sign;
5007       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5008       HOST_WIDE_INT val = INTVAL (trueop1);
5009       HOST_WIDE_INT mmin, mmax;
5010 
5011       if (code == GEU
5012 	  || code == LEU
5013 	  || code == GTU
5014 	  || code == LTU)
5015 	sign = 0;
5016       else
5017 	sign = 1;
5018 
5019       /* Get a reduced range if the sign bit is zero.  */
5020       if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5021 	{
5022 	  mmin = 0;
5023 	  mmax = nonzero;
5024 	}
5025       else
5026 	{
5027 	  rtx mmin_rtx, mmax_rtx;
5028 	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5029 
5030 	  mmin = INTVAL (mmin_rtx);
5031 	  mmax = INTVAL (mmax_rtx);
5032 	  if (sign)
5033 	    {
5034 	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5035 
5036 	      mmin >>= (sign_copies - 1);
5037 	      mmax >>= (sign_copies - 1);
5038 	    }
5039 	}
5040 
5041       switch (code)
5042 	{
5043 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
5044 	case GEU:
5045 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5046 	    return const_true_rtx;
5047 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5048 	    return const0_rtx;
5049 	  break;
5050 	case GE:
5051 	  if (val <= mmin)
5052 	    return const_true_rtx;
5053 	  if (val > mmax)
5054 	    return const0_rtx;
5055 	  break;
5056 
5057 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
5058 	case LEU:
5059 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5060 	    return const_true_rtx;
5061 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5062 	    return const0_rtx;
5063 	  break;
5064 	case LE:
5065 	  if (val >= mmax)
5066 	    return const_true_rtx;
5067 	  if (val < mmin)
5068 	    return const0_rtx;
5069 	  break;
5070 
5071 	case EQ:
5072 	  /* x == y is always false for y out of range.  */
5073 	  if (val < mmin || val > mmax)
5074 	    return const0_rtx;
5075 	  break;
5076 
5077 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
5078 	case GTU:
5079 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5080 	    return const0_rtx;
5081 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5082 	    return const_true_rtx;
5083 	  break;
5084 	case GT:
5085 	  if (val >= mmax)
5086 	    return const0_rtx;
5087 	  if (val < mmin)
5088 	    return const_true_rtx;
5089 	  break;
5090 
5091 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
5092 	case LTU:
5093 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5094 	    return const0_rtx;
5095 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5096 	    return const_true_rtx;
5097 	  break;
5098 	case LT:
5099 	  if (val <= mmin)
5100 	    return const0_rtx;
5101 	  if (val > mmax)
5102 	    return const_true_rtx;
5103 	  break;
5104 
5105 	case NE:
5106 	  /* x != y is always true for y out of range.  */
5107 	  if (val < mmin || val > mmax)
5108 	    return const_true_rtx;
5109 	  break;
5110 
5111 	default:
5112 	  break;
5113 	}
5114     }
5115 
5116   /* Optimize integer comparisons with zero.  */
5117   if (trueop1 == const0_rtx)
5118     {
5119       /* Some addresses are known to be nonzero.  We don't know
5120 	 their sign, but equality comparisons are known.  */
5121       if (nonzero_address_p (trueop0))
5122 	{
5123 	  if (code == EQ || code == LEU)
5124 	    return const0_rtx;
5125 	  if (code == NE || code == GTU)
5126 	    return const_true_rtx;
5127 	}
5128 
5129       /* See if the first operand is an IOR with a constant.  If so, we
5130 	 may be able to determine the result of this comparison.  */
5131       if (GET_CODE (op0) == IOR)
5132 	{
5133 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5134 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5135 	    {
5136 	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5137 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5138 			      && (UINTVAL (inner_const)
5139 				  & ((unsigned HOST_WIDE_INT) 1
5140 				     << sign_bitnum)));
5141 
5142 	      switch (code)
5143 		{
5144 		case EQ:
5145 		case LEU:
5146 		  return const0_rtx;
5147 		case NE:
5148 		case GTU:
5149 		  return const_true_rtx;
5150 		case LT:
5151 		case LE:
5152 		  if (has_sign)
5153 		    return const_true_rtx;
5154 		  break;
5155 		case GT:
5156 		case GE:
5157 		  if (has_sign)
5158 		    return const0_rtx;
5159 		  break;
5160 		default:
5161 		  break;
5162 		}
5163 	    }
5164 	}
5165     }
5166 
5167   /* Optimize comparison of ABS with zero.  */
5168   if (trueop1 == CONST0_RTX (mode)
5169       && (GET_CODE (trueop0) == ABS
5170 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
5171 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5172     {
5173       switch (code)
5174 	{
5175 	case LT:
5176 	  /* Optimize abs(x) < 0.0.  */
5177 	  if (!HONOR_SNANS (mode)
5178 	      && (!INTEGRAL_MODE_P (mode)
5179 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5180 	    {
5181 	      if (INTEGRAL_MODE_P (mode)
5182 		  && (issue_strict_overflow_warning
5183 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5184 		warning (OPT_Wstrict_overflow,
5185 			 ("assuming signed overflow does not occur when "
5186 			  "assuming abs (x) < 0 is false"));
5187 	       return const0_rtx;
5188 	    }
5189 	  break;
5190 
5191 	case GE:
5192 	  /* Optimize abs(x) >= 0.0.  */
5193 	  if (!HONOR_NANS (mode)
5194 	      && (!INTEGRAL_MODE_P (mode)
5195 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5196 	    {
5197 	      if (INTEGRAL_MODE_P (mode)
5198 	          && (issue_strict_overflow_warning
5199 	    	  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5200 	        warning (OPT_Wstrict_overflow,
5201 			 ("assuming signed overflow does not occur when "
5202 			  "assuming abs (x) >= 0 is true"));
5203 	      return const_true_rtx;
5204 	    }
5205 	  break;
5206 
5207 	case UNGE:
5208 	  /* Optimize ! (abs(x) < 0.0).  */
5209 	  return const_true_rtx;
5210 
5211 	default:
5212 	  break;
5213 	}
5214     }
5215 
5216   return 0;
5217 }
5218 
5219 /* Simplify CODE, an operation with result mode MODE and three operands,
5220    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
5221    a constant.  Return 0 if no simplifications is possible.  */
5222 
5223 rtx
5224 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5225 			    enum machine_mode op0_mode, rtx op0, rtx op1,
5226 			    rtx op2)
5227 {
5228   unsigned int width = GET_MODE_PRECISION (mode);
5229   bool any_change = false;
5230   rtx tem;
5231 
5232   /* VOIDmode means "infinite" precision.  */
5233   if (width == 0)
5234     width = HOST_BITS_PER_WIDE_INT;
5235 
5236   switch (code)
5237     {
5238     case FMA:
5239       /* Simplify negations around the multiplication.  */
5240       /* -a * -b + c  =>  a * b + c.  */
5241       if (GET_CODE (op0) == NEG)
5242 	{
5243 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
5244 	  if (tem)
5245 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5246 	}
5247       else if (GET_CODE (op1) == NEG)
5248 	{
5249 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
5250 	  if (tem)
5251 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5252 	}
5253 
5254       /* Canonicalize the two multiplication operands.  */
5255       /* a * -b + c  =>  -b * a + c.  */
5256       if (swap_commutative_operands_p (op0, op1))
5257 	tem = op0, op0 = op1, op1 = tem, any_change = true;
5258 
5259       if (any_change)
5260 	return gen_rtx_FMA (mode, op0, op1, op2);
5261       return NULL_RTX;
5262 
5263     case SIGN_EXTRACT:
5264     case ZERO_EXTRACT:
5265       if (CONST_INT_P (op0)
5266 	  && CONST_INT_P (op1)
5267 	  && CONST_INT_P (op2)
5268 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5269 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5270 	{
5271 	  /* Extracting a bit-field from a constant */
5272 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
5273 	  HOST_WIDE_INT op1val = INTVAL (op1);
5274 	  HOST_WIDE_INT op2val = INTVAL (op2);
5275 	  if (BITS_BIG_ENDIAN)
5276 	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5277 	  else
5278 	    val >>= op2val;
5279 
5280 	  if (HOST_BITS_PER_WIDE_INT != op1val)
5281 	    {
5282 	      /* First zero-extend.  */
5283 	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5284 	      /* If desired, propagate sign bit.  */
5285 	      if (code == SIGN_EXTRACT
5286 		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5287 		     != 0)
5288 		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5289 	    }
5290 
5291 	  return gen_int_mode (val, mode);
5292 	}
5293       break;
5294 
5295     case IF_THEN_ELSE:
5296       if (CONST_INT_P (op0))
5297 	return op0 != const0_rtx ? op1 : op2;
5298 
5299       /* Convert c ? a : a into "a".  */
5300       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5301 	return op1;
5302 
5303       /* Convert a != b ? a : b into "a".  */
5304       if (GET_CODE (op0) == NE
5305 	  && ! side_effects_p (op0)
5306 	  && ! HONOR_NANS (mode)
5307 	  && ! HONOR_SIGNED_ZEROS (mode)
5308 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5309 	       && rtx_equal_p (XEXP (op0, 1), op2))
5310 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5311 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5312 	return op1;
5313 
5314       /* Convert a == b ? a : b into "b".  */
5315       if (GET_CODE (op0) == EQ
5316 	  && ! side_effects_p (op0)
5317 	  && ! HONOR_NANS (mode)
5318 	  && ! HONOR_SIGNED_ZEROS (mode)
5319 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5320 	       && rtx_equal_p (XEXP (op0, 1), op2))
5321 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5322 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5323 	return op2;
5324 
5325       if (COMPARISON_P (op0) && ! side_effects_p (op0))
5326 	{
5327 	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5328 					? GET_MODE (XEXP (op0, 1))
5329 					: GET_MODE (XEXP (op0, 0)));
5330 	  rtx temp;
5331 
5332 	  /* Look for happy constants in op1 and op2.  */
5333 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
5334 	    {
5335 	      HOST_WIDE_INT t = INTVAL (op1);
5336 	      HOST_WIDE_INT f = INTVAL (op2);
5337 
5338 	      if (t == STORE_FLAG_VALUE && f == 0)
5339 	        code = GET_CODE (op0);
5340 	      else if (t == 0 && f == STORE_FLAG_VALUE)
5341 		{
5342 		  enum rtx_code tmp;
5343 		  tmp = reversed_comparison_code (op0, NULL_RTX);
5344 		  if (tmp == UNKNOWN)
5345 		    break;
5346 		  code = tmp;
5347 		}
5348 	      else
5349 		break;
5350 
5351 	      return simplify_gen_relational (code, mode, cmp_mode,
5352 					      XEXP (op0, 0), XEXP (op0, 1));
5353 	    }
5354 
5355 	  if (cmp_mode == VOIDmode)
5356 	    cmp_mode = op0_mode;
5357 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5358 			  			cmp_mode, XEXP (op0, 0),
5359 						XEXP (op0, 1));
5360 
5361 	  /* See if any simplifications were possible.  */
5362 	  if (temp)
5363 	    {
5364 	      if (CONST_INT_P (temp))
5365 		return temp == const0_rtx ? op2 : op1;
5366 	      else if (temp)
5367 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5368 	    }
5369 	}
5370       break;
5371 
5372     case VEC_MERGE:
5373       gcc_assert (GET_MODE (op0) == mode);
5374       gcc_assert (GET_MODE (op1) == mode);
5375       gcc_assert (VECTOR_MODE_P (mode));
5376       op2 = avoid_constant_pool_reference (op2);
5377       if (CONST_INT_P (op2))
5378 	{
5379           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5380 	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5381 	  int mask = (1 << n_elts) - 1;
5382 
5383 	  if (!(INTVAL (op2) & mask))
5384 	    return op1;
5385 	  if ((INTVAL (op2) & mask) == mask)
5386 	    return op0;
5387 
5388 	  op0 = avoid_constant_pool_reference (op0);
5389 	  op1 = avoid_constant_pool_reference (op1);
5390 	  if (GET_CODE (op0) == CONST_VECTOR
5391 	      && GET_CODE (op1) == CONST_VECTOR)
5392 	    {
5393 	      rtvec v = rtvec_alloc (n_elts);
5394 	      unsigned int i;
5395 
5396 	      for (i = 0; i < n_elts; i++)
5397 		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5398 				    ? CONST_VECTOR_ELT (op0, i)
5399 				    : CONST_VECTOR_ELT (op1, i));
5400 	      return gen_rtx_CONST_VECTOR (mode, v);
5401 	    }
5402 	}
5403       break;
5404 
5405     default:
5406       gcc_unreachable ();
5407     }
5408 
5409   return 0;
5410 }
5411 
5412 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5413    or CONST_VECTOR,
5414    returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5415 
5416    Works by unpacking OP into a collection of 8-bit values
5417    represented as a little-endian array of 'unsigned char', selecting by BYTE,
5418    and then repacking them again for OUTERMODE.  */
5419 
5420 static rtx
5421 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5422 		       enum machine_mode innermode, unsigned int byte)
5423 {
5424   /* We support up to 512-bit values (for V8DFmode).  */
5425   enum {
5426     max_bitsize = 512,
5427     value_bit = 8,
5428     value_mask = (1 << value_bit) - 1
5429   };
5430   unsigned char value[max_bitsize / value_bit];
5431   int value_start;
5432   int i;
5433   int elem;
5434 
5435   int num_elem;
5436   rtx * elems;
5437   int elem_bitsize;
5438   rtx result_s;
5439   rtvec result_v = NULL;
5440   enum mode_class outer_class;
5441   enum machine_mode outer_submode;
5442 
5443   /* Some ports misuse CCmode.  */
5444   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5445     return op;
5446 
5447   /* We have no way to represent a complex constant at the rtl level.  */
5448   if (COMPLEX_MODE_P (outermode))
5449     return NULL_RTX;
5450 
5451   /* Unpack the value.  */
5452 
5453   if (GET_CODE (op) == CONST_VECTOR)
5454     {
5455       num_elem = CONST_VECTOR_NUNITS (op);
5456       elems = &CONST_VECTOR_ELT (op, 0);
5457       elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5458     }
5459   else
5460     {
5461       num_elem = 1;
5462       elems = &op;
5463       elem_bitsize = max_bitsize;
5464     }
5465   /* If this asserts, it is too complicated; reducing value_bit may help.  */
5466   gcc_assert (BITS_PER_UNIT % value_bit == 0);
5467   /* I don't know how to handle endianness of sub-units.  */
5468   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5469 
5470   for (elem = 0; elem < num_elem; elem++)
5471     {
5472       unsigned char * vp;
5473       rtx el = elems[elem];
5474 
5475       /* Vectors are kept in target memory order.  (This is probably
5476 	 a mistake.)  */
5477       {
5478 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5479 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5480 			  / BITS_PER_UNIT);
5481 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5482 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5483 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5484 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5485 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5486       }
5487 
5488       switch (GET_CODE (el))
5489 	{
5490 	case CONST_INT:
5491 	  for (i = 0;
5492 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5493 	       i += value_bit)
5494 	    *vp++ = INTVAL (el) >> i;
5495 	  /* CONST_INTs are always logically sign-extended.  */
5496 	  for (; i < elem_bitsize; i += value_bit)
5497 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
5498 	  break;
5499 
5500 	case CONST_DOUBLE:
5501 	  if (GET_MODE (el) == VOIDmode)
5502 	    {
5503 	      unsigned char extend = 0;
5504 	      /* If this triggers, someone should have generated a
5505 		 CONST_INT instead.  */
5506 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5507 
5508 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5509 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
5510 	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5511 		{
5512 		  *vp++
5513 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5514 		  i += value_bit;
5515 		}
5516 
5517 	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5518 		extend = -1;
5519 	      for (; i < elem_bitsize; i += value_bit)
5520 		*vp++ = extend;
5521 	    }
5522 	  else
5523 	    {
5524 	      long tmp[max_bitsize / 32];
5525 	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5526 
5527 	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5528 	      gcc_assert (bitsize <= elem_bitsize);
5529 	      gcc_assert (bitsize % value_bit == 0);
5530 
5531 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5532 			      GET_MODE (el));
5533 
5534 	      /* real_to_target produces its result in words affected by
5535 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5536 		 and use WORDS_BIG_ENDIAN instead; see the documentation
5537 	         of SUBREG in rtl.texi.  */
5538 	      for (i = 0; i < bitsize; i += value_bit)
5539 		{
5540 		  int ibase;
5541 		  if (WORDS_BIG_ENDIAN)
5542 		    ibase = bitsize - 1 - i;
5543 		  else
5544 		    ibase = i;
5545 		  *vp++ = tmp[ibase / 32] >> i % 32;
5546 		}
5547 
5548 	      /* It shouldn't matter what's done here, so fill it with
5549 		 zero.  */
5550 	      for (; i < elem_bitsize; i += value_bit)
5551 		*vp++ = 0;
5552 	    }
5553 	  break;
5554 
5555         case CONST_FIXED:
5556 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5557 	    {
5558 	      for (i = 0; i < elem_bitsize; i += value_bit)
5559 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5560 	    }
5561 	  else
5562 	    {
5563 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5564 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5565               for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5566 		   i += value_bit)
5567 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
5568 			>> (i - HOST_BITS_PER_WIDE_INT);
5569 	      for (; i < elem_bitsize; i += value_bit)
5570 		*vp++ = 0;
5571 	    }
5572           break;
5573 
5574 	default:
5575 	  gcc_unreachable ();
5576 	}
5577     }
5578 
5579   /* Now, pick the right byte to start with.  */
5580   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
5581      case is paradoxical SUBREGs, which shouldn't be adjusted since they
5582      will already have offset 0.  */
5583   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5584     {
5585       unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5586 			- byte);
5587       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5588       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5589       byte = (subword_byte % UNITS_PER_WORD
5590 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5591     }
5592 
5593   /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
5594      so if it's become negative it will instead be very large.)  */
5595   gcc_assert (byte < GET_MODE_SIZE (innermode));
5596 
5597   /* Convert from bytes to chunks of size value_bit.  */
5598   value_start = byte * (BITS_PER_UNIT / value_bit);
5599 
5600   /* Re-pack the value.  */
5601 
5602   if (VECTOR_MODE_P (outermode))
5603     {
5604       num_elem = GET_MODE_NUNITS (outermode);
5605       result_v = rtvec_alloc (num_elem);
5606       elems = &RTVEC_ELT (result_v, 0);
5607       outer_submode = GET_MODE_INNER (outermode);
5608     }
5609   else
5610     {
5611       num_elem = 1;
5612       elems = &result_s;
5613       outer_submode = outermode;
5614     }
5615 
5616   outer_class = GET_MODE_CLASS (outer_submode);
5617   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5618 
5619   gcc_assert (elem_bitsize % value_bit == 0);
5620   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5621 
5622   for (elem = 0; elem < num_elem; elem++)
5623     {
5624       unsigned char *vp;
5625 
5626       /* Vectors are stored in target memory order.  (This is probably
5627 	 a mistake.)  */
5628       {
5629 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5630 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5631 			  / BITS_PER_UNIT);
5632 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5633 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5634 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5635 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5636 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5637       }
5638 
5639       switch (outer_class)
5640 	{
5641 	case MODE_INT:
5642 	case MODE_PARTIAL_INT:
5643 	  {
5644 	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
5645 
5646 	    for (i = 0;
5647 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5648 		 i += value_bit)
5649 	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5650 	    for (; i < elem_bitsize; i += value_bit)
5651 	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5652 		     << (i - HOST_BITS_PER_WIDE_INT);
5653 
5654 	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
5655 	       know why.  */
5656 	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5657 	      elems[elem] = gen_int_mode (lo, outer_submode);
5658 	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5659 	      elems[elem] = immed_double_const (lo, hi, outer_submode);
5660 	    else
5661 	      return NULL_RTX;
5662 	  }
5663 	  break;
5664 
5665 	case MODE_FLOAT:
5666 	case MODE_DECIMAL_FLOAT:
5667 	  {
5668 	    REAL_VALUE_TYPE r;
5669 	    long tmp[max_bitsize / 32];
5670 
5671 	    /* real_from_target wants its input in words affected by
5672 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5673 	       and use WORDS_BIG_ENDIAN instead; see the documentation
5674 	       of SUBREG in rtl.texi.  */
5675 	    for (i = 0; i < max_bitsize / 32; i++)
5676 	      tmp[i] = 0;
5677 	    for (i = 0; i < elem_bitsize; i += value_bit)
5678 	      {
5679 		int ibase;
5680 		if (WORDS_BIG_ENDIAN)
5681 		  ibase = elem_bitsize - 1 - i;
5682 		else
5683 		  ibase = i;
5684 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5685 	      }
5686 
5687 	    real_from_target (&r, tmp, outer_submode);
5688 	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5689 	  }
5690 	  break;
5691 
5692 	case MODE_FRACT:
5693 	case MODE_UFRACT:
5694 	case MODE_ACCUM:
5695 	case MODE_UACCUM:
5696 	  {
5697 	    FIXED_VALUE_TYPE f;
5698 	    f.data.low = 0;
5699 	    f.data.high = 0;
5700 	    f.mode = outer_submode;
5701 
5702 	    for (i = 0;
5703 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5704 		 i += value_bit)
5705 	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5706 	    for (; i < elem_bitsize; i += value_bit)
5707 	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5708 			     << (i - HOST_BITS_PER_WIDE_INT));
5709 
5710 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5711           }
5712           break;
5713 
5714 	default:
5715 	  gcc_unreachable ();
5716 	}
5717     }
5718   if (VECTOR_MODE_P (outermode))
5719     return gen_rtx_CONST_VECTOR (outermode, result_v);
5720   else
5721     return result_s;
5722 }
5723 
5724 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5725    Return 0 if no simplifications are possible.  */
5726 rtx
5727 simplify_subreg (enum machine_mode outermode, rtx op,
5728 		 enum machine_mode innermode, unsigned int byte)
5729 {
5730   /* Little bit of sanity checking.  */
5731   gcc_assert (innermode != VOIDmode);
5732   gcc_assert (outermode != VOIDmode);
5733   gcc_assert (innermode != BLKmode);
5734   gcc_assert (outermode != BLKmode);
5735 
5736   gcc_assert (GET_MODE (op) == innermode
5737 	      || GET_MODE (op) == VOIDmode);
5738 
5739   if ((byte % GET_MODE_SIZE (outermode)) != 0)
5740     return NULL_RTX;
5741 
5742   if (byte >= GET_MODE_SIZE (innermode))
5743     return NULL_RTX;
5744 
5745   if (outermode == innermode && !byte)
5746     return op;
5747 
5748   if (CONST_SCALAR_INT_P (op)
5749       || CONST_DOUBLE_AS_FLOAT_P (op)
5750       || GET_CODE (op) == CONST_FIXED
5751       || GET_CODE (op) == CONST_VECTOR)
5752     return simplify_immed_subreg (outermode, op, innermode, byte);
5753 
5754   /* Changing mode twice with SUBREG => just change it once,
5755      or not at all if changing back op starting mode.  */
5756   if (GET_CODE (op) == SUBREG)
5757     {
5758       enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5759       int final_offset = byte + SUBREG_BYTE (op);
5760       rtx newx;
5761 
5762       if (outermode == innermostmode
5763 	  && byte == 0 && SUBREG_BYTE (op) == 0)
5764 	return SUBREG_REG (op);
5765 
5766       /* The SUBREG_BYTE represents offset, as if the value were stored
5767 	 in memory.  Irritating exception is paradoxical subreg, where
5768 	 we define SUBREG_BYTE to be 0.  On big endian machines, this
5769 	 value should be negative.  For a moment, undo this exception.  */
5770       if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5771 	{
5772 	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5773 	  if (WORDS_BIG_ENDIAN)
5774 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5775 	  if (BYTES_BIG_ENDIAN)
5776 	    final_offset += difference % UNITS_PER_WORD;
5777 	}
5778       if (SUBREG_BYTE (op) == 0
5779 	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5780 	{
5781 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5782 	  if (WORDS_BIG_ENDIAN)
5783 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5784 	  if (BYTES_BIG_ENDIAN)
5785 	    final_offset += difference % UNITS_PER_WORD;
5786 	}
5787 
5788       /* See whether resulting subreg will be paradoxical.  */
5789       if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5790 	{
5791 	  /* In nonparadoxical subregs we can't handle negative offsets.  */
5792 	  if (final_offset < 0)
5793 	    return NULL_RTX;
5794 	  /* Bail out in case resulting subreg would be incorrect.  */
5795 	  if (final_offset % GET_MODE_SIZE (outermode)
5796 	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5797 	    return NULL_RTX;
5798 	}
5799       else
5800 	{
5801 	  int offset = 0;
5802 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5803 
5804 	  /* In paradoxical subreg, see if we are still looking on lower part.
5805 	     If so, our SUBREG_BYTE will be 0.  */
5806 	  if (WORDS_BIG_ENDIAN)
5807 	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5808 	  if (BYTES_BIG_ENDIAN)
5809 	    offset += difference % UNITS_PER_WORD;
5810 	  if (offset == final_offset)
5811 	    final_offset = 0;
5812 	  else
5813 	    return NULL_RTX;
5814 	}
5815 
5816       /* Recurse for further possible simplifications.  */
5817       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5818 			      final_offset);
5819       if (newx)
5820 	return newx;
5821       if (validate_subreg (outermode, innermostmode,
5822 			   SUBREG_REG (op), final_offset))
5823 	{
5824 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5825 	  if (SUBREG_PROMOTED_VAR_P (op)
5826 	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5827 	      && GET_MODE_CLASS (outermode) == MODE_INT
5828 	      && IN_RANGE (GET_MODE_SIZE (outermode),
5829 			   GET_MODE_SIZE (innermode),
5830 			   GET_MODE_SIZE (innermostmode))
5831 	      && subreg_lowpart_p (newx))
5832 	    {
5833 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
5834 	      SUBREG_PROMOTED_UNSIGNED_SET
5835 		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5836 	    }
5837 	  return newx;
5838 	}
5839       return NULL_RTX;
5840     }
5841 
5842   /* SUBREG of a hard register => just change the register number
5843      and/or mode.  If the hard register is not valid in that mode,
5844      suppress this simplification.  If the hard register is the stack,
5845      frame, or argument pointer, leave this as a SUBREG.  */
5846 
5847   if (REG_P (op) && HARD_REGISTER_P (op))
5848     {
5849       unsigned int regno, final_regno;
5850 
5851       regno = REGNO (op);
5852       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5853       if (HARD_REGISTER_NUM_P (final_regno))
5854 	{
5855 	  rtx x;
5856 	  int final_offset = byte;
5857 
5858 	  /* Adjust offset for paradoxical subregs.  */
5859 	  if (byte == 0
5860 	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5861 	    {
5862 	      int difference = (GET_MODE_SIZE (innermode)
5863 				- GET_MODE_SIZE (outermode));
5864 	      if (WORDS_BIG_ENDIAN)
5865 		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5866 	      if (BYTES_BIG_ENDIAN)
5867 		final_offset += difference % UNITS_PER_WORD;
5868 	    }
5869 
5870 	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5871 
5872 	  /* Propagate original regno.  We don't have any way to specify
5873 	     the offset inside original regno, so do so only for lowpart.
5874 	     The information is used only by alias analysis that can not
5875 	     grog partial register anyway.  */
5876 
5877 	  if (subreg_lowpart_offset (outermode, innermode) == byte)
5878 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5879 	  return x;
5880 	}
5881     }
5882 
5883   /* If we have a SUBREG of a register that we are replacing and we are
5884      replacing it with a MEM, make a new MEM and try replacing the
5885      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
5886      or if we would be widening it.  */
5887 
5888   if (MEM_P (op)
5889       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5890       /* Allow splitting of volatile memory references in case we don't
5891          have instruction to move the whole thing.  */
5892       && (! MEM_VOLATILE_P (op)
5893 	  || ! have_insn_for (SET, innermode))
5894       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5895     return adjust_address_nv (op, outermode, byte);
5896 
5897   /* Handle complex values represented as CONCAT
5898      of real and imaginary part.  */
5899   if (GET_CODE (op) == CONCAT)
5900     {
5901       unsigned int part_size, final_offset;
5902       rtx part, res;
5903 
5904       part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5905       if (byte < part_size)
5906 	{
5907 	  part = XEXP (op, 0);
5908 	  final_offset = byte;
5909 	}
5910       else
5911 	{
5912 	  part = XEXP (op, 1);
5913 	  final_offset = byte - part_size;
5914 	}
5915 
5916       if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5917 	return NULL_RTX;
5918 
5919       res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5920       if (res)
5921 	return res;
5922       if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5923 	return gen_rtx_SUBREG (outermode, part, final_offset);
5924       return NULL_RTX;
5925     }
5926 
5927   /* A SUBREG resulting from a zero extension may fold to zero if
5928      it extracts higher bits that the ZERO_EXTEND's source bits.  */
5929   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5930     {
5931       unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5932       if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5933 	return CONST0_RTX (outermode);
5934     }
5935 
5936   if (SCALAR_INT_MODE_P (outermode)
5937       && SCALAR_INT_MODE_P (innermode)
5938       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5939       && byte == subreg_lowpart_offset (outermode, innermode))
5940     {
5941       rtx tem = simplify_truncation (outermode, op, innermode);
5942       if (tem)
5943 	return tem;
5944     }
5945 
5946   return NULL_RTX;
5947 }
5948 
5949 /* Make a SUBREG operation or equivalent if it folds.  */
5950 
5951 rtx
5952 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5953 		     enum machine_mode innermode, unsigned int byte)
5954 {
5955   rtx newx;
5956 
5957   newx = simplify_subreg (outermode, op, innermode, byte);
5958   if (newx)
5959     return newx;
5960 
5961   if (GET_CODE (op) == SUBREG
5962       || GET_CODE (op) == CONCAT
5963       || GET_MODE (op) == VOIDmode)
5964     return NULL_RTX;
5965 
5966   if (validate_subreg (outermode, innermode, op, byte))
5967     return gen_rtx_SUBREG (outermode, op, byte);
5968 
5969   return NULL_RTX;
5970 }
5971 
5972 /* Simplify X, an rtx expression.
5973 
5974    Return the simplified expression or NULL if no simplifications
5975    were possible.
5976 
5977    This is the preferred entry point into the simplification routines;
5978    however, we still allow passes to call the more specific routines.
5979 
5980    Right now GCC has three (yes, three) major bodies of RTL simplification
5981    code that need to be unified.
5982 
5983 	1. fold_rtx in cse.c.  This code uses various CSE specific
5984 	   information to aid in RTL simplification.
5985 
5986 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
5987 	   it uses combine specific information to aid in RTL
5988 	   simplification.
5989 
5990 	3. The routines in this file.
5991 
5992 
5993    Long term we want to only have one body of simplification code; to
5994    get to that state I recommend the following steps:
5995 
5996 	1. Pour over fold_rtx & simplify_rtx and move any simplifications
5997 	   which are not pass dependent state into these routines.
5998 
5999 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
6000 	   use this routine whenever possible.
6001 
6002 	3. Allow for pass dependent state to be provided to these
6003 	   routines and add simplifications based on the pass dependent
6004 	   state.  Remove code from cse.c & combine.c that becomes
6005 	   redundant/dead.
6006 
6007     It will take time, but ultimately the compiler will be easier to
6008     maintain and improve.  It's totally silly that when we add a
6009     simplification that it needs to be added to 4 places (3 for RTL
6010     simplification and 1 for tree simplification.  */
6011 
6012 rtx
6013 simplify_rtx (const_rtx x)
6014 {
6015   const enum rtx_code code = GET_CODE (x);
6016   const enum machine_mode mode = GET_MODE (x);
6017 
6018   switch (GET_RTX_CLASS (code))
6019     {
6020     case RTX_UNARY:
6021       return simplify_unary_operation (code, mode,
6022 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6023     case RTX_COMM_ARITH:
6024       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6025 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6026 
6027       /* Fall through....  */
6028 
6029     case RTX_BIN_ARITH:
6030       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6031 
6032     case RTX_TERNARY:
6033     case RTX_BITFIELD_OPS:
6034       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6035 					 XEXP (x, 0), XEXP (x, 1),
6036 					 XEXP (x, 2));
6037 
6038     case RTX_COMPARE:
6039     case RTX_COMM_COMPARE:
6040       return simplify_relational_operation (code, mode,
6041                                             ((GET_MODE (XEXP (x, 0))
6042                                              != VOIDmode)
6043                                             ? GET_MODE (XEXP (x, 0))
6044                                             : GET_MODE (XEXP (x, 1))),
6045                                             XEXP (x, 0),
6046                                             XEXP (x, 1));
6047 
6048     case RTX_EXTRA:
6049       if (code == SUBREG)
6050 	return simplify_subreg (mode, SUBREG_REG (x),
6051 				GET_MODE (SUBREG_REG (x)),
6052 				SUBREG_BYTE (x));
6053       break;
6054 
6055     case RTX_OBJ:
6056       if (code == LO_SUM)
6057 	{
6058 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
6059 	  if (GET_CODE (XEXP (x, 0)) == HIGH
6060 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6061 	  return XEXP (x, 1);
6062 	}
6063       break;
6064 
6065     default:
6066       break;
6067     }
6068   return NULL;
6069 }
6070