xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/simplify-rtx.c (revision 7c192b2a5e1093666e67801684f930ef49b3b363)
1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987-2015 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "hash-set.h"
27 #include "machmode.h"
28 #include "vec.h"
29 #include "double-int.h"
30 #include "input.h"
31 #include "alias.h"
32 #include "symtab.h"
33 #include "wide-int.h"
34 #include "inchash.h"
35 #include "tree.h"
36 #include "fold-const.h"
37 #include "varasm.h"
38 #include "tm_p.h"
39 #include "regs.h"
40 #include "hard-reg-set.h"
41 #include "flags.h"
42 #include "insn-config.h"
43 #include "recog.h"
44 #include "function.h"
45 #include "insn-codes.h"
46 #include "optabs.h"
47 #include "hashtab.h"
48 #include "statistics.h"
49 #include "real.h"
50 #include "fixed-value.h"
51 #include "expmed.h"
52 #include "dojump.h"
53 #include "explow.h"
54 #include "calls.h"
55 #include "emit-rtl.h"
56 #include "stmt.h"
57 #include "expr.h"
58 #include "diagnostic-core.h"
59 #include "ggc.h"
60 #include "target.h"
61 #include "predict.h"
62 
63 /* Simplification and canonicalization of RTL.  */
64 
65 /* Much code operates on (low, high) pairs; the low value is an
66    unsigned wide int, the high value a signed wide int.  We
67    occasionally need to sign extend from low to high as if low were a
68    signed wide int.  */
69 #define HWI_SIGN_EXTEND(low) \
70  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
71 
72 static rtx neg_const_int (machine_mode, const_rtx);
73 static bool plus_minus_operand_p (const_rtx);
74 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
75 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
76 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
77 				  unsigned int);
78 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
79 					   rtx, rtx);
80 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
81 					    machine_mode, rtx, rtx);
82 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
83 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
84 					rtx, rtx, rtx, rtx);
85 
86 /* Negate a CONST_INT rtx, truncating (because a conversion from a
87    maximally negative number can overflow).  */
88 static rtx
89 neg_const_int (machine_mode mode, const_rtx i)
90 {
91   return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
92 }
93 
94 /* Test whether expression, X, is an immediate constant that represents
95    the most significant bit of machine mode MODE.  */
96 
97 bool
98 mode_signbit_p (machine_mode mode, const_rtx x)
99 {
100   unsigned HOST_WIDE_INT val;
101   unsigned int width;
102 
103   if (GET_MODE_CLASS (mode) != MODE_INT)
104     return false;
105 
106   width = GET_MODE_PRECISION (mode);
107   if (width == 0)
108     return false;
109 
110   if (width <= HOST_BITS_PER_WIDE_INT
111       && CONST_INT_P (x))
112     val = INTVAL (x);
113 #if TARGET_SUPPORTS_WIDE_INT
114   else if (CONST_WIDE_INT_P (x))
115     {
116       unsigned int i;
117       unsigned int elts = CONST_WIDE_INT_NUNITS (x);
118       if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
119 	return false;
120       for (i = 0; i < elts - 1; i++)
121 	if (CONST_WIDE_INT_ELT (x, i) != 0)
122 	  return false;
123       val = CONST_WIDE_INT_ELT (x, elts - 1);
124       width %= HOST_BITS_PER_WIDE_INT;
125       if (width == 0)
126 	width = HOST_BITS_PER_WIDE_INT;
127     }
128 #else
129   else if (width <= HOST_BITS_PER_DOUBLE_INT
130 	   && CONST_DOUBLE_AS_INT_P (x)
131 	   && CONST_DOUBLE_LOW (x) == 0)
132     {
133       val = CONST_DOUBLE_HIGH (x);
134       width -= HOST_BITS_PER_WIDE_INT;
135     }
136 #endif
137   else
138     /* X is not an integer constant.  */
139     return false;
140 
141   if (width < HOST_BITS_PER_WIDE_INT)
142     val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
143   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
144 }
145 
146 /* Test whether VAL is equal to the most significant bit of mode MODE
147    (after masking with the mode mask of MODE).  Returns false if the
148    precision of MODE is too large to handle.  */
149 
150 bool
151 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
152 {
153   unsigned int width;
154 
155   if (GET_MODE_CLASS (mode) != MODE_INT)
156     return false;
157 
158   width = GET_MODE_PRECISION (mode);
159   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
160     return false;
161 
162   val &= GET_MODE_MASK (mode);
163   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
164 }
165 
166 /* Test whether the most significant bit of mode MODE is set in VAL.
167    Returns false if the precision of MODE is too large to handle.  */
168 bool
169 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 {
171   unsigned int width;
172 
173   if (GET_MODE_CLASS (mode) != MODE_INT)
174     return false;
175 
176   width = GET_MODE_PRECISION (mode);
177   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178     return false;
179 
180   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
181   return val != 0;
182 }
183 
184 /* Test whether the most significant bit of mode MODE is clear in VAL.
185    Returns false if the precision of MODE is too large to handle.  */
186 bool
187 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
188 {
189   unsigned int width;
190 
191   if (GET_MODE_CLASS (mode) != MODE_INT)
192     return false;
193 
194   width = GET_MODE_PRECISION (mode);
195   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
196     return false;
197 
198   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
199   return val == 0;
200 }
201 
202 /* Make a binary operation by properly ordering the operands and
203    seeing if the expression folds.  */
204 
205 rtx
206 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
207 		     rtx op1)
208 {
209   rtx tem;
210 
211   /* If this simplifies, do it.  */
212   tem = simplify_binary_operation (code, mode, op0, op1);
213   if (tem)
214     return tem;
215 
216   /* Put complex operands first and constants second if commutative.  */
217   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
218       && swap_commutative_operands_p (op0, op1))
219     tem = op0, op0 = op1, op1 = tem;
220 
221   return gen_rtx_fmt_ee (code, mode, op0, op1);
222 }
223 
224 /* If X is a MEM referencing the constant pool, return the real value.
225    Otherwise return X.  */
226 rtx
227 avoid_constant_pool_reference (rtx x)
228 {
229   rtx c, tmp, addr;
230   machine_mode cmode;
231   HOST_WIDE_INT offset = 0;
232 
233   switch (GET_CODE (x))
234     {
235     case MEM:
236       break;
237 
238     case FLOAT_EXTEND:
239       /* Handle float extensions of constant pool references.  */
240       tmp = XEXP (x, 0);
241       c = avoid_constant_pool_reference (tmp);
242       if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
243 	{
244 	  REAL_VALUE_TYPE d;
245 
246 	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
247 	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
248 	}
249       return x;
250 
251     default:
252       return x;
253     }
254 
255   if (GET_MODE (x) == BLKmode)
256     return x;
257 
258   addr = XEXP (x, 0);
259 
260   /* Call target hook to avoid the effects of -fpic etc....  */
261   addr = targetm.delegitimize_address (addr);
262 
263   /* Split the address into a base and integer offset.  */
264   if (GET_CODE (addr) == CONST
265       && GET_CODE (XEXP (addr, 0)) == PLUS
266       && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
267     {
268       offset = INTVAL (XEXP (XEXP (addr, 0), 1));
269       addr = XEXP (XEXP (addr, 0), 0);
270     }
271 
272   if (GET_CODE (addr) == LO_SUM)
273     addr = XEXP (addr, 1);
274 
275   /* If this is a constant pool reference, we can turn it into its
276      constant and hope that simplifications happen.  */
277   if (GET_CODE (addr) == SYMBOL_REF
278       && CONSTANT_POOL_ADDRESS_P (addr))
279     {
280       c = get_pool_constant (addr);
281       cmode = get_pool_mode (addr);
282 
283       /* If we're accessing the constant in a different mode than it was
284          originally stored, attempt to fix that up via subreg simplifications.
285          If that fails we have no choice but to return the original memory.  */
286       if ((offset != 0 || cmode != GET_MODE (x))
287 	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
288         {
289           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
290           if (tem && CONSTANT_P (tem))
291             return tem;
292         }
293       else
294         return c;
295     }
296 
297   return x;
298 }
299 
300 /* Simplify a MEM based on its attributes.  This is the default
301    delegitimize_address target hook, and it's recommended that every
302    overrider call it.  */
303 
304 rtx
305 delegitimize_mem_from_attrs (rtx x)
306 {
307   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
308      use their base addresses as equivalent.  */
309   if (MEM_P (x)
310       && MEM_EXPR (x)
311       && MEM_OFFSET_KNOWN_P (x))
312     {
313       tree decl = MEM_EXPR (x);
314       machine_mode mode = GET_MODE (x);
315       HOST_WIDE_INT offset = 0;
316 
317       switch (TREE_CODE (decl))
318 	{
319 	default:
320 	  decl = NULL;
321 	  break;
322 
323 	case VAR_DECL:
324 	  break;
325 
326 	case ARRAY_REF:
327 	case ARRAY_RANGE_REF:
328 	case COMPONENT_REF:
329 	case BIT_FIELD_REF:
330 	case REALPART_EXPR:
331 	case IMAGPART_EXPR:
332 	case VIEW_CONVERT_EXPR:
333 	  {
334 	    HOST_WIDE_INT bitsize, bitpos;
335 	    tree toffset;
336 	    int unsignedp, volatilep = 0;
337 
338 	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
339 					&mode, &unsignedp, &volatilep, false);
340 	    if (bitsize != GET_MODE_BITSIZE (mode)
341 		|| (bitpos % BITS_PER_UNIT)
342 		|| (toffset && !tree_fits_shwi_p (toffset)))
343 	      decl = NULL;
344 	    else
345 	      {
346 		offset += bitpos / BITS_PER_UNIT;
347 		if (toffset)
348 		  offset += tree_to_shwi (toffset);
349 	      }
350 	    break;
351 	  }
352 	}
353 
354       if (decl
355 	  && mode == GET_MODE (x)
356 	  && TREE_CODE (decl) == VAR_DECL
357 	  && (TREE_STATIC (decl)
358 	      || DECL_THREAD_LOCAL_P (decl))
359 	  && DECL_RTL_SET_P (decl)
360 	  && MEM_P (DECL_RTL (decl)))
361 	{
362 	  rtx newx;
363 
364 	  offset += MEM_OFFSET (x);
365 
366 	  newx = DECL_RTL (decl);
367 
368 	  if (MEM_P (newx))
369 	    {
370 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
371 
372 	      /* Avoid creating a new MEM needlessly if we already had
373 		 the same address.  We do if there's no OFFSET and the
374 		 old address X is identical to NEWX, or if X is of the
375 		 form (plus NEWX OFFSET), or the NEWX is of the form
376 		 (plus Y (const_int Z)) and X is that with the offset
377 		 added: (plus Y (const_int Z+OFFSET)).  */
378 	      if (!((offset == 0
379 		     || (GET_CODE (o) == PLUS
380 			 && GET_CODE (XEXP (o, 1)) == CONST_INT
381 			 && (offset == INTVAL (XEXP (o, 1))
382 			     || (GET_CODE (n) == PLUS
383 				 && GET_CODE (XEXP (n, 1)) == CONST_INT
384 				 && (INTVAL (XEXP (n, 1)) + offset
385 				     == INTVAL (XEXP (o, 1)))
386 				 && (n = XEXP (n, 0))))
387 			 && (o = XEXP (o, 0))))
388 		    && rtx_equal_p (o, n)))
389 		x = adjust_address_nv (newx, mode, offset);
390 	    }
391 	  else if (GET_MODE (x) == GET_MODE (newx)
392 		   && offset == 0)
393 	    x = newx;
394 	}
395     }
396 
397   return x;
398 }
399 
400 /* Make a unary operation by first seeing if it folds and otherwise making
401    the specified operation.  */
402 
403 rtx
404 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
405 		    machine_mode op_mode)
406 {
407   rtx tem;
408 
409   /* If this simplifies, use it.  */
410   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
411     return tem;
412 
413   return gen_rtx_fmt_e (code, mode, op);
414 }
415 
416 /* Likewise for ternary operations.  */
417 
418 rtx
419 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
420 		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
421 {
422   rtx tem;
423 
424   /* If this simplifies, use it.  */
425   if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
426 					      op0, op1, op2)))
427     return tem;
428 
429   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
430 }
431 
432 /* Likewise, for relational operations.
433    CMP_MODE specifies mode comparison is done in.  */
434 
435 rtx
436 simplify_gen_relational (enum rtx_code code, machine_mode mode,
437 			 machine_mode cmp_mode, rtx op0, rtx op1)
438 {
439   rtx tem;
440 
441   if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
442 						 op0, op1)))
443     return tem;
444 
445   return gen_rtx_fmt_ee (code, mode, op0, op1);
446 }
447 
448 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
449    and simplify the result.  If FN is non-NULL, call this callback on each
450    X, if it returns non-NULL, replace X with its return value and simplify the
451    result.  */
452 
453 rtx
454 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
455 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
456 {
457   enum rtx_code code = GET_CODE (x);
458   machine_mode mode = GET_MODE (x);
459   machine_mode op_mode;
460   const char *fmt;
461   rtx op0, op1, op2, newx, op;
462   rtvec vec, newvec;
463   int i, j;
464 
465   if (__builtin_expect (fn != NULL, 0))
466     {
467       newx = fn (x, old_rtx, data);
468       if (newx)
469 	return newx;
470     }
471   else if (rtx_equal_p (x, old_rtx))
472     return copy_rtx ((rtx) data);
473 
474   switch (GET_RTX_CLASS (code))
475     {
476     case RTX_UNARY:
477       op0 = XEXP (x, 0);
478       op_mode = GET_MODE (op0);
479       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
480       if (op0 == XEXP (x, 0))
481 	return x;
482       return simplify_gen_unary (code, mode, op0, op_mode);
483 
484     case RTX_BIN_ARITH:
485     case RTX_COMM_ARITH:
486       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
487       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
488       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
489 	return x;
490       return simplify_gen_binary (code, mode, op0, op1);
491 
492     case RTX_COMPARE:
493     case RTX_COMM_COMPARE:
494       op0 = XEXP (x, 0);
495       op1 = XEXP (x, 1);
496       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
497       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
498       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
499       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
500 	return x;
501       return simplify_gen_relational (code, mode, op_mode, op0, op1);
502 
503     case RTX_TERNARY:
504     case RTX_BITFIELD_OPS:
505       op0 = XEXP (x, 0);
506       op_mode = GET_MODE (op0);
507       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
508       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
509       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
510       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
511 	return x;
512       if (op_mode == VOIDmode)
513 	op_mode = GET_MODE (op0);
514       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
515 
516     case RTX_EXTRA:
517       if (code == SUBREG)
518 	{
519 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
520 	  if (op0 == SUBREG_REG (x))
521 	    return x;
522 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
523 				     GET_MODE (SUBREG_REG (x)),
524 				     SUBREG_BYTE (x));
525 	  return op0 ? op0 : x;
526 	}
527       break;
528 
529     case RTX_OBJ:
530       if (code == MEM)
531 	{
532 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
533 	  if (op0 == XEXP (x, 0))
534 	    return x;
535 	  return replace_equiv_address_nv (x, op0);
536 	}
537       else if (code == LO_SUM)
538 	{
539 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
540 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
541 
542 	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
543 	  if (GET_CODE (op0) == HIGH)
544 	    {
545 	      rtx base0, base1, offset0, offset1;
546 	      split_const (XEXP (op0, 0), &base0, &offset0);
547 	      split_const (op1, &base1, &offset1);
548 	      if (rtx_equal_p (base0, base1))
549 		return op1;
550 	    }
551 
552 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
553 	    return x;
554 	  return gen_rtx_LO_SUM (mode, op0, op1);
555 	}
556       break;
557 
558     default:
559       break;
560     }
561 
562   newx = x;
563   fmt = GET_RTX_FORMAT (code);
564   for (i = 0; fmt[i]; i++)
565     switch (fmt[i])
566       {
567       case 'E':
568 	vec = XVEC (x, i);
569 	newvec = XVEC (newx, i);
570 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
571 	  {
572 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
573 					  old_rtx, fn, data);
574 	    if (op != RTVEC_ELT (vec, j))
575 	      {
576 		if (newvec == vec)
577 		  {
578 		    newvec = shallow_copy_rtvec (vec);
579 		    if (x == newx)
580 		      newx = shallow_copy_rtx (x);
581 		    XVEC (newx, i) = newvec;
582 		  }
583 		RTVEC_ELT (newvec, j) = op;
584 	      }
585 	  }
586 	break;
587 
588       case 'e':
589 	if (XEXP (x, i))
590 	  {
591 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
592 	    if (op != XEXP (x, i))
593 	      {
594 		if (x == newx)
595 		  newx = shallow_copy_rtx (x);
596 		XEXP (newx, i) = op;
597 	      }
598 	  }
599 	break;
600       }
601   return newx;
602 }
603 
604 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
605    resulting RTX.  Return a new RTX which is as simplified as possible.  */
606 
607 rtx
608 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
609 {
610   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
611 }
612 
613 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
614    Only handle cases where the truncated value is inherently an rvalue.
615 
616    RTL provides two ways of truncating a value:
617 
618    1. a lowpart subreg.  This form is only a truncation when both
619       the outer and inner modes (here MODE and OP_MODE respectively)
620       are scalar integers, and only then when the subreg is used as
621       an rvalue.
622 
623       It is only valid to form such truncating subregs if the
624       truncation requires no action by the target.  The onus for
625       proving this is on the creator of the subreg -- e.g. the
626       caller to simplify_subreg or simplify_gen_subreg -- and typically
627       involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
628 
629    2. a TRUNCATE.  This form handles both scalar and compound integers.
630 
631    The first form is preferred where valid.  However, the TRUNCATE
632    handling in simplify_unary_operation turns the second form into the
633    first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
634    so it is generally safe to form rvalue truncations using:
635 
636       simplify_gen_unary (TRUNCATE, ...)
637 
638    and leave simplify_unary_operation to work out which representation
639    should be used.
640 
641    Because of the proof requirements on (1), simplify_truncation must
642    also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
643    regardless of whether the outer truncation came from a SUBREG or a
644    TRUNCATE.  For example, if the caller has proven that an SImode
645    truncation of:
646 
647       (and:DI X Y)
648 
649    is a no-op and can be represented as a subreg, it does not follow
650    that SImode truncations of X and Y are also no-ops.  On a target
651    like 64-bit MIPS that requires SImode values to be stored in
652    sign-extended form, an SImode truncation of:
653 
654       (and:DI (reg:DI X) (const_int 63))
655 
656    is trivially a no-op because only the lower 6 bits can be set.
657    However, X is still an arbitrary 64-bit number and so we cannot
658    assume that truncating it too is a no-op.  */
659 
660 static rtx
661 simplify_truncation (machine_mode mode, rtx op,
662 		     machine_mode op_mode)
663 {
664   unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
665   unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
666   gcc_assert (precision <= op_precision);
667 
668   /* Optimize truncations of zero and sign extended values.  */
669   if (GET_CODE (op) == ZERO_EXTEND
670       || GET_CODE (op) == SIGN_EXTEND)
671     {
672       /* There are three possibilities.  If MODE is the same as the
673 	 origmode, we can omit both the extension and the subreg.
674 	 If MODE is not larger than the origmode, we can apply the
675 	 truncation without the extension.  Finally, if the outermode
676 	 is larger than the origmode, we can just extend to the appropriate
677 	 mode.  */
678       machine_mode origmode = GET_MODE (XEXP (op, 0));
679       if (mode == origmode)
680 	return XEXP (op, 0);
681       else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
682 	return simplify_gen_unary (TRUNCATE, mode,
683 				   XEXP (op, 0), origmode);
684       else
685 	return simplify_gen_unary (GET_CODE (op), mode,
686 				   XEXP (op, 0), origmode);
687     }
688 
689   /* If the machine can perform operations in the truncated mode, distribute
690      the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
691      (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
692   if (1
693 #ifdef WORD_REGISTER_OPERATIONS
694       && precision >= BITS_PER_WORD
695 #endif
696       && (GET_CODE (op) == PLUS
697 	  || GET_CODE (op) == MINUS
698 	  || GET_CODE (op) == MULT))
699     {
700       rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
701       if (op0)
702 	{
703 	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
704 	  if (op1)
705 	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
706 	}
707     }
708 
709   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
710      to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
711      the outer subreg is effectively a truncation to the original mode.  */
712   if ((GET_CODE (op) == LSHIFTRT
713        || GET_CODE (op) == ASHIFTRT)
714       /* Ensure that OP_MODE is at least twice as wide as MODE
715 	 to avoid the possibility that an outer LSHIFTRT shifts by more
716 	 than the sign extension's sign_bit_copies and introduces zeros
717 	 into the high bits of the result.  */
718       && 2 * precision <= op_precision
719       && CONST_INT_P (XEXP (op, 1))
720       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
721       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
722       && UINTVAL (XEXP (op, 1)) < precision)
723     return simplify_gen_binary (ASHIFTRT, mode,
724 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
725 
726   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
727      to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
728      the outer subreg is effectively a truncation to the original mode.  */
729   if ((GET_CODE (op) == LSHIFTRT
730        || GET_CODE (op) == ASHIFTRT)
731       && CONST_INT_P (XEXP (op, 1))
732       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
733       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
734       && UINTVAL (XEXP (op, 1)) < precision)
735     return simplify_gen_binary (LSHIFTRT, mode,
736 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
737 
738   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
739      to (ashift:QI (x:QI) C), where C is a suitable small constant and
740      the outer subreg is effectively a truncation to the original mode.  */
741   if (GET_CODE (op) == ASHIFT
742       && CONST_INT_P (XEXP (op, 1))
743       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
744 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
745       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
746       && UINTVAL (XEXP (op, 1)) < precision)
747     return simplify_gen_binary (ASHIFT, mode,
748 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
749 
750   /* Recognize a word extraction from a multi-word subreg.  */
751   if ((GET_CODE (op) == LSHIFTRT
752        || GET_CODE (op) == ASHIFTRT)
753       && SCALAR_INT_MODE_P (mode)
754       && SCALAR_INT_MODE_P (op_mode)
755       && precision >= BITS_PER_WORD
756       && 2 * precision <= op_precision
757       && CONST_INT_P (XEXP (op, 1))
758       && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
759       && UINTVAL (XEXP (op, 1)) < op_precision)
760     {
761       int byte = subreg_lowpart_offset (mode, op_mode);
762       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
763       return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
764 				  (WORDS_BIG_ENDIAN
765 				   ? byte - shifted_bytes
766 				   : byte + shifted_bytes));
767     }
768 
769   /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
770      and try replacing the TRUNCATE and shift with it.  Don't do this
771      if the MEM has a mode-dependent address.  */
772   if ((GET_CODE (op) == LSHIFTRT
773        || GET_CODE (op) == ASHIFTRT)
774       && SCALAR_INT_MODE_P (op_mode)
775       && MEM_P (XEXP (op, 0))
776       && CONST_INT_P (XEXP (op, 1))
777       && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
778       && INTVAL (XEXP (op, 1)) > 0
779       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
780       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
781 				     MEM_ADDR_SPACE (XEXP (op, 0)))
782       && ! MEM_VOLATILE_P (XEXP (op, 0))
783       && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
784 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
785     {
786       int byte = subreg_lowpart_offset (mode, op_mode);
787       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
788       return adjust_address_nv (XEXP (op, 0), mode,
789 				(WORDS_BIG_ENDIAN
790 				 ? byte - shifted_bytes
791 				 : byte + shifted_bytes));
792     }
793 
794   /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
795      (OP:SI foo:SI) if OP is NEG or ABS.  */
796   if ((GET_CODE (op) == ABS
797        || GET_CODE (op) == NEG)
798       && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
799 	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
800       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
801     return simplify_gen_unary (GET_CODE (op), mode,
802 			       XEXP (XEXP (op, 0), 0), mode);
803 
804   /* (truncate:A (subreg:B (truncate:C X) 0)) is
805      (truncate:A X).  */
806   if (GET_CODE (op) == SUBREG
807       && SCALAR_INT_MODE_P (mode)
808       && SCALAR_INT_MODE_P (op_mode)
809       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
810       && GET_CODE (SUBREG_REG (op)) == TRUNCATE
811       && subreg_lowpart_p (op))
812     {
813       rtx inner = XEXP (SUBREG_REG (op), 0);
814       if (GET_MODE_PRECISION (mode)
815 	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
816 	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
817       else
818 	/* If subreg above is paradoxical and C is narrower
819 	   than A, return (subreg:A (truncate:C X) 0).  */
820 	return simplify_gen_subreg (mode, SUBREG_REG (op),
821 				    GET_MODE (SUBREG_REG (op)), 0);
822     }
823 
824   /* (truncate:A (truncate:B X)) is (truncate:A X).  */
825   if (GET_CODE (op) == TRUNCATE)
826     return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
827 			       GET_MODE (XEXP (op, 0)));
828 
829   return NULL_RTX;
830 }
831 
832 /* Try to simplify a unary operation CODE whose output mode is to be
833    MODE with input operand OP whose mode was originally OP_MODE.
834    Return zero if no simplification can be made.  */
835 rtx
836 simplify_unary_operation (enum rtx_code code, machine_mode mode,
837 			  rtx op, machine_mode op_mode)
838 {
839   rtx trueop, tem;
840 
841   trueop = avoid_constant_pool_reference (op);
842 
843   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
844   if (tem)
845     return tem;
846 
847   return simplify_unary_operation_1 (code, mode, op);
848 }
849 
850 /* Perform some simplifications we can do even if the operands
851    aren't constant.  */
852 static rtx
853 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
854 {
855   enum rtx_code reversed;
856   rtx temp;
857 
858   switch (code)
859     {
860     case NOT:
861       /* (not (not X)) == X.  */
862       if (GET_CODE (op) == NOT)
863 	return XEXP (op, 0);
864 
865       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
866 	 comparison is all ones.   */
867       if (COMPARISON_P (op)
868 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
869 	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
870 	return simplify_gen_relational (reversed, mode, VOIDmode,
871 					XEXP (op, 0), XEXP (op, 1));
872 
873       /* (not (plus X -1)) can become (neg X).  */
874       if (GET_CODE (op) == PLUS
875 	  && XEXP (op, 1) == constm1_rtx)
876 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
877 
878       /* Similarly, (not (neg X)) is (plus X -1).  */
879       if (GET_CODE (op) == NEG)
880 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
881 				    CONSTM1_RTX (mode));
882 
883       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
884       if (GET_CODE (op) == XOR
885 	  && CONST_INT_P (XEXP (op, 1))
886 	  && (temp = simplify_unary_operation (NOT, mode,
887 					       XEXP (op, 1), mode)) != 0)
888 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
889 
890       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
891       if (GET_CODE (op) == PLUS
892 	  && CONST_INT_P (XEXP (op, 1))
893 	  && mode_signbit_p (mode, XEXP (op, 1))
894 	  && (temp = simplify_unary_operation (NOT, mode,
895 					       XEXP (op, 1), mode)) != 0)
896 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
897 
898 
899       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
900 	 operands other than 1, but that is not valid.  We could do a
901 	 similar simplification for (not (lshiftrt C X)) where C is
902 	 just the sign bit, but this doesn't seem common enough to
903 	 bother with.  */
904       if (GET_CODE (op) == ASHIFT
905 	  && XEXP (op, 0) == const1_rtx)
906 	{
907 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
908 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
909 	}
910 
911       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
912 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
913 	 so we can perform the above simplification.  */
914       if (STORE_FLAG_VALUE == -1
915 	  && GET_CODE (op) == ASHIFTRT
916 	  && CONST_INT_P (XEXP (op, 1))
917 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
918 	return simplify_gen_relational (GE, mode, VOIDmode,
919 					XEXP (op, 0), const0_rtx);
920 
921 
922       if (GET_CODE (op) == SUBREG
923 	  && subreg_lowpart_p (op)
924 	  && (GET_MODE_SIZE (GET_MODE (op))
925 	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
926 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
927 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
928 	{
929 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
930 	  rtx x;
931 
932 	  x = gen_rtx_ROTATE (inner_mode,
933 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
934 						  inner_mode),
935 			      XEXP (SUBREG_REG (op), 1));
936 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
937 	  if (temp)
938 	    return temp;
939 	}
940 
941       /* Apply De Morgan's laws to reduce number of patterns for machines
942 	 with negating logical insns (and-not, nand, etc.).  If result has
943 	 only one NOT, put it first, since that is how the patterns are
944 	 coded.  */
945       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
946 	{
947 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
948 	  machine_mode op_mode;
949 
950 	  op_mode = GET_MODE (in1);
951 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
952 
953 	  op_mode = GET_MODE (in2);
954 	  if (op_mode == VOIDmode)
955 	    op_mode = mode;
956 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
957 
958 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
959 	    {
960 	      rtx tem = in2;
961 	      in2 = in1; in1 = tem;
962 	    }
963 
964 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
965 				 mode, in1, in2);
966 	}
967 
968       /* (not (bswap x)) -> (bswap (not x)).  */
969       if (GET_CODE (op) == BSWAP)
970 	{
971 	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
972 	  return simplify_gen_unary (BSWAP, mode, x, mode);
973 	}
974       break;
975 
976     case NEG:
977       /* (neg (neg X)) == X.  */
978       if (GET_CODE (op) == NEG)
979 	return XEXP (op, 0);
980 
981       /* (neg (plus X 1)) can become (not X).  */
982       if (GET_CODE (op) == PLUS
983 	  && XEXP (op, 1) == const1_rtx)
984 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
985 
986       /* Similarly, (neg (not X)) is (plus X 1).  */
987       if (GET_CODE (op) == NOT)
988 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
989 				    CONST1_RTX (mode));
990 
991       /* (neg (minus X Y)) can become (minus Y X).  This transformation
992 	 isn't safe for modes with signed zeros, since if X and Y are
993 	 both +0, (minus Y X) is the same as (minus X Y).  If the
994 	 rounding mode is towards +infinity (or -infinity) then the two
995 	 expressions will be rounded differently.  */
996       if (GET_CODE (op) == MINUS
997 	  && !HONOR_SIGNED_ZEROS (mode)
998 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
999 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1000 
1001       if (GET_CODE (op) == PLUS
1002 	  && !HONOR_SIGNED_ZEROS (mode)
1003 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1004 	{
1005 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
1006 	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
1007 	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1008 	    {
1009 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1010 	      if (temp)
1011 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1012 	    }
1013 
1014 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
1015 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1016 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1017 	}
1018 
1019       /* (neg (mult A B)) becomes (mult A (neg B)).
1020 	 This works even for floating-point values.  */
1021       if (GET_CODE (op) == MULT
1022 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1023 	{
1024 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1025 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1026 	}
1027 
1028       /* NEG commutes with ASHIFT since it is multiplication.  Only do
1029 	 this if we can then eliminate the NEG (e.g., if the operand
1030 	 is a constant).  */
1031       if (GET_CODE (op) == ASHIFT)
1032 	{
1033 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1034 	  if (temp)
1035 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1036 	}
1037 
1038       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1039 	 C is equal to the width of MODE minus 1.  */
1040       if (GET_CODE (op) == ASHIFTRT
1041 	  && CONST_INT_P (XEXP (op, 1))
1042 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1043 	return simplify_gen_binary (LSHIFTRT, mode,
1044 				    XEXP (op, 0), XEXP (op, 1));
1045 
1046       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1047 	 C is equal to the width of MODE minus 1.  */
1048       if (GET_CODE (op) == LSHIFTRT
1049 	  && CONST_INT_P (XEXP (op, 1))
1050 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1051 	return simplify_gen_binary (ASHIFTRT, mode,
1052 				    XEXP (op, 0), XEXP (op, 1));
1053 
1054       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
1055       if (GET_CODE (op) == XOR
1056 	  && XEXP (op, 1) == const1_rtx
1057 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
1058 	return plus_constant (mode, XEXP (op, 0), -1);
1059 
1060       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
1061       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
1062       if (GET_CODE (op) == LT
1063 	  && XEXP (op, 1) == const0_rtx
1064 	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1065 	{
1066 	  machine_mode inner = GET_MODE (XEXP (op, 0));
1067 	  int isize = GET_MODE_PRECISION (inner);
1068 	  if (STORE_FLAG_VALUE == 1)
1069 	    {
1070 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1071 					  GEN_INT (isize - 1));
1072 	      if (mode == inner)
1073 		return temp;
1074 	      if (GET_MODE_PRECISION (mode) > isize)
1075 		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1076 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1077 	    }
1078 	  else if (STORE_FLAG_VALUE == -1)
1079 	    {
1080 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1081 					  GEN_INT (isize - 1));
1082 	      if (mode == inner)
1083 		return temp;
1084 	      if (GET_MODE_PRECISION (mode) > isize)
1085 		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1086 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1087 	    }
1088 	}
1089       break;
1090 
1091     case TRUNCATE:
1092       /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1093 	 with the umulXi3_highpart patterns.  */
1094       if (GET_CODE (op) == LSHIFTRT
1095 	  && GET_CODE (XEXP (op, 0)) == MULT)
1096 	break;
1097 
1098       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1099 	{
1100 	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1101 	    {
1102 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1103 	      if (temp)
1104 		return temp;
1105 	    }
1106 	  /* We can't handle truncation to a partial integer mode here
1107 	     because we don't know the real bitsize of the partial
1108 	     integer mode.  */
1109 	  break;
1110 	}
1111 
1112       if (GET_MODE (op) != VOIDmode)
1113 	{
1114 	  temp = simplify_truncation (mode, op, GET_MODE (op));
1115 	  if (temp)
1116 	    return temp;
1117 	}
1118 
1119       /* If we know that the value is already truncated, we can
1120 	 replace the TRUNCATE with a SUBREG.  */
1121       if (GET_MODE_NUNITS (mode) == 1
1122 	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1123 	      || truncated_to_mode (mode, op)))
1124 	{
1125 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1126 	  if (temp)
1127 	    return temp;
1128 	}
1129 
1130       /* A truncate of a comparison can be replaced with a subreg if
1131          STORE_FLAG_VALUE permits.  This is like the previous test,
1132          but it works even if the comparison is done in a mode larger
1133          than HOST_BITS_PER_WIDE_INT.  */
1134       if (HWI_COMPUTABLE_MODE_P (mode)
1135 	  && COMPARISON_P (op)
1136 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1137 	{
1138 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1139 	  if (temp)
1140 	    return temp;
1141 	}
1142 
1143       /* A truncate of a memory is just loading the low part of the memory
1144 	 if we are not changing the meaning of the address. */
1145       if (GET_CODE (op) == MEM
1146 	  && !VECTOR_MODE_P (mode)
1147 	  && !MEM_VOLATILE_P (op)
1148 	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1149 	{
1150 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1151 	  if (temp)
1152 	    return temp;
1153 	}
1154 
1155       break;
1156 
1157     case FLOAT_TRUNCATE:
1158       if (DECIMAL_FLOAT_MODE_P (mode))
1159 	break;
1160 
1161       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
1162       if (GET_CODE (op) == FLOAT_EXTEND
1163 	  && GET_MODE (XEXP (op, 0)) == mode)
1164 	return XEXP (op, 0);
1165 
1166       /* (float_truncate:SF (float_truncate:DF foo:XF))
1167          = (float_truncate:SF foo:XF).
1168 	 This may eliminate double rounding, so it is unsafe.
1169 
1170          (float_truncate:SF (float_extend:XF foo:DF))
1171          = (float_truncate:SF foo:DF).
1172 
1173          (float_truncate:DF (float_extend:XF foo:SF))
1174          = (float_extend:SF foo:DF).  */
1175       if ((GET_CODE (op) == FLOAT_TRUNCATE
1176 	   && flag_unsafe_math_optimizations)
1177 	  || GET_CODE (op) == FLOAT_EXTEND)
1178 	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1179 							    0)))
1180 				   > GET_MODE_SIZE (mode)
1181 				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1182 				   mode,
1183 				   XEXP (op, 0), mode);
1184 
1185       /*  (float_truncate (float x)) is (float x)  */
1186       if (GET_CODE (op) == FLOAT
1187 	  && (flag_unsafe_math_optimizations
1188 	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1189 		  && ((unsigned)significand_size (GET_MODE (op))
1190 		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1191 			  - num_sign_bit_copies (XEXP (op, 0),
1192 						 GET_MODE (XEXP (op, 0))))))))
1193 	return simplify_gen_unary (FLOAT, mode,
1194 				   XEXP (op, 0),
1195 				   GET_MODE (XEXP (op, 0)));
1196 
1197       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1198 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
1199       if ((GET_CODE (op) == ABS
1200 	   || GET_CODE (op) == NEG)
1201 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1202 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1203 	return simplify_gen_unary (GET_CODE (op), mode,
1204 				   XEXP (XEXP (op, 0), 0), mode);
1205 
1206       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1207 	 is (float_truncate:SF x).  */
1208       if (GET_CODE (op) == SUBREG
1209 	  && subreg_lowpart_p (op)
1210 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1211 	return SUBREG_REG (op);
1212       break;
1213 
1214     case FLOAT_EXTEND:
1215       if (DECIMAL_FLOAT_MODE_P (mode))
1216 	break;
1217 
1218       /*  (float_extend (float_extend x)) is (float_extend x)
1219 
1220 	  (float_extend (float x)) is (float x) assuming that double
1221 	  rounding can't happen.
1222           */
1223       if (GET_CODE (op) == FLOAT_EXTEND
1224 	  || (GET_CODE (op) == FLOAT
1225 	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1226 	      && ((unsigned)significand_size (GET_MODE (op))
1227 		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1228 		      - num_sign_bit_copies (XEXP (op, 0),
1229 					     GET_MODE (XEXP (op, 0)))))))
1230 	return simplify_gen_unary (GET_CODE (op), mode,
1231 				   XEXP (op, 0),
1232 				   GET_MODE (XEXP (op, 0)));
1233 
1234       break;
1235 
1236     case ABS:
1237       /* (abs (neg <foo>)) -> (abs <foo>) */
1238       if (GET_CODE (op) == NEG)
1239 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1240 				   GET_MODE (XEXP (op, 0)));
1241 
1242       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1243          do nothing.  */
1244       if (GET_MODE (op) == VOIDmode)
1245 	break;
1246 
1247       /* If operand is something known to be positive, ignore the ABS.  */
1248       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1249 	  || val_signbit_known_clear_p (GET_MODE (op),
1250 					nonzero_bits (op, GET_MODE (op))))
1251 	return op;
1252 
1253       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
1254       if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1255 	return gen_rtx_NEG (mode, op);
1256 
1257       break;
1258 
1259     case FFS:
1260       /* (ffs (*_extend <X>)) = (ffs <X>) */
1261       if (GET_CODE (op) == SIGN_EXTEND
1262 	  || GET_CODE (op) == ZERO_EXTEND)
1263 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1264 				   GET_MODE (XEXP (op, 0)));
1265       break;
1266 
1267     case POPCOUNT:
1268       switch (GET_CODE (op))
1269 	{
1270 	case BSWAP:
1271 	case ZERO_EXTEND:
1272 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
1273 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1274 				     GET_MODE (XEXP (op, 0)));
1275 
1276 	case ROTATE:
1277 	case ROTATERT:
1278 	  /* Rotations don't affect popcount.  */
1279 	  if (!side_effects_p (XEXP (op, 1)))
1280 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1281 				       GET_MODE (XEXP (op, 0)));
1282 	  break;
1283 
1284 	default:
1285 	  break;
1286 	}
1287       break;
1288 
1289     case PARITY:
1290       switch (GET_CODE (op))
1291 	{
1292 	case NOT:
1293 	case BSWAP:
1294 	case ZERO_EXTEND:
1295 	case SIGN_EXTEND:
1296 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1297 				     GET_MODE (XEXP (op, 0)));
1298 
1299 	case ROTATE:
1300 	case ROTATERT:
1301 	  /* Rotations don't affect parity.  */
1302 	  if (!side_effects_p (XEXP (op, 1)))
1303 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1304 				       GET_MODE (XEXP (op, 0)));
1305 	  break;
1306 
1307 	default:
1308 	  break;
1309 	}
1310       break;
1311 
1312     case BSWAP:
1313       /* (bswap (bswap x)) -> x.  */
1314       if (GET_CODE (op) == BSWAP)
1315 	return XEXP (op, 0);
1316       break;
1317 
1318     case FLOAT:
1319       /* (float (sign_extend <X>)) = (float <X>).  */
1320       if (GET_CODE (op) == SIGN_EXTEND)
1321 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1322 				   GET_MODE (XEXP (op, 0)));
1323       break;
1324 
1325     case SIGN_EXTEND:
1326       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1327 	 becomes just the MINUS if its mode is MODE.  This allows
1328 	 folding switch statements on machines using casesi (such as
1329 	 the VAX).  */
1330       if (GET_CODE (op) == TRUNCATE
1331 	  && GET_MODE (XEXP (op, 0)) == mode
1332 	  && GET_CODE (XEXP (op, 0)) == MINUS
1333 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1334 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1335 	return XEXP (op, 0);
1336 
1337       /* Extending a widening multiplication should be canonicalized to
1338 	 a wider widening multiplication.  */
1339       if (GET_CODE (op) == MULT)
1340 	{
1341 	  rtx lhs = XEXP (op, 0);
1342 	  rtx rhs = XEXP (op, 1);
1343 	  enum rtx_code lcode = GET_CODE (lhs);
1344 	  enum rtx_code rcode = GET_CODE (rhs);
1345 
1346 	  /* Widening multiplies usually extend both operands, but sometimes
1347 	     they use a shift to extract a portion of a register.  */
1348 	  if ((lcode == SIGN_EXTEND
1349 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1350 	      && (rcode == SIGN_EXTEND
1351 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1352 	    {
1353 	      machine_mode lmode = GET_MODE (lhs);
1354 	      machine_mode rmode = GET_MODE (rhs);
1355 	      int bits;
1356 
1357 	      if (lcode == ASHIFTRT)
1358 		/* Number of bits not shifted off the end.  */
1359 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1360 	      else /* lcode == SIGN_EXTEND */
1361 		/* Size of inner mode.  */
1362 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1363 
1364 	      if (rcode == ASHIFTRT)
1365 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1366 	      else /* rcode == SIGN_EXTEND */
1367 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1368 
1369 	      /* We can only widen multiplies if the result is mathematiclly
1370 		 equivalent.  I.e. if overflow was impossible.  */
1371 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1372 		return simplify_gen_binary
1373 			 (MULT, mode,
1374 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1375 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1376 	    }
1377 	}
1378 
1379       /* Check for a sign extension of a subreg of a promoted
1380 	 variable, where the promotion is sign-extended, and the
1381 	 target mode is the same as the variable's promotion.  */
1382       if (GET_CODE (op) == SUBREG
1383 	  && SUBREG_PROMOTED_VAR_P (op)
1384 	  && SUBREG_PROMOTED_SIGNED_P (op)
1385 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1386 	{
1387 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1388 	  if (temp)
1389 	    return temp;
1390 	}
1391 
1392       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1393 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1394       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1395 	{
1396 	  gcc_assert (GET_MODE_PRECISION (mode)
1397 		      > GET_MODE_PRECISION (GET_MODE (op)));
1398 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1399 				     GET_MODE (XEXP (op, 0)));
1400 	}
1401 
1402       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1403 	 is (sign_extend:M (subreg:O <X>)) if there is mode with
1404 	 GET_MODE_BITSIZE (N) - I bits.
1405 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1406 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1407       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1408 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1409 	  && CONST_INT_P (XEXP (op, 1))
1410 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1411 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1412 	{
1413 	  machine_mode tmode
1414 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1415 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1416 	  gcc_assert (GET_MODE_BITSIZE (mode)
1417 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1418 	  if (tmode != BLKmode)
1419 	    {
1420 	      rtx inner =
1421 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1422 	      if (inner)
1423 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1424 					   ? SIGN_EXTEND : ZERO_EXTEND,
1425 					   mode, inner, tmode);
1426 	    }
1427 	}
1428 
1429 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1430       /* As we do not know which address space the pointer is referring to,
1431 	 we can do this only if the target does not support different pointer
1432 	 or address modes depending on the address space.  */
1433       if (target_default_pointer_address_modes_p ()
1434 	  && ! POINTERS_EXTEND_UNSIGNED
1435 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1436 	  && (CONSTANT_P (op)
1437 	      || (GET_CODE (op) == SUBREG
1438 		  && REG_P (SUBREG_REG (op))
1439 		  && REG_POINTER (SUBREG_REG (op))
1440 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1441 	return convert_memory_address (Pmode, op);
1442 #endif
1443       break;
1444 
1445     case ZERO_EXTEND:
1446       /* Check for a zero extension of a subreg of a promoted
1447 	 variable, where the promotion is zero-extended, and the
1448 	 target mode is the same as the variable's promotion.  */
1449       if (GET_CODE (op) == SUBREG
1450 	  && SUBREG_PROMOTED_VAR_P (op)
1451 	  && SUBREG_PROMOTED_UNSIGNED_P (op)
1452 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1453 	{
1454 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1455 	  if (temp)
1456 	    return temp;
1457 	}
1458 
1459       /* Extending a widening multiplication should be canonicalized to
1460 	 a wider widening multiplication.  */
1461       if (GET_CODE (op) == MULT)
1462 	{
1463 	  rtx lhs = XEXP (op, 0);
1464 	  rtx rhs = XEXP (op, 1);
1465 	  enum rtx_code lcode = GET_CODE (lhs);
1466 	  enum rtx_code rcode = GET_CODE (rhs);
1467 
1468 	  /* Widening multiplies usually extend both operands, but sometimes
1469 	     they use a shift to extract a portion of a register.  */
1470 	  if ((lcode == ZERO_EXTEND
1471 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1472 	      && (rcode == ZERO_EXTEND
1473 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1474 	    {
1475 	      machine_mode lmode = GET_MODE (lhs);
1476 	      machine_mode rmode = GET_MODE (rhs);
1477 	      int bits;
1478 
1479 	      if (lcode == LSHIFTRT)
1480 		/* Number of bits not shifted off the end.  */
1481 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1482 	      else /* lcode == ZERO_EXTEND */
1483 		/* Size of inner mode.  */
1484 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1485 
1486 	      if (rcode == LSHIFTRT)
1487 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1488 	      else /* rcode == ZERO_EXTEND */
1489 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1490 
1491 	      /* We can only widen multiplies if the result is mathematiclly
1492 		 equivalent.  I.e. if overflow was impossible.  */
1493 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1494 		return simplify_gen_binary
1495 			 (MULT, mode,
1496 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1497 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1498 	    }
1499 	}
1500 
1501       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1502       if (GET_CODE (op) == ZERO_EXTEND)
1503 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1504 				   GET_MODE (XEXP (op, 0)));
1505 
1506       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1507 	 is (zero_extend:M (subreg:O <X>)) if there is mode with
1508 	 GET_MODE_PRECISION (N) - I bits.  */
1509       if (GET_CODE (op) == LSHIFTRT
1510 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1511 	  && CONST_INT_P (XEXP (op, 1))
1512 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1513 	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1514 	{
1515 	  machine_mode tmode
1516 	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1517 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1518 	  if (tmode != BLKmode)
1519 	    {
1520 	      rtx inner =
1521 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1522 	      if (inner)
1523 		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1524 	    }
1525 	}
1526 
1527       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1528 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1529 	 of mode N.  E.g.
1530 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1531 	 (and:SI (reg:SI) (const_int 63)).  */
1532       if (GET_CODE (op) == SUBREG
1533 	  && GET_MODE_PRECISION (GET_MODE (op))
1534 	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1535 	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1536 	     <= HOST_BITS_PER_WIDE_INT
1537 	  && GET_MODE_PRECISION (mode)
1538 	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1539 	  && subreg_lowpart_p (op)
1540 	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1541 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1542 	{
1543 	  if (GET_MODE_PRECISION (mode)
1544 	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1545 	    return SUBREG_REG (op);
1546 	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1547 				     GET_MODE (SUBREG_REG (op)));
1548 	}
1549 
1550 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1551       /* As we do not know which address space the pointer is referring to,
1552 	 we can do this only if the target does not support different pointer
1553 	 or address modes depending on the address space.  */
1554       if (target_default_pointer_address_modes_p ()
1555 	  && POINTERS_EXTEND_UNSIGNED > 0
1556 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1557 	  && (CONSTANT_P (op)
1558 	      || (GET_CODE (op) == SUBREG
1559 		  && REG_P (SUBREG_REG (op))
1560 		  && REG_POINTER (SUBREG_REG (op))
1561 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1562 	return convert_memory_address (Pmode, op);
1563 #endif
1564       break;
1565 
1566     default:
1567       break;
1568     }
1569 
1570   return 0;
1571 }
1572 
1573 /* Try to compute the value of a unary operation CODE whose output mode is to
1574    be MODE with input operand OP whose mode was originally OP_MODE.
1575    Return zero if the value cannot be computed.  */
1576 rtx
1577 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1578 				rtx op, machine_mode op_mode)
1579 {
1580   unsigned int width = GET_MODE_PRECISION (mode);
1581 
1582   if (code == VEC_DUPLICATE)
1583     {
1584       gcc_assert (VECTOR_MODE_P (mode));
1585       if (GET_MODE (op) != VOIDmode)
1586       {
1587 	if (!VECTOR_MODE_P (GET_MODE (op)))
1588 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1589 	else
1590 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1591 						(GET_MODE (op)));
1592       }
1593       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1594 	  || GET_CODE (op) == CONST_VECTOR)
1595 	{
1596           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1597           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1598 	  rtvec v = rtvec_alloc (n_elts);
1599 	  unsigned int i;
1600 
1601 	  if (GET_CODE (op) != CONST_VECTOR)
1602 	    for (i = 0; i < n_elts; i++)
1603 	      RTVEC_ELT (v, i) = op;
1604 	  else
1605 	    {
1606 	      machine_mode inmode = GET_MODE (op);
1607               int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1608               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1609 
1610 	      gcc_assert (in_n_elts < n_elts);
1611 	      gcc_assert ((n_elts % in_n_elts) == 0);
1612 	      for (i = 0; i < n_elts; i++)
1613 	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1614 	    }
1615 	  return gen_rtx_CONST_VECTOR (mode, v);
1616 	}
1617     }
1618 
1619   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1620     {
1621       int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1622       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1623       machine_mode opmode = GET_MODE (op);
1624       int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1625       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1626       rtvec v = rtvec_alloc (n_elts);
1627       unsigned int i;
1628 
1629       gcc_assert (op_n_elts == n_elts);
1630       for (i = 0; i < n_elts; i++)
1631 	{
1632 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1633 					    CONST_VECTOR_ELT (op, i),
1634 					    GET_MODE_INNER (opmode));
1635 	  if (!x)
1636 	    return 0;
1637 	  RTVEC_ELT (v, i) = x;
1638 	}
1639       return gen_rtx_CONST_VECTOR (mode, v);
1640     }
1641 
1642   /* The order of these tests is critical so that, for example, we don't
1643      check the wrong mode (input vs. output) for a conversion operation,
1644      such as FIX.  At some point, this should be simplified.  */
1645 
1646   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1647     {
1648       REAL_VALUE_TYPE d;
1649 
1650       if (op_mode == VOIDmode)
1651 	{
1652 	  /* CONST_INT have VOIDmode as the mode.  We assume that all
1653 	     the bits of the constant are significant, though, this is
1654 	     a dangerous assumption as many times CONST_INTs are
1655 	     created and used with garbage in the bits outside of the
1656 	     precision of the implied mode of the const_int.  */
1657 	  op_mode = MAX_MODE_INT;
1658 	}
1659 
1660       real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1661       d = real_value_truncate (mode, d);
1662       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1663     }
1664   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1665     {
1666       REAL_VALUE_TYPE d;
1667 
1668       if (op_mode == VOIDmode)
1669 	{
1670 	  /* CONST_INT have VOIDmode as the mode.  We assume that all
1671 	     the bits of the constant are significant, though, this is
1672 	     a dangerous assumption as many times CONST_INTs are
1673 	     created and used with garbage in the bits outside of the
1674 	     precision of the implied mode of the const_int.  */
1675 	  op_mode = MAX_MODE_INT;
1676 	}
1677 
1678       real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1679       d = real_value_truncate (mode, d);
1680       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1681     }
1682 
1683   if (CONST_SCALAR_INT_P (op) && width > 0)
1684     {
1685       wide_int result;
1686       machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1687       rtx_mode_t op0 = std::make_pair (op, imode);
1688       int int_value;
1689 
1690 #if TARGET_SUPPORTS_WIDE_INT == 0
1691       /* This assert keeps the simplification from producing a result
1692 	 that cannot be represented in a CONST_DOUBLE but a lot of
1693 	 upstream callers expect that this function never fails to
1694 	 simplify something and so you if you added this to the test
1695 	 above the code would die later anyway.  If this assert
1696 	 happens, you just need to make the port support wide int.  */
1697       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1698 #endif
1699 
1700       switch (code)
1701 	{
1702 	case NOT:
1703 	  result = wi::bit_not (op0);
1704 	  break;
1705 
1706 	case NEG:
1707 	  result = wi::neg (op0);
1708 	  break;
1709 
1710 	case ABS:
1711 	  result = wi::abs (op0);
1712 	  break;
1713 
1714 	case FFS:
1715 	  result = wi::shwi (wi::ffs (op0), mode);
1716 	  break;
1717 
1718 	case CLZ:
1719 	  if (wi::ne_p (op0, 0))
1720 	    int_value = wi::clz (op0);
1721 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1722 	    int_value = GET_MODE_PRECISION (mode);
1723 	  result = wi::shwi (int_value, mode);
1724 	  break;
1725 
1726 	case CLRSB:
1727 	  result = wi::shwi (wi::clrsb (op0), mode);
1728 	  break;
1729 
1730 	case CTZ:
1731 	  if (wi::ne_p (op0, 0))
1732 	    int_value = wi::ctz (op0);
1733 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1734 	    int_value = GET_MODE_PRECISION (mode);
1735 	  result = wi::shwi (int_value, mode);
1736 	  break;
1737 
1738 	case POPCOUNT:
1739 	  result = wi::shwi (wi::popcount (op0), mode);
1740 	  break;
1741 
1742 	case PARITY:
1743 	  result = wi::shwi (wi::parity (op0), mode);
1744 	  break;
1745 
1746 	case BSWAP:
1747 	  result = wide_int (op0).bswap ();
1748 	  break;
1749 
1750 	case TRUNCATE:
1751 	case ZERO_EXTEND:
1752 	  result = wide_int::from (op0, width, UNSIGNED);
1753 	  break;
1754 
1755 	case SIGN_EXTEND:
1756 	  result = wide_int::from (op0, width, SIGNED);
1757 	  break;
1758 
1759 	case SQRT:
1760 	default:
1761 	  return 0;
1762 	}
1763 
1764       return immed_wide_int_const (result, mode);
1765     }
1766 
1767   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1768 	   && SCALAR_FLOAT_MODE_P (mode)
1769 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1770     {
1771       REAL_VALUE_TYPE d;
1772       REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1773 
1774       switch (code)
1775 	{
1776 	case SQRT:
1777 	  return 0;
1778 	case ABS:
1779 	  d = real_value_abs (&d);
1780 	  break;
1781 	case NEG:
1782 	  d = real_value_negate (&d);
1783 	  break;
1784 	case FLOAT_TRUNCATE:
1785 	  d = real_value_truncate (mode, d);
1786 	  break;
1787 	case FLOAT_EXTEND:
1788 	  /* All this does is change the mode, unless changing
1789 	     mode class.  */
1790 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1791 	    real_convert (&d, mode, &d);
1792 	  break;
1793 	case FIX:
1794 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1795 	  break;
1796 	case NOT:
1797 	  {
1798 	    long tmp[4];
1799 	    int i;
1800 
1801 	    real_to_target (tmp, &d, GET_MODE (op));
1802 	    for (i = 0; i < 4; i++)
1803 	      tmp[i] = ~tmp[i];
1804 	    real_from_target (&d, tmp, mode);
1805 	    break;
1806 	  }
1807 	default:
1808 	  gcc_unreachable ();
1809 	}
1810       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1811     }
1812   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1813 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1814 	   && GET_MODE_CLASS (mode) == MODE_INT
1815 	   && width > 0)
1816     {
1817       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1818 	 operators are intentionally left unspecified (to ease implementation
1819 	 by target backends), for consistency, this routine implements the
1820 	 same semantics for constant folding as used by the middle-end.  */
1821 
1822       /* This was formerly used only for non-IEEE float.
1823 	 eggert@twinsun.com says it is safe for IEEE also.  */
1824       REAL_VALUE_TYPE x, t;
1825       REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1826       wide_int wmax, wmin;
1827       /* This is part of the abi to real_to_integer, but we check
1828 	 things before making this call.  */
1829       bool fail;
1830 
1831       switch (code)
1832 	{
1833 	case FIX:
1834 	  if (REAL_VALUE_ISNAN (x))
1835 	    return const0_rtx;
1836 
1837 	  /* Test against the signed upper bound.  */
1838 	  wmax = wi::max_value (width, SIGNED);
1839 	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
1840 	  if (REAL_VALUES_LESS (t, x))
1841 	    return immed_wide_int_const (wmax, mode);
1842 
1843 	  /* Test against the signed lower bound.  */
1844 	  wmin = wi::min_value (width, SIGNED);
1845 	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
1846 	  if (REAL_VALUES_LESS (x, t))
1847 	    return immed_wide_int_const (wmin, mode);
1848 
1849 	  return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1850 	  break;
1851 
1852 	case UNSIGNED_FIX:
1853 	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1854 	    return const0_rtx;
1855 
1856 	  /* Test against the unsigned upper bound.  */
1857 	  wmax = wi::max_value (width, UNSIGNED);
1858 	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1859 	  if (REAL_VALUES_LESS (t, x))
1860 	    return immed_wide_int_const (wmax, mode);
1861 
1862 	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
1863 				       mode);
1864 	  break;
1865 
1866 	default:
1867 	  gcc_unreachable ();
1868 	}
1869     }
1870 
1871   return NULL_RTX;
1872 }
1873 
1874 /* Subroutine of simplify_binary_operation to simplify a binary operation
1875    CODE that can commute with byte swapping, with result mode MODE and
1876    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
1877    Return zero if no simplification or canonicalization is possible.  */
1878 
1879 static rtx
1880 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1881 				  rtx op0, rtx op1)
1882 {
1883   rtx tem;
1884 
1885   /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
1886   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1887     {
1888       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1889 				 simplify_gen_unary (BSWAP, mode, op1, mode));
1890       return simplify_gen_unary (BSWAP, mode, tem, mode);
1891     }
1892 
1893   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
1894   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1895     {
1896       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1897       return simplify_gen_unary (BSWAP, mode, tem, mode);
1898     }
1899 
1900   return NULL_RTX;
1901 }
1902 
1903 /* Subroutine of simplify_binary_operation to simplify a commutative,
1904    associative binary operation CODE with result mode MODE, operating
1905    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1906    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
1907    canonicalization is possible.  */
1908 
1909 static rtx
1910 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1911 				rtx op0, rtx op1)
1912 {
1913   rtx tem;
1914 
1915   /* Linearize the operator to the left.  */
1916   if (GET_CODE (op1) == code)
1917     {
1918       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
1919       if (GET_CODE (op0) == code)
1920 	{
1921 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1922 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1923 	}
1924 
1925       /* "a op (b op c)" becomes "(b op c) op a".  */
1926       if (! swap_commutative_operands_p (op1, op0))
1927 	return simplify_gen_binary (code, mode, op1, op0);
1928 
1929       tem = op0;
1930       op0 = op1;
1931       op1 = tem;
1932     }
1933 
1934   if (GET_CODE (op0) == code)
1935     {
1936       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
1937       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1938 	{
1939 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1940 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1941 	}
1942 
1943       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
1944       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1945       if (tem != 0)
1946         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1947 
1948       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
1949       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1950       if (tem != 0)
1951         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1952     }
1953 
1954   return 0;
1955 }
1956 
1957 
1958 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1959    and OP1.  Return 0 if no simplification is possible.
1960 
1961    Don't use this for relational operations such as EQ or LT.
1962    Use simplify_relational_operation instead.  */
1963 rtx
1964 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1965 			   rtx op0, rtx op1)
1966 {
1967   rtx trueop0, trueop1;
1968   rtx tem;
1969 
1970   /* Relational operations don't work here.  We must know the mode
1971      of the operands in order to do the comparison correctly.
1972      Assuming a full word can give incorrect results.
1973      Consider comparing 128 with -128 in QImode.  */
1974   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1975   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1976 
1977   /* Make sure the constant is second.  */
1978   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1979       && swap_commutative_operands_p (op0, op1))
1980     {
1981       tem = op0, op0 = op1, op1 = tem;
1982     }
1983 
1984   trueop0 = avoid_constant_pool_reference (op0);
1985   trueop1 = avoid_constant_pool_reference (op1);
1986 
1987   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1988   if (tem)
1989     return tem;
1990   return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1991 }
1992 
1993 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
1994    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
1995    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1996    actual constants.  */
1997 
1998 static rtx
1999 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2000 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2001 {
2002   rtx tem, reversed, opleft, opright;
2003   HOST_WIDE_INT val;
2004   unsigned int width = GET_MODE_PRECISION (mode);
2005 
2006   /* Even if we can't compute a constant result,
2007      there are some cases worth simplifying.  */
2008 
2009   switch (code)
2010     {
2011     case PLUS:
2012       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2013 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2014 	 when x is -0 and the rounding mode is not towards -infinity,
2015 	 since (-0) + 0 is then 0.  */
2016       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2017 	return op0;
2018 
2019       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2020 	 transformations are safe even for IEEE.  */
2021       if (GET_CODE (op0) == NEG)
2022 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2023       else if (GET_CODE (op1) == NEG)
2024 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2025 
2026       /* (~a) + 1 -> -a */
2027       if (INTEGRAL_MODE_P (mode)
2028 	  && GET_CODE (op0) == NOT
2029 	  && trueop1 == const1_rtx)
2030 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2031 
2032       /* Handle both-operands-constant cases.  We can only add
2033 	 CONST_INTs to constants since the sum of relocatable symbols
2034 	 can't be handled by most assemblers.  Don't add CONST_INT
2035 	 to CONST_INT since overflow won't be computed properly if wider
2036 	 than HOST_BITS_PER_WIDE_INT.  */
2037 
2038       if ((GET_CODE (op0) == CONST
2039 	   || GET_CODE (op0) == SYMBOL_REF
2040 	   || GET_CODE (op0) == LABEL_REF)
2041 	  && CONST_INT_P (op1))
2042 	return plus_constant (mode, op0, INTVAL (op1));
2043       else if ((GET_CODE (op1) == CONST
2044 		|| GET_CODE (op1) == SYMBOL_REF
2045 		|| GET_CODE (op1) == LABEL_REF)
2046 	       && CONST_INT_P (op0))
2047 	return plus_constant (mode, op1, INTVAL (op0));
2048 
2049       /* See if this is something like X * C - X or vice versa or
2050 	 if the multiplication is written as a shift.  If so, we can
2051 	 distribute and make a new multiply, shift, or maybe just
2052 	 have X (if C is 2 in the example above).  But don't make
2053 	 something more expensive than we had before.  */
2054 
2055       if (SCALAR_INT_MODE_P (mode))
2056 	{
2057 	  rtx lhs = op0, rhs = op1;
2058 
2059 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2060 	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2061 
2062 	  if (GET_CODE (lhs) == NEG)
2063 	    {
2064 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2065 	      lhs = XEXP (lhs, 0);
2066 	    }
2067 	  else if (GET_CODE (lhs) == MULT
2068 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2069 	    {
2070 	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2071 	      lhs = XEXP (lhs, 0);
2072 	    }
2073 	  else if (GET_CODE (lhs) == ASHIFT
2074 		   && CONST_INT_P (XEXP (lhs, 1))
2075                    && INTVAL (XEXP (lhs, 1)) >= 0
2076 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2077 	    {
2078 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2079 					    GET_MODE_PRECISION (mode));
2080 	      lhs = XEXP (lhs, 0);
2081 	    }
2082 
2083 	  if (GET_CODE (rhs) == NEG)
2084 	    {
2085 	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2086 	      rhs = XEXP (rhs, 0);
2087 	    }
2088 	  else if (GET_CODE (rhs) == MULT
2089 		   && CONST_INT_P (XEXP (rhs, 1)))
2090 	    {
2091 	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2092 	      rhs = XEXP (rhs, 0);
2093 	    }
2094 	  else if (GET_CODE (rhs) == ASHIFT
2095 		   && CONST_INT_P (XEXP (rhs, 1))
2096 		   && INTVAL (XEXP (rhs, 1)) >= 0
2097 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2098 	    {
2099 	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2100 					    GET_MODE_PRECISION (mode));
2101 	      rhs = XEXP (rhs, 0);
2102 	    }
2103 
2104 	  if (rtx_equal_p (lhs, rhs))
2105 	    {
2106 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
2107 	      rtx coeff;
2108 	      bool speed = optimize_function_for_speed_p (cfun);
2109 
2110 	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2111 
2112 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2113 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2114 		? tem : 0;
2115 	    }
2116 	}
2117 
2118       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
2119       if (CONST_SCALAR_INT_P (op1)
2120 	  && GET_CODE (op0) == XOR
2121 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2122 	  && mode_signbit_p (mode, op1))
2123 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2124 				    simplify_gen_binary (XOR, mode, op1,
2125 							 XEXP (op0, 1)));
2126 
2127       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2128       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2129 	  && GET_CODE (op0) == MULT
2130 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2131 	{
2132 	  rtx in1, in2;
2133 
2134 	  in1 = XEXP (XEXP (op0, 0), 0);
2135 	  in2 = XEXP (op0, 1);
2136 	  return simplify_gen_binary (MINUS, mode, op1,
2137 				      simplify_gen_binary (MULT, mode,
2138 							   in1, in2));
2139 	}
2140 
2141       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2142 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2143 	 is 1.  */
2144       if (COMPARISON_P (op0)
2145 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2146 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2147 	  && (reversed = reversed_comparison (op0, mode)))
2148 	return
2149 	  simplify_gen_unary (NEG, mode, reversed, mode);
2150 
2151       /* If one of the operands is a PLUS or a MINUS, see if we can
2152 	 simplify this by the associative law.
2153 	 Don't use the associative law for floating point.
2154 	 The inaccuracy makes it nonassociative,
2155 	 and subtle programs can break if operations are associated.  */
2156 
2157       if (INTEGRAL_MODE_P (mode)
2158 	  && (plus_minus_operand_p (op0)
2159 	      || plus_minus_operand_p (op1))
2160 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2161 	return tem;
2162 
2163       /* Reassociate floating point addition only when the user
2164 	 specifies associative math operations.  */
2165       if (FLOAT_MODE_P (mode)
2166 	  && flag_associative_math)
2167 	{
2168 	  tem = simplify_associative_operation (code, mode, op0, op1);
2169 	  if (tem)
2170 	    return tem;
2171 	}
2172       break;
2173 
2174     case COMPARE:
2175       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2176       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2177 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2178 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2179 	{
2180 	  rtx xop00 = XEXP (op0, 0);
2181 	  rtx xop10 = XEXP (op1, 0);
2182 
2183 #ifdef HAVE_cc0
2184 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2185 #else
2186 	    if (REG_P (xop00) && REG_P (xop10)
2187 		&& GET_MODE (xop00) == GET_MODE (xop10)
2188 		&& REGNO (xop00) == REGNO (xop10)
2189 		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2190 		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2191 #endif
2192 	      return xop00;
2193 	}
2194       break;
2195 
2196     case MINUS:
2197       /* We can't assume x-x is 0 even with non-IEEE floating point,
2198 	 but since it is zero except in very strange circumstances, we
2199 	 will treat it as zero with -ffinite-math-only.  */
2200       if (rtx_equal_p (trueop0, trueop1)
2201 	  && ! side_effects_p (op0)
2202 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2203 	return CONST0_RTX (mode);
2204 
2205       /* Change subtraction from zero into negation.  (0 - x) is the
2206 	 same as -x when x is NaN, infinite, or finite and nonzero.
2207 	 But if the mode has signed zeros, and does not round towards
2208 	 -infinity, then 0 - 0 is 0, not -0.  */
2209       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2210 	return simplify_gen_unary (NEG, mode, op1, mode);
2211 
2212       /* (-1 - a) is ~a.  */
2213       if (trueop0 == constm1_rtx)
2214 	return simplify_gen_unary (NOT, mode, op1, mode);
2215 
2216       /* Subtracting 0 has no effect unless the mode has signed zeros
2217 	 and supports rounding towards -infinity.  In such a case,
2218 	 0 - 0 is -0.  */
2219       if (!(HONOR_SIGNED_ZEROS (mode)
2220 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2221 	  && trueop1 == CONST0_RTX (mode))
2222 	return op0;
2223 
2224       /* See if this is something like X * C - X or vice versa or
2225 	 if the multiplication is written as a shift.  If so, we can
2226 	 distribute and make a new multiply, shift, or maybe just
2227 	 have X (if C is 2 in the example above).  But don't make
2228 	 something more expensive than we had before.  */
2229 
2230       if (SCALAR_INT_MODE_P (mode))
2231 	{
2232 	  rtx lhs = op0, rhs = op1;
2233 
2234 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2235 	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2236 
2237 	  if (GET_CODE (lhs) == NEG)
2238 	    {
2239 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2240 	      lhs = XEXP (lhs, 0);
2241 	    }
2242 	  else if (GET_CODE (lhs) == MULT
2243 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2244 	    {
2245 	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2246 	      lhs = XEXP (lhs, 0);
2247 	    }
2248 	  else if (GET_CODE (lhs) == ASHIFT
2249 		   && CONST_INT_P (XEXP (lhs, 1))
2250 		   && INTVAL (XEXP (lhs, 1)) >= 0
2251 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2252 	    {
2253 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2254 					    GET_MODE_PRECISION (mode));
2255 	      lhs = XEXP (lhs, 0);
2256 	    }
2257 
2258 	  if (GET_CODE (rhs) == NEG)
2259 	    {
2260 	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2261 	      rhs = XEXP (rhs, 0);
2262 	    }
2263 	  else if (GET_CODE (rhs) == MULT
2264 		   && CONST_INT_P (XEXP (rhs, 1)))
2265 	    {
2266 	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2267 	      rhs = XEXP (rhs, 0);
2268 	    }
2269 	  else if (GET_CODE (rhs) == ASHIFT
2270 		   && CONST_INT_P (XEXP (rhs, 1))
2271 		   && INTVAL (XEXP (rhs, 1)) >= 0
2272 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2273 	    {
2274 	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2275 					       GET_MODE_PRECISION (mode));
2276 	      negcoeff1 = -negcoeff1;
2277 	      rhs = XEXP (rhs, 0);
2278 	    }
2279 
2280 	  if (rtx_equal_p (lhs, rhs))
2281 	    {
2282 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2283 	      rtx coeff;
2284 	      bool speed = optimize_function_for_speed_p (cfun);
2285 
2286 	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2287 
2288 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2289 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2290 		? tem : 0;
2291 	    }
2292 	}
2293 
2294       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2295       if (GET_CODE (op1) == NEG)
2296 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2297 
2298       /* (-x - c) may be simplified as (-c - x).  */
2299       if (GET_CODE (op0) == NEG
2300 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2301 	{
2302 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2303 	  if (tem)
2304 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2305 	}
2306 
2307       /* Don't let a relocatable value get a negative coeff.  */
2308       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2309 	return simplify_gen_binary (PLUS, mode,
2310 				    op0,
2311 				    neg_const_int (mode, op1));
2312 
2313       /* (x - (x & y)) -> (x & ~y) */
2314       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2315 	{
2316 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2317 	    {
2318 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2319 					GET_MODE (XEXP (op1, 1)));
2320 	      return simplify_gen_binary (AND, mode, op0, tem);
2321 	    }
2322 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2323 	    {
2324 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2325 					GET_MODE (XEXP (op1, 0)));
2326 	      return simplify_gen_binary (AND, mode, op0, tem);
2327 	    }
2328 	}
2329 
2330       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2331 	 by reversing the comparison code if valid.  */
2332       if (STORE_FLAG_VALUE == 1
2333 	  && trueop0 == const1_rtx
2334 	  && COMPARISON_P (op1)
2335 	  && (reversed = reversed_comparison (op1, mode)))
2336 	return reversed;
2337 
2338       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2339       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2340 	  && GET_CODE (op1) == MULT
2341 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2342 	{
2343 	  rtx in1, in2;
2344 
2345 	  in1 = XEXP (XEXP (op1, 0), 0);
2346 	  in2 = XEXP (op1, 1);
2347 	  return simplify_gen_binary (PLUS, mode,
2348 				      simplify_gen_binary (MULT, mode,
2349 							   in1, in2),
2350 				      op0);
2351 	}
2352 
2353       /* Canonicalize (minus (neg A) (mult B C)) to
2354 	 (minus (mult (neg B) C) A).  */
2355       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2356 	  && GET_CODE (op1) == MULT
2357 	  && GET_CODE (op0) == NEG)
2358 	{
2359 	  rtx in1, in2;
2360 
2361 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2362 	  in2 = XEXP (op1, 1);
2363 	  return simplify_gen_binary (MINUS, mode,
2364 				      simplify_gen_binary (MULT, mode,
2365 							   in1, in2),
2366 				      XEXP (op0, 0));
2367 	}
2368 
2369       /* If one of the operands is a PLUS or a MINUS, see if we can
2370 	 simplify this by the associative law.  This will, for example,
2371          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2372 	 Don't use the associative law for floating point.
2373 	 The inaccuracy makes it nonassociative,
2374 	 and subtle programs can break if operations are associated.  */
2375 
2376       if (INTEGRAL_MODE_P (mode)
2377 	  && (plus_minus_operand_p (op0)
2378 	      || plus_minus_operand_p (op1))
2379 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2380 	return tem;
2381       break;
2382 
2383     case MULT:
2384       if (trueop1 == constm1_rtx)
2385 	return simplify_gen_unary (NEG, mode, op0, mode);
2386 
2387       if (GET_CODE (op0) == NEG)
2388 	{
2389 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2390 	  /* If op1 is a MULT as well and simplify_unary_operation
2391 	     just moved the NEG to the second operand, simplify_gen_binary
2392 	     below could through simplify_associative_operation move
2393 	     the NEG around again and recurse endlessly.  */
2394 	  if (temp
2395 	      && GET_CODE (op1) == MULT
2396 	      && GET_CODE (temp) == MULT
2397 	      && XEXP (op1, 0) == XEXP (temp, 0)
2398 	      && GET_CODE (XEXP (temp, 1)) == NEG
2399 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2400 	    temp = NULL_RTX;
2401 	  if (temp)
2402 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2403 	}
2404       if (GET_CODE (op1) == NEG)
2405 	{
2406 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2407 	  /* If op0 is a MULT as well and simplify_unary_operation
2408 	     just moved the NEG to the second operand, simplify_gen_binary
2409 	     below could through simplify_associative_operation move
2410 	     the NEG around again and recurse endlessly.  */
2411 	  if (temp
2412 	      && GET_CODE (op0) == MULT
2413 	      && GET_CODE (temp) == MULT
2414 	      && XEXP (op0, 0) == XEXP (temp, 0)
2415 	      && GET_CODE (XEXP (temp, 1)) == NEG
2416 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2417 	    temp = NULL_RTX;
2418 	  if (temp)
2419 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2420 	}
2421 
2422       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2423 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2424 	 when the mode has signed zeros, since multiplying a negative
2425 	 number by 0 will give -0, not 0.  */
2426       if (!HONOR_NANS (mode)
2427 	  && !HONOR_SIGNED_ZEROS (mode)
2428 	  && trueop1 == CONST0_RTX (mode)
2429 	  && ! side_effects_p (op0))
2430 	return op1;
2431 
2432       /* In IEEE floating point, x*1 is not equivalent to x for
2433 	 signalling NaNs.  */
2434       if (!HONOR_SNANS (mode)
2435 	  && trueop1 == CONST1_RTX (mode))
2436 	return op0;
2437 
2438       /* Convert multiply by constant power of two into shift.  */
2439       if (CONST_SCALAR_INT_P (trueop1))
2440 	{
2441 	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
2442 	  if (val >= 0)
2443 	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2444 	}
2445 
2446       /* x*2 is x+x and x*(-1) is -x */
2447       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2448 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2449 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2450 	  && GET_MODE (op0) == mode)
2451 	{
2452 	  REAL_VALUE_TYPE d;
2453 	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2454 
2455 	  if (REAL_VALUES_EQUAL (d, dconst2))
2456 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2457 
2458 	  if (!HONOR_SNANS (mode)
2459 	      && REAL_VALUES_EQUAL (d, dconstm1))
2460 	    return simplify_gen_unary (NEG, mode, op0, mode);
2461 	}
2462 
2463       /* Optimize -x * -x as x * x.  */
2464       if (FLOAT_MODE_P (mode)
2465 	  && GET_CODE (op0) == NEG
2466 	  && GET_CODE (op1) == NEG
2467 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2468 	  && !side_effects_p (XEXP (op0, 0)))
2469 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2470 
2471       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2472       if (SCALAR_FLOAT_MODE_P (mode)
2473 	  && GET_CODE (op0) == ABS
2474 	  && GET_CODE (op1) == ABS
2475 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2476 	  && !side_effects_p (XEXP (op0, 0)))
2477 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2478 
2479       /* Reassociate multiplication, but for floating point MULTs
2480 	 only when the user specifies unsafe math optimizations.  */
2481       if (! FLOAT_MODE_P (mode)
2482 	  || flag_unsafe_math_optimizations)
2483 	{
2484 	  tem = simplify_associative_operation (code, mode, op0, op1);
2485 	  if (tem)
2486 	    return tem;
2487 	}
2488       break;
2489 
2490     case IOR:
2491       if (trueop1 == CONST0_RTX (mode))
2492 	return op0;
2493       if (INTEGRAL_MODE_P (mode)
2494 	  && trueop1 == CONSTM1_RTX (mode)
2495 	  && !side_effects_p (op0))
2496 	return op1;
2497       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2498 	return op0;
2499       /* A | (~A) -> -1 */
2500       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2501 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2502 	  && ! side_effects_p (op0)
2503 	  && SCALAR_INT_MODE_P (mode))
2504 	return constm1_rtx;
2505 
2506       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2507       if (CONST_INT_P (op1)
2508 	  && HWI_COMPUTABLE_MODE_P (mode)
2509 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2510 	  && !side_effects_p (op0))
2511 	return op1;
2512 
2513       /* Canonicalize (X & C1) | C2.  */
2514       if (GET_CODE (op0) == AND
2515 	  && CONST_INT_P (trueop1)
2516 	  && CONST_INT_P (XEXP (op0, 1)))
2517 	{
2518 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2519 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2520 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2521 
2522 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
2523 	  if ((c1 & c2) == c1
2524 	      && !side_effects_p (XEXP (op0, 0)))
2525 	    return trueop1;
2526 
2527 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2528 	  if (((c1|c2) & mask) == mask)
2529 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2530 
2531 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
2532 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2533 	    {
2534 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2535 					 gen_int_mode (c1 & ~c2, mode));
2536 	      return simplify_gen_binary (IOR, mode, tem, op1);
2537 	    }
2538 	}
2539 
2540       /* Convert (A & B) | A to A.  */
2541       if (GET_CODE (op0) == AND
2542 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2543 	      || rtx_equal_p (XEXP (op0, 1), op1))
2544 	  && ! side_effects_p (XEXP (op0, 0))
2545 	  && ! side_effects_p (XEXP (op0, 1)))
2546 	return op1;
2547 
2548       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2549          mode size to (rotate A CX).  */
2550 
2551       if (GET_CODE (op1) == ASHIFT
2552           || GET_CODE (op1) == SUBREG)
2553         {
2554 	  opleft = op1;
2555 	  opright = op0;
2556 	}
2557       else
2558         {
2559 	  opright = op1;
2560 	  opleft = op0;
2561 	}
2562 
2563       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2564           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2565           && CONST_INT_P (XEXP (opleft, 1))
2566           && CONST_INT_P (XEXP (opright, 1))
2567           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2568               == GET_MODE_PRECISION (mode)))
2569         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2570 
2571       /* Same, but for ashift that has been "simplified" to a wider mode
2572         by simplify_shift_const.  */
2573 
2574       if (GET_CODE (opleft) == SUBREG
2575           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2576           && GET_CODE (opright) == LSHIFTRT
2577           && GET_CODE (XEXP (opright, 0)) == SUBREG
2578           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2579           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2580           && (GET_MODE_SIZE (GET_MODE (opleft))
2581               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2582           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2583                           SUBREG_REG (XEXP (opright, 0)))
2584           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2585           && CONST_INT_P (XEXP (opright, 1))
2586           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2587               == GET_MODE_PRECISION (mode)))
2588         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2589                                XEXP (SUBREG_REG (opleft), 1));
2590 
2591       /* If we have (ior (and (X C1) C2)), simplify this by making
2592 	 C1 as small as possible if C1 actually changes.  */
2593       if (CONST_INT_P (op1)
2594 	  && (HWI_COMPUTABLE_MODE_P (mode)
2595 	      || INTVAL (op1) > 0)
2596 	  && GET_CODE (op0) == AND
2597 	  && CONST_INT_P (XEXP (op0, 1))
2598 	  && CONST_INT_P (op1)
2599 	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2600 	{
2601 	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2602 					 gen_int_mode (UINTVAL (XEXP (op0, 1))
2603 						       & ~UINTVAL (op1),
2604 						       mode));
2605 	  return simplify_gen_binary (IOR, mode, tmp, op1);
2606 	}
2607 
2608       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2609          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2610 	 the PLUS does not affect any of the bits in OP1: then we can do
2611 	 the IOR as a PLUS and we can associate.  This is valid if OP1
2612          can be safely shifted left C bits.  */
2613       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2614           && GET_CODE (XEXP (op0, 0)) == PLUS
2615           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2616           && CONST_INT_P (XEXP (op0, 1))
2617           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2618         {
2619           int count = INTVAL (XEXP (op0, 1));
2620           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2621 
2622           if (mask >> count == INTVAL (trueop1)
2623 	      && trunc_int_for_mode (mask, mode) == mask
2624               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2625 	    return simplify_gen_binary (ASHIFTRT, mode,
2626 					plus_constant (mode, XEXP (op0, 0),
2627 						       mask),
2628 					XEXP (op0, 1));
2629         }
2630 
2631       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2632       if (tem)
2633 	return tem;
2634 
2635       tem = simplify_associative_operation (code, mode, op0, op1);
2636       if (tem)
2637 	return tem;
2638       break;
2639 
2640     case XOR:
2641       if (trueop1 == CONST0_RTX (mode))
2642 	return op0;
2643       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2644 	return simplify_gen_unary (NOT, mode, op0, mode);
2645       if (rtx_equal_p (trueop0, trueop1)
2646 	  && ! side_effects_p (op0)
2647 	  && GET_MODE_CLASS (mode) != MODE_CC)
2648 	 return CONST0_RTX (mode);
2649 
2650       /* Canonicalize XOR of the most significant bit to PLUS.  */
2651       if (CONST_SCALAR_INT_P (op1)
2652 	  && mode_signbit_p (mode, op1))
2653 	return simplify_gen_binary (PLUS, mode, op0, op1);
2654       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2655       if (CONST_SCALAR_INT_P (op1)
2656 	  && GET_CODE (op0) == PLUS
2657 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2658 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2659 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2660 				    simplify_gen_binary (XOR, mode, op1,
2661 							 XEXP (op0, 1)));
2662 
2663       /* If we are XORing two things that have no bits in common,
2664 	 convert them into an IOR.  This helps to detect rotation encoded
2665 	 using those methods and possibly other simplifications.  */
2666 
2667       if (HWI_COMPUTABLE_MODE_P (mode)
2668 	  && (nonzero_bits (op0, mode)
2669 	      & nonzero_bits (op1, mode)) == 0)
2670 	return (simplify_gen_binary (IOR, mode, op0, op1));
2671 
2672       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2673 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2674 	 (NOT y).  */
2675       {
2676 	int num_negated = 0;
2677 
2678 	if (GET_CODE (op0) == NOT)
2679 	  num_negated++, op0 = XEXP (op0, 0);
2680 	if (GET_CODE (op1) == NOT)
2681 	  num_negated++, op1 = XEXP (op1, 0);
2682 
2683 	if (num_negated == 2)
2684 	  return simplify_gen_binary (XOR, mode, op0, op1);
2685 	else if (num_negated == 1)
2686 	  return simplify_gen_unary (NOT, mode,
2687 				     simplify_gen_binary (XOR, mode, op0, op1),
2688 				     mode);
2689       }
2690 
2691       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2692 	 correspond to a machine insn or result in further simplifications
2693 	 if B is a constant.  */
2694 
2695       if (GET_CODE (op0) == AND
2696 	  && rtx_equal_p (XEXP (op0, 1), op1)
2697 	  && ! side_effects_p (op1))
2698 	return simplify_gen_binary (AND, mode,
2699 				    simplify_gen_unary (NOT, mode,
2700 							XEXP (op0, 0), mode),
2701 				    op1);
2702 
2703       else if (GET_CODE (op0) == AND
2704 	       && rtx_equal_p (XEXP (op0, 0), op1)
2705 	       && ! side_effects_p (op1))
2706 	return simplify_gen_binary (AND, mode,
2707 				    simplify_gen_unary (NOT, mode,
2708 							XEXP (op0, 1), mode),
2709 				    op1);
2710 
2711       /* Given (xor (ior (xor A B) C) D), where B, C and D are
2712 	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2713 	 out bits inverted twice and not set by C.  Similarly, given
2714 	 (xor (and (xor A B) C) D), simplify without inverting C in
2715 	 the xor operand: (xor (and A C) (B&C)^D).
2716       */
2717       else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2718 	       && GET_CODE (XEXP (op0, 0)) == XOR
2719 	       && CONST_INT_P (op1)
2720 	       && CONST_INT_P (XEXP (op0, 1))
2721 	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2722 	{
2723 	  enum rtx_code op = GET_CODE (op0);
2724 	  rtx a = XEXP (XEXP (op0, 0), 0);
2725 	  rtx b = XEXP (XEXP (op0, 0), 1);
2726 	  rtx c = XEXP (op0, 1);
2727 	  rtx d = op1;
2728 	  HOST_WIDE_INT bval = INTVAL (b);
2729 	  HOST_WIDE_INT cval = INTVAL (c);
2730 	  HOST_WIDE_INT dval = INTVAL (d);
2731 	  HOST_WIDE_INT xcval;
2732 
2733 	  if (op == IOR)
2734 	    xcval = ~cval;
2735 	  else
2736 	    xcval = cval;
2737 
2738 	  return simplify_gen_binary (XOR, mode,
2739 				      simplify_gen_binary (op, mode, a, c),
2740 				      gen_int_mode ((bval & xcval) ^ dval,
2741 						    mode));
2742 	}
2743 
2744       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2745 	 we can transform like this:
2746             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2747                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2748                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2749 	 Attempt a few simplifications when B and C are both constants.  */
2750       if (GET_CODE (op0) == AND
2751 	  && CONST_INT_P (op1)
2752 	  && CONST_INT_P (XEXP (op0, 1)))
2753 	{
2754 	  rtx a = XEXP (op0, 0);
2755 	  rtx b = XEXP (op0, 1);
2756 	  rtx c = op1;
2757 	  HOST_WIDE_INT bval = INTVAL (b);
2758 	  HOST_WIDE_INT cval = INTVAL (c);
2759 
2760 	  /* Instead of computing ~A&C, we compute its negated value,
2761 	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
2762 	     optimize for sure.  If it does not simplify, we still try
2763 	     to compute ~A&C below, but since that always allocates
2764 	     RTL, we don't try that before committing to returning a
2765 	     simplified expression.  */
2766 	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2767 						  GEN_INT (~cval));
2768 
2769 	  if ((~cval & bval) == 0)
2770 	    {
2771 	      rtx na_c = NULL_RTX;
2772 	      if (n_na_c)
2773 		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2774 	      else
2775 		{
2776 		  /* If ~A does not simplify, don't bother: we don't
2777 		     want to simplify 2 operations into 3, and if na_c
2778 		     were to simplify with na, n_na_c would have
2779 		     simplified as well.  */
2780 		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
2781 		  if (na)
2782 		    na_c = simplify_gen_binary (AND, mode, na, c);
2783 		}
2784 
2785 	      /* Try to simplify ~A&C | ~B&C.  */
2786 	      if (na_c != NULL_RTX)
2787 		return simplify_gen_binary (IOR, mode, na_c,
2788 					    gen_int_mode (~bval & cval, mode));
2789 	    }
2790 	  else
2791 	    {
2792 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2793 	      if (n_na_c == CONSTM1_RTX (mode))
2794 		{
2795 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2796 						    gen_int_mode (~cval & bval,
2797 								  mode));
2798 		  return simplify_gen_binary (IOR, mode, a_nc_b,
2799 					      gen_int_mode (~bval & cval,
2800 							    mode));
2801 		}
2802 	    }
2803 	}
2804 
2805       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2806 	 comparison if STORE_FLAG_VALUE is 1.  */
2807       if (STORE_FLAG_VALUE == 1
2808 	  && trueop1 == const1_rtx
2809 	  && COMPARISON_P (op0)
2810 	  && (reversed = reversed_comparison (op0, mode)))
2811 	return reversed;
2812 
2813       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2814 	 is (lt foo (const_int 0)), so we can perform the above
2815 	 simplification if STORE_FLAG_VALUE is 1.  */
2816 
2817       if (STORE_FLAG_VALUE == 1
2818 	  && trueop1 == const1_rtx
2819 	  && GET_CODE (op0) == LSHIFTRT
2820 	  && CONST_INT_P (XEXP (op0, 1))
2821 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2822 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2823 
2824       /* (xor (comparison foo bar) (const_int sign-bit))
2825 	 when STORE_FLAG_VALUE is the sign bit.  */
2826       if (val_signbit_p (mode, STORE_FLAG_VALUE)
2827 	  && trueop1 == const_true_rtx
2828 	  && COMPARISON_P (op0)
2829 	  && (reversed = reversed_comparison (op0, mode)))
2830 	return reversed;
2831 
2832       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2833       if (tem)
2834 	return tem;
2835 
2836       tem = simplify_associative_operation (code, mode, op0, op1);
2837       if (tem)
2838 	return tem;
2839       break;
2840 
2841     case AND:
2842       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2843 	return trueop1;
2844       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2845 	return op0;
2846       if (HWI_COMPUTABLE_MODE_P (mode))
2847 	{
2848 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2849 	  HOST_WIDE_INT nzop1;
2850 	  if (CONST_INT_P (trueop1))
2851 	    {
2852 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
2853 	      /* If we are turning off bits already known off in OP0, we need
2854 		 not do an AND.  */
2855 	      if ((nzop0 & ~val1) == 0)
2856 		return op0;
2857 	    }
2858 	  nzop1 = nonzero_bits (trueop1, mode);
2859 	  /* If we are clearing all the nonzero bits, the result is zero.  */
2860 	  if ((nzop1 & nzop0) == 0
2861 	      && !side_effects_p (op0) && !side_effects_p (op1))
2862 	    return CONST0_RTX (mode);
2863 	}
2864       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2865 	  && GET_MODE_CLASS (mode) != MODE_CC)
2866 	return op0;
2867       /* A & (~A) -> 0 */
2868       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2869 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2870 	  && ! side_effects_p (op0)
2871 	  && GET_MODE_CLASS (mode) != MODE_CC)
2872 	return CONST0_RTX (mode);
2873 
2874       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2875 	 there are no nonzero bits of C outside of X's mode.  */
2876       if ((GET_CODE (op0) == SIGN_EXTEND
2877 	   || GET_CODE (op0) == ZERO_EXTEND)
2878 	  && CONST_INT_P (trueop1)
2879 	  && HWI_COMPUTABLE_MODE_P (mode)
2880 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2881 	      & UINTVAL (trueop1)) == 0)
2882 	{
2883 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
2884 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2885 				     gen_int_mode (INTVAL (trueop1),
2886 						   imode));
2887 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2888 	}
2889 
2890       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2891 	 we might be able to further simplify the AND with X and potentially
2892 	 remove the truncation altogether.  */
2893       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2894 	{
2895 	  rtx x = XEXP (op0, 0);
2896 	  machine_mode xmode = GET_MODE (x);
2897 	  tem = simplify_gen_binary (AND, xmode, x,
2898 				     gen_int_mode (INTVAL (trueop1), xmode));
2899 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2900 	}
2901 
2902       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
2903       if (GET_CODE (op0) == IOR
2904 	  && CONST_INT_P (trueop1)
2905 	  && CONST_INT_P (XEXP (op0, 1)))
2906 	{
2907 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2908 	  return simplify_gen_binary (IOR, mode,
2909 				      simplify_gen_binary (AND, mode,
2910 							   XEXP (op0, 0), op1),
2911 				      gen_int_mode (tmp, mode));
2912 	}
2913 
2914       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2915 	 insn (and may simplify more).  */
2916       if (GET_CODE (op0) == XOR
2917 	  && rtx_equal_p (XEXP (op0, 0), op1)
2918 	  && ! side_effects_p (op1))
2919 	return simplify_gen_binary (AND, mode,
2920 				    simplify_gen_unary (NOT, mode,
2921 							XEXP (op0, 1), mode),
2922 				    op1);
2923 
2924       if (GET_CODE (op0) == XOR
2925 	  && rtx_equal_p (XEXP (op0, 1), op1)
2926 	  && ! side_effects_p (op1))
2927 	return simplify_gen_binary (AND, mode,
2928 				    simplify_gen_unary (NOT, mode,
2929 							XEXP (op0, 0), mode),
2930 				    op1);
2931 
2932       /* Similarly for (~(A ^ B)) & A.  */
2933       if (GET_CODE (op0) == NOT
2934 	  && GET_CODE (XEXP (op0, 0)) == XOR
2935 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2936 	  && ! side_effects_p (op1))
2937 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2938 
2939       if (GET_CODE (op0) == NOT
2940 	  && GET_CODE (XEXP (op0, 0)) == XOR
2941 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2942 	  && ! side_effects_p (op1))
2943 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2944 
2945       /* Convert (A | B) & A to A.  */
2946       if (GET_CODE (op0) == IOR
2947 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2948 	      || rtx_equal_p (XEXP (op0, 1), op1))
2949 	  && ! side_effects_p (XEXP (op0, 0))
2950 	  && ! side_effects_p (XEXP (op0, 1)))
2951 	return op1;
2952 
2953       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2954 	 ((A & N) + B) & M -> (A + B) & M
2955 	 Similarly if (N & M) == 0,
2956 	 ((A | N) + B) & M -> (A + B) & M
2957 	 and for - instead of + and/or ^ instead of |.
2958          Also, if (N & M) == 0, then
2959 	 (A +- N) & M -> A & M.  */
2960       if (CONST_INT_P (trueop1)
2961 	  && HWI_COMPUTABLE_MODE_P (mode)
2962 	  && ~UINTVAL (trueop1)
2963 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2964 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2965 	{
2966 	  rtx pmop[2];
2967 	  int which;
2968 
2969 	  pmop[0] = XEXP (op0, 0);
2970 	  pmop[1] = XEXP (op0, 1);
2971 
2972 	  if (CONST_INT_P (pmop[1])
2973 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2974 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
2975 
2976 	  for (which = 0; which < 2; which++)
2977 	    {
2978 	      tem = pmop[which];
2979 	      switch (GET_CODE (tem))
2980 		{
2981 		case AND:
2982 		  if (CONST_INT_P (XEXP (tem, 1))
2983 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2984 		      == UINTVAL (trueop1))
2985 		    pmop[which] = XEXP (tem, 0);
2986 		  break;
2987 		case IOR:
2988 		case XOR:
2989 		  if (CONST_INT_P (XEXP (tem, 1))
2990 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2991 		    pmop[which] = XEXP (tem, 0);
2992 		  break;
2993 		default:
2994 		  break;
2995 		}
2996 	    }
2997 
2998 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2999 	    {
3000 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3001 					 pmop[0], pmop[1]);
3002 	      return simplify_gen_binary (code, mode, tem, op1);
3003 	    }
3004 	}
3005 
3006       /* (and X (ior (not X) Y) -> (and X Y) */
3007       if (GET_CODE (op1) == IOR
3008 	  && GET_CODE (XEXP (op1, 0)) == NOT
3009 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3010        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3011 
3012       /* (and (ior (not X) Y) X) -> (and X Y) */
3013       if (GET_CODE (op0) == IOR
3014 	  && GET_CODE (XEXP (op0, 0)) == NOT
3015 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3016 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3017 
3018       /* (and X (ior Y (not X)) -> (and X Y) */
3019       if (GET_CODE (op1) == IOR
3020 	  && GET_CODE (XEXP (op1, 1)) == NOT
3021 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3022        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3023 
3024       /* (and (ior Y (not X)) X) -> (and X Y) */
3025       if (GET_CODE (op0) == IOR
3026 	  && GET_CODE (XEXP (op0, 1)) == NOT
3027 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3028 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3029 
3030       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3031       if (tem)
3032 	return tem;
3033 
3034       tem = simplify_associative_operation (code, mode, op0, op1);
3035       if (tem)
3036 	return tem;
3037       break;
3038 
3039     case UDIV:
3040       /* 0/x is 0 (or x&0 if x has side-effects).  */
3041       if (trueop0 == CONST0_RTX (mode))
3042 	{
3043 	  if (side_effects_p (op1))
3044 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3045 	  return trueop0;
3046 	}
3047       /* x/1 is x.  */
3048       if (trueop1 == CONST1_RTX (mode))
3049 	{
3050 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3051 	  if (tem)
3052 	    return tem;
3053 	}
3054       /* Convert divide by power of two into shift.  */
3055       if (CONST_INT_P (trueop1)
3056 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3057 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3058       break;
3059 
3060     case DIV:
3061       /* Handle floating point and integers separately.  */
3062       if (SCALAR_FLOAT_MODE_P (mode))
3063 	{
3064 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3065 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3066 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3067 	     zeros, since dividing 0 by a negative number gives -0.0  */
3068 	  if (trueop0 == CONST0_RTX (mode)
3069 	      && !HONOR_NANS (mode)
3070 	      && !HONOR_SIGNED_ZEROS (mode)
3071 	      && ! side_effects_p (op1))
3072 	    return op0;
3073 	  /* x/1.0 is x.  */
3074 	  if (trueop1 == CONST1_RTX (mode)
3075 	      && !HONOR_SNANS (mode))
3076 	    return op0;
3077 
3078 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3079 	      && trueop1 != CONST0_RTX (mode))
3080 	    {
3081 	      REAL_VALUE_TYPE d;
3082 	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3083 
3084 	      /* x/-1.0 is -x.  */
3085 	      if (REAL_VALUES_EQUAL (d, dconstm1)
3086 		  && !HONOR_SNANS (mode))
3087 		return simplify_gen_unary (NEG, mode, op0, mode);
3088 
3089 	      /* Change FP division by a constant into multiplication.
3090 		 Only do this with -freciprocal-math.  */
3091 	      if (flag_reciprocal_math
3092 		  && !REAL_VALUES_EQUAL (d, dconst0))
3093 		{
3094 		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3095 		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3096 		  return simplify_gen_binary (MULT, mode, op0, tem);
3097 		}
3098 	    }
3099 	}
3100       else if (SCALAR_INT_MODE_P (mode))
3101 	{
3102 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3103 	  if (trueop0 == CONST0_RTX (mode)
3104 	      && !cfun->can_throw_non_call_exceptions)
3105 	    {
3106 	      if (side_effects_p (op1))
3107 		return simplify_gen_binary (AND, mode, op1, trueop0);
3108 	      return trueop0;
3109 	    }
3110 	  /* x/1 is x.  */
3111 	  if (trueop1 == CONST1_RTX (mode))
3112 	    {
3113 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3114 	      if (tem)
3115 		return tem;
3116 	    }
3117 	  /* x/-1 is -x.  */
3118 	  if (trueop1 == constm1_rtx)
3119 	    {
3120 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3121 	      if (x)
3122 		return simplify_gen_unary (NEG, mode, x, mode);
3123 	    }
3124 	}
3125       break;
3126 
3127     case UMOD:
3128       /* 0%x is 0 (or x&0 if x has side-effects).  */
3129       if (trueop0 == CONST0_RTX (mode))
3130 	{
3131 	  if (side_effects_p (op1))
3132 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3133 	  return trueop0;
3134 	}
3135       /* x%1 is 0 (of x&0 if x has side-effects).  */
3136       if (trueop1 == CONST1_RTX (mode))
3137 	{
3138 	  if (side_effects_p (op0))
3139 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3140 	  return CONST0_RTX (mode);
3141 	}
3142       /* Implement modulus by power of two as AND.  */
3143       if (CONST_INT_P (trueop1)
3144 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3145 	return simplify_gen_binary (AND, mode, op0,
3146 				    gen_int_mode (INTVAL (op1) - 1, mode));
3147       break;
3148 
3149     case MOD:
3150       /* 0%x is 0 (or x&0 if x has side-effects).  */
3151       if (trueop0 == CONST0_RTX (mode))
3152 	{
3153 	  if (side_effects_p (op1))
3154 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3155 	  return trueop0;
3156 	}
3157       /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
3158       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3159 	{
3160 	  if (side_effects_p (op0))
3161 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3162 	  return CONST0_RTX (mode);
3163 	}
3164       break;
3165 
3166     case ROTATERT:
3167     case ROTATE:
3168       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3169 	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3170 	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3171 	 amount instead.  */
3172 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3173       if (CONST_INT_P (trueop1)
3174 	  && IN_RANGE (INTVAL (trueop1),
3175 		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3176 		       GET_MODE_PRECISION (mode) - 1))
3177 	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3178 				    mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3179 							- INTVAL (trueop1)));
3180 #endif
3181       /* FALLTHRU */
3182     case ASHIFTRT:
3183       if (trueop1 == CONST0_RTX (mode))
3184 	return op0;
3185       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3186 	return op0;
3187       /* Rotating ~0 always results in ~0.  */
3188       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3189 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3190 	  && ! side_effects_p (op1))
3191 	return op0;
3192       /* Given:
3193 	 scalar modes M1, M2
3194 	 scalar constants c1, c2
3195 	 size (M2) > size (M1)
3196 	 c1 == size (M2) - size (M1)
3197 	 optimize:
3198 	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3199 				 <low_part>)
3200 		      (const_int <c2>))
3201 	 to:
3202 	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3203 		    <low_part>).  */
3204       if (code == ASHIFTRT
3205 	  && !VECTOR_MODE_P (mode)
3206 	  && SUBREG_P (op0)
3207 	  && CONST_INT_P (op1)
3208 	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3209 	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3210 	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3211 	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3212 	      > GET_MODE_BITSIZE (mode))
3213 	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3214 	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3215 		  - GET_MODE_BITSIZE (mode)))
3216 	  && subreg_lowpart_p (op0))
3217 	{
3218 	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3219 			     + INTVAL (op1));
3220 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3221 	  tmp = simplify_gen_binary (ASHIFTRT,
3222 				     GET_MODE (SUBREG_REG (op0)),
3223 				     XEXP (SUBREG_REG (op0), 0),
3224 				     tmp);
3225 	  return simplify_gen_subreg (mode, tmp, inner_mode,
3226 				      subreg_lowpart_offset (mode,
3227 							     inner_mode));
3228 	}
3229     canonicalize_shift:
3230       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3231 	{
3232 	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3233 	  if (val != INTVAL (op1))
3234 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3235 	}
3236       break;
3237 
3238     case ASHIFT:
3239     case SS_ASHIFT:
3240     case US_ASHIFT:
3241       if (trueop1 == CONST0_RTX (mode))
3242 	return op0;
3243       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3244 	return op0;
3245       goto canonicalize_shift;
3246 
3247     case LSHIFTRT:
3248       if (trueop1 == CONST0_RTX (mode))
3249 	return op0;
3250       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3251 	return op0;
3252       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
3253       if (GET_CODE (op0) == CLZ
3254 	  && CONST_INT_P (trueop1)
3255 	  && STORE_FLAG_VALUE == 1
3256 	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3257 	{
3258 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
3259 	  unsigned HOST_WIDE_INT zero_val = 0;
3260 
3261 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3262 	      && zero_val == GET_MODE_PRECISION (imode)
3263 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3264 	    return simplify_gen_relational (EQ, mode, imode,
3265 					    XEXP (op0, 0), const0_rtx);
3266 	}
3267       goto canonicalize_shift;
3268 
3269     case SMIN:
3270       if (width <= HOST_BITS_PER_WIDE_INT
3271 	  && mode_signbit_p (mode, trueop1)
3272 	  && ! side_effects_p (op0))
3273 	return op1;
3274       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3275 	return op0;
3276       tem = simplify_associative_operation (code, mode, op0, op1);
3277       if (tem)
3278 	return tem;
3279       break;
3280 
3281     case SMAX:
3282       if (width <= HOST_BITS_PER_WIDE_INT
3283 	  && CONST_INT_P (trueop1)
3284 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3285 	  && ! side_effects_p (op0))
3286 	return op1;
3287       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3288 	return op0;
3289       tem = simplify_associative_operation (code, mode, op0, op1);
3290       if (tem)
3291 	return tem;
3292       break;
3293 
3294     case UMIN:
3295       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3296 	return op1;
3297       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3298 	return op0;
3299       tem = simplify_associative_operation (code, mode, op0, op1);
3300       if (tem)
3301 	return tem;
3302       break;
3303 
3304     case UMAX:
3305       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3306 	return op1;
3307       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3308 	return op0;
3309       tem = simplify_associative_operation (code, mode, op0, op1);
3310       if (tem)
3311 	return tem;
3312       break;
3313 
3314     case SS_PLUS:
3315     case US_PLUS:
3316     case SS_MINUS:
3317     case US_MINUS:
3318     case SS_MULT:
3319     case US_MULT:
3320     case SS_DIV:
3321     case US_DIV:
3322       /* ??? There are simplifications that can be done.  */
3323       return 0;
3324 
3325     case VEC_SELECT:
3326       if (!VECTOR_MODE_P (mode))
3327 	{
3328 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3329 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3330 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3331 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3332 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3333 
3334 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3335 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3336 						      (trueop1, 0, 0)));
3337 
3338 	  /* Extract a scalar element from a nested VEC_SELECT expression
3339 	     (with optional nested VEC_CONCAT expression).  Some targets
3340 	     (i386) extract scalar element from a vector using chain of
3341 	     nested VEC_SELECT expressions.  When input operand is a memory
3342 	     operand, this operation can be simplified to a simple scalar
3343 	     load from an offseted memory address.  */
3344 	  if (GET_CODE (trueop0) == VEC_SELECT)
3345 	    {
3346 	      rtx op0 = XEXP (trueop0, 0);
3347 	      rtx op1 = XEXP (trueop0, 1);
3348 
3349 	      machine_mode opmode = GET_MODE (op0);
3350 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3351 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3352 
3353 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3354 	      int elem;
3355 
3356 	      rtvec vec;
3357 	      rtx tmp_op, tmp;
3358 
3359 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3360 	      gcc_assert (i < n_elts);
3361 
3362 	      /* Select element, pointed by nested selector.  */
3363 	      elem = INTVAL (XVECEXP (op1, 0, i));
3364 
3365 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3366 	      if (GET_CODE (op0) == VEC_CONCAT)
3367 		{
3368 		  rtx op00 = XEXP (op0, 0);
3369 		  rtx op01 = XEXP (op0, 1);
3370 
3371 		  machine_mode mode00, mode01;
3372 		  int n_elts00, n_elts01;
3373 
3374 		  mode00 = GET_MODE (op00);
3375 		  mode01 = GET_MODE (op01);
3376 
3377 		  /* Find out number of elements of each operand.  */
3378 		  if (VECTOR_MODE_P (mode00))
3379 		    {
3380 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3381 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3382 		    }
3383 		  else
3384 		    n_elts00 = 1;
3385 
3386 		  if (VECTOR_MODE_P (mode01))
3387 		    {
3388 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3389 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3390 		    }
3391 		  else
3392 		    n_elts01 = 1;
3393 
3394 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3395 
3396 		  /* Select correct operand of VEC_CONCAT
3397 		     and adjust selector. */
3398 		  if (elem < n_elts01)
3399 		    tmp_op = op00;
3400 		  else
3401 		    {
3402 		      tmp_op = op01;
3403 		      elem -= n_elts00;
3404 		    }
3405 		}
3406 	      else
3407 		tmp_op = op0;
3408 
3409 	      vec = rtvec_alloc (1);
3410 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3411 
3412 	      tmp = gen_rtx_fmt_ee (code, mode,
3413 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3414 	      return tmp;
3415 	    }
3416 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
3417 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
3418 	    return XEXP (trueop0, 0);
3419 	}
3420       else
3421 	{
3422 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3423 	  gcc_assert (GET_MODE_INNER (mode)
3424 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3425 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3426 
3427 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3428 	    {
3429 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3430 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3431 	      rtvec v = rtvec_alloc (n_elts);
3432 	      unsigned int i;
3433 
3434 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3435 	      for (i = 0; i < n_elts; i++)
3436 		{
3437 		  rtx x = XVECEXP (trueop1, 0, i);
3438 
3439 		  gcc_assert (CONST_INT_P (x));
3440 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3441 						       INTVAL (x));
3442 		}
3443 
3444 	      return gen_rtx_CONST_VECTOR (mode, v);
3445 	    }
3446 
3447 	  /* Recognize the identity.  */
3448 	  if (GET_MODE (trueop0) == mode)
3449 	    {
3450 	      bool maybe_ident = true;
3451 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3452 		{
3453 		  rtx j = XVECEXP (trueop1, 0, i);
3454 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
3455 		    {
3456 		      maybe_ident = false;
3457 		      break;
3458 		    }
3459 		}
3460 	      if (maybe_ident)
3461 		return trueop0;
3462 	    }
3463 
3464 	  /* If we build {a,b} then permute it, build the result directly.  */
3465 	  if (XVECLEN (trueop1, 0) == 2
3466 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3467 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3468 	      && GET_CODE (trueop0) == VEC_CONCAT
3469 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3470 	      && GET_MODE (XEXP (trueop0, 0)) == mode
3471 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3472 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
3473 	    {
3474 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3475 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3476 	      rtx subop0, subop1;
3477 
3478 	      gcc_assert (i0 < 4 && i1 < 4);
3479 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3480 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3481 
3482 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3483 	    }
3484 
3485 	  if (XVECLEN (trueop1, 0) == 2
3486 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3487 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3488 	      && GET_CODE (trueop0) == VEC_CONCAT
3489 	      && GET_MODE (trueop0) == mode)
3490 	    {
3491 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3492 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3493 	      rtx subop0, subop1;
3494 
3495 	      gcc_assert (i0 < 2 && i1 < 2);
3496 	      subop0 = XEXP (trueop0, i0);
3497 	      subop1 = XEXP (trueop0, i1);
3498 
3499 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3500 	    }
3501 
3502 	  /* If we select one half of a vec_concat, return that.  */
3503 	  if (GET_CODE (trueop0) == VEC_CONCAT
3504 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3505 	    {
3506 	      rtx subop0 = XEXP (trueop0, 0);
3507 	      rtx subop1 = XEXP (trueop0, 1);
3508 	      machine_mode mode0 = GET_MODE (subop0);
3509 	      machine_mode mode1 = GET_MODE (subop1);
3510 	      int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3511 	      int l0 = GET_MODE_SIZE (mode0) / li;
3512 	      int l1 = GET_MODE_SIZE (mode1) / li;
3513 	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3514 	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3515 		{
3516 		  bool success = true;
3517 		  for (int i = 1; i < l0; ++i)
3518 		    {
3519 		      rtx j = XVECEXP (trueop1, 0, i);
3520 		      if (!CONST_INT_P (j) || INTVAL (j) != i)
3521 			{
3522 			  success = false;
3523 			  break;
3524 			}
3525 		    }
3526 		  if (success)
3527 		    return subop0;
3528 		}
3529 	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3530 		{
3531 		  bool success = true;
3532 		  for (int i = 1; i < l1; ++i)
3533 		    {
3534 		      rtx j = XVECEXP (trueop1, 0, i);
3535 		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3536 			{
3537 			  success = false;
3538 			  break;
3539 			}
3540 		    }
3541 		  if (success)
3542 		    return subop1;
3543 		}
3544 	    }
3545 	}
3546 
3547       if (XVECLEN (trueop1, 0) == 1
3548 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3549 	  && GET_CODE (trueop0) == VEC_CONCAT)
3550 	{
3551 	  rtx vec = trueop0;
3552 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3553 
3554 	  /* Try to find the element in the VEC_CONCAT.  */
3555 	  while (GET_MODE (vec) != mode
3556 		 && GET_CODE (vec) == VEC_CONCAT)
3557 	    {
3558 	      HOST_WIDE_INT vec_size;
3559 
3560 	      if (CONST_INT_P (XEXP (vec, 0)))
3561 	        {
3562 	          /* vec_concat of two const_ints doesn't make sense with
3563 	             respect to modes.  */
3564 	          if (CONST_INT_P (XEXP (vec, 1)))
3565 	            return 0;
3566 
3567 	          vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3568 	                     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3569 	        }
3570 	      else
3571 	        vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3572 
3573 	      if (offset < vec_size)
3574 		vec = XEXP (vec, 0);
3575 	      else
3576 		{
3577 		  offset -= vec_size;
3578 		  vec = XEXP (vec, 1);
3579 		}
3580 	      vec = avoid_constant_pool_reference (vec);
3581 	    }
3582 
3583 	  if (GET_MODE (vec) == mode)
3584 	    return vec;
3585 	}
3586 
3587       /* If we select elements in a vec_merge that all come from the same
3588 	 operand, select from that operand directly.  */
3589       if (GET_CODE (op0) == VEC_MERGE)
3590 	{
3591 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3592 	  if (CONST_INT_P (trueop02))
3593 	    {
3594 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3595 	      bool all_operand0 = true;
3596 	      bool all_operand1 = true;
3597 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3598 		{
3599 		  rtx j = XVECEXP (trueop1, 0, i);
3600 		  if (sel & (1 << UINTVAL (j)))
3601 		    all_operand1 = false;
3602 		  else
3603 		    all_operand0 = false;
3604 		}
3605 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3606 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3607 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3608 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3609 	    }
3610 	}
3611 
3612       /* If we have two nested selects that are inverses of each
3613 	 other, replace them with the source operand.  */
3614       if (GET_CODE (trueop0) == VEC_SELECT
3615 	  && GET_MODE (XEXP (trueop0, 0)) == mode)
3616 	{
3617 	  rtx op0_subop1 = XEXP (trueop0, 1);
3618 	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3619 	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3620 
3621 	  /* Apply the outer ordering vector to the inner one.  (The inner
3622 	     ordering vector is expressly permitted to be of a different
3623 	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
3624 	     then the two VEC_SELECTs cancel.  */
3625 	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3626 	    {
3627 	      rtx x = XVECEXP (trueop1, 0, i);
3628 	      if (!CONST_INT_P (x))
3629 		return 0;
3630 	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3631 	      if (!CONST_INT_P (y) || i != INTVAL (y))
3632 		return 0;
3633 	    }
3634 	  return XEXP (trueop0, 0);
3635 	}
3636 
3637       return 0;
3638     case VEC_CONCAT:
3639       {
3640 	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3641 				      ? GET_MODE (trueop0)
3642 				      : GET_MODE_INNER (mode));
3643 	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3644 				      ? GET_MODE (trueop1)
3645 				      : GET_MODE_INNER (mode));
3646 
3647 	gcc_assert (VECTOR_MODE_P (mode));
3648 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3649 		    == GET_MODE_SIZE (mode));
3650 
3651 	if (VECTOR_MODE_P (op0_mode))
3652 	  gcc_assert (GET_MODE_INNER (mode)
3653 		      == GET_MODE_INNER (op0_mode));
3654 	else
3655 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3656 
3657 	if (VECTOR_MODE_P (op1_mode))
3658 	  gcc_assert (GET_MODE_INNER (mode)
3659 		      == GET_MODE_INNER (op1_mode));
3660 	else
3661 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3662 
3663 	if ((GET_CODE (trueop0) == CONST_VECTOR
3664 	     || CONST_SCALAR_INT_P (trueop0)
3665 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3666 	    && (GET_CODE (trueop1) == CONST_VECTOR
3667 		|| CONST_SCALAR_INT_P (trueop1)
3668 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3669 	  {
3670 	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3671 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3672 	    rtvec v = rtvec_alloc (n_elts);
3673 	    unsigned int i;
3674 	    unsigned in_n_elts = 1;
3675 
3676 	    if (VECTOR_MODE_P (op0_mode))
3677 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3678 	    for (i = 0; i < n_elts; i++)
3679 	      {
3680 		if (i < in_n_elts)
3681 		  {
3682 		    if (!VECTOR_MODE_P (op0_mode))
3683 		      RTVEC_ELT (v, i) = trueop0;
3684 		    else
3685 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3686 		  }
3687 		else
3688 		  {
3689 		    if (!VECTOR_MODE_P (op1_mode))
3690 		      RTVEC_ELT (v, i) = trueop1;
3691 		    else
3692 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3693 							   i - in_n_elts);
3694 		  }
3695 	      }
3696 
3697 	    return gen_rtx_CONST_VECTOR (mode, v);
3698 	  }
3699 
3700 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
3701 	   Restrict the transformation to avoid generating a VEC_SELECT with a
3702 	   mode unrelated to its operand.  */
3703 	if (GET_CODE (trueop0) == VEC_SELECT
3704 	    && GET_CODE (trueop1) == VEC_SELECT
3705 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3706 	    && GET_MODE (XEXP (trueop0, 0)) == mode)
3707 	  {
3708 	    rtx par0 = XEXP (trueop0, 1);
3709 	    rtx par1 = XEXP (trueop1, 1);
3710 	    int len0 = XVECLEN (par0, 0);
3711 	    int len1 = XVECLEN (par1, 0);
3712 	    rtvec vec = rtvec_alloc (len0 + len1);
3713 	    for (int i = 0; i < len0; i++)
3714 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3715 	    for (int i = 0; i < len1; i++)
3716 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3717 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3718 					gen_rtx_PARALLEL (VOIDmode, vec));
3719 	  }
3720       }
3721       return 0;
3722 
3723     default:
3724       gcc_unreachable ();
3725     }
3726 
3727   return 0;
3728 }
3729 
3730 rtx
3731 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3732 				 rtx op0, rtx op1)
3733 {
3734   unsigned int width = GET_MODE_PRECISION (mode);
3735 
3736   if (VECTOR_MODE_P (mode)
3737       && code != VEC_CONCAT
3738       && GET_CODE (op0) == CONST_VECTOR
3739       && GET_CODE (op1) == CONST_VECTOR)
3740     {
3741       unsigned n_elts = GET_MODE_NUNITS (mode);
3742       machine_mode op0mode = GET_MODE (op0);
3743       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3744       machine_mode op1mode = GET_MODE (op1);
3745       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3746       rtvec v = rtvec_alloc (n_elts);
3747       unsigned int i;
3748 
3749       gcc_assert (op0_n_elts == n_elts);
3750       gcc_assert (op1_n_elts == n_elts);
3751       for (i = 0; i < n_elts; i++)
3752 	{
3753 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3754 					     CONST_VECTOR_ELT (op0, i),
3755 					     CONST_VECTOR_ELT (op1, i));
3756 	  if (!x)
3757 	    return 0;
3758 	  RTVEC_ELT (v, i) = x;
3759 	}
3760 
3761       return gen_rtx_CONST_VECTOR (mode, v);
3762     }
3763 
3764   if (VECTOR_MODE_P (mode)
3765       && code == VEC_CONCAT
3766       && (CONST_SCALAR_INT_P (op0)
3767 	  || GET_CODE (op0) == CONST_FIXED
3768 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
3769       && (CONST_SCALAR_INT_P (op1)
3770 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
3771 	  || GET_CODE (op1) == CONST_FIXED))
3772     {
3773       unsigned n_elts = GET_MODE_NUNITS (mode);
3774       rtvec v = rtvec_alloc (n_elts);
3775 
3776       gcc_assert (n_elts >= 2);
3777       if (n_elts == 2)
3778 	{
3779 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3780 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3781 
3782 	  RTVEC_ELT (v, 0) = op0;
3783 	  RTVEC_ELT (v, 1) = op1;
3784 	}
3785       else
3786 	{
3787 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3788 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3789 	  unsigned i;
3790 
3791 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3792 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3793 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3794 
3795 	  for (i = 0; i < op0_n_elts; ++i)
3796 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3797 	  for (i = 0; i < op1_n_elts; ++i)
3798 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3799 	}
3800 
3801       return gen_rtx_CONST_VECTOR (mode, v);
3802     }
3803 
3804   if (SCALAR_FLOAT_MODE_P (mode)
3805       && CONST_DOUBLE_AS_FLOAT_P (op0)
3806       && CONST_DOUBLE_AS_FLOAT_P (op1)
3807       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3808     {
3809       if (code == AND
3810 	  || code == IOR
3811 	  || code == XOR)
3812 	{
3813 	  long tmp0[4];
3814 	  long tmp1[4];
3815 	  REAL_VALUE_TYPE r;
3816 	  int i;
3817 
3818 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3819 			  GET_MODE (op0));
3820 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3821 			  GET_MODE (op1));
3822 	  for (i = 0; i < 4; i++)
3823 	    {
3824 	      switch (code)
3825 	      {
3826 	      case AND:
3827 		tmp0[i] &= tmp1[i];
3828 		break;
3829 	      case IOR:
3830 		tmp0[i] |= tmp1[i];
3831 		break;
3832 	      case XOR:
3833 		tmp0[i] ^= tmp1[i];
3834 		break;
3835 	      default:
3836 		gcc_unreachable ();
3837 	      }
3838 	    }
3839 	   real_from_target (&r, tmp0, mode);
3840 	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3841 	}
3842       else
3843 	{
3844 	  REAL_VALUE_TYPE f0, f1, value, result;
3845 	  bool inexact;
3846 
3847 	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3848 	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3849 	  real_convert (&f0, mode, &f0);
3850 	  real_convert (&f1, mode, &f1);
3851 
3852 	  if (HONOR_SNANS (mode)
3853 	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3854 	    return 0;
3855 
3856 	  if (code == DIV
3857 	      && REAL_VALUES_EQUAL (f1, dconst0)
3858 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3859 	    return 0;
3860 
3861 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3862 	      && flag_trapping_math
3863 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3864 	    {
3865 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3866 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3867 
3868 	      switch (code)
3869 		{
3870 		case PLUS:
3871 		  /* Inf + -Inf = NaN plus exception.  */
3872 		  if (s0 != s1)
3873 		    return 0;
3874 		  break;
3875 		case MINUS:
3876 		  /* Inf - Inf = NaN plus exception.  */
3877 		  if (s0 == s1)
3878 		    return 0;
3879 		  break;
3880 		case DIV:
3881 		  /* Inf / Inf = NaN plus exception.  */
3882 		  return 0;
3883 		default:
3884 		  break;
3885 		}
3886 	    }
3887 
3888 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3889 	      && flag_trapping_math
3890 	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3891 		  || (REAL_VALUE_ISINF (f1)
3892 		      && REAL_VALUES_EQUAL (f0, dconst0))))
3893 	    /* Inf * 0 = NaN plus exception.  */
3894 	    return 0;
3895 
3896 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3897 				     &f0, &f1);
3898 	  real_convert (&result, mode, &value);
3899 
3900 	  /* Don't constant fold this floating point operation if
3901 	     the result has overflowed and flag_trapping_math.  */
3902 
3903 	  if (flag_trapping_math
3904 	      && MODE_HAS_INFINITIES (mode)
3905 	      && REAL_VALUE_ISINF (result)
3906 	      && !REAL_VALUE_ISINF (f0)
3907 	      && !REAL_VALUE_ISINF (f1))
3908 	    /* Overflow plus exception.  */
3909 	    return 0;
3910 
3911 	  /* Don't constant fold this floating point operation if the
3912 	     result may dependent upon the run-time rounding mode and
3913 	     flag_rounding_math is set, or if GCC's software emulation
3914 	     is unable to accurately represent the result.  */
3915 
3916 	  if ((flag_rounding_math
3917 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3918 	      && (inexact || !real_identical (&result, &value)))
3919 	    return NULL_RTX;
3920 
3921 	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3922 	}
3923     }
3924 
3925   /* We can fold some multi-word operations.  */
3926   if ((GET_MODE_CLASS (mode) == MODE_INT
3927        || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3928       && CONST_SCALAR_INT_P (op0)
3929       && CONST_SCALAR_INT_P (op1))
3930     {
3931       wide_int result;
3932       bool overflow;
3933       rtx_mode_t pop0 = std::make_pair (op0, mode);
3934       rtx_mode_t pop1 = std::make_pair (op1, mode);
3935 
3936 #if TARGET_SUPPORTS_WIDE_INT == 0
3937       /* This assert keeps the simplification from producing a result
3938 	 that cannot be represented in a CONST_DOUBLE but a lot of
3939 	 upstream callers expect that this function never fails to
3940 	 simplify something and so you if you added this to the test
3941 	 above the code would die later anyway.  If this assert
3942 	 happens, you just need to make the port support wide int.  */
3943       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3944 #endif
3945       switch (code)
3946 	{
3947 	case MINUS:
3948 	  result = wi::sub (pop0, pop1);
3949 	  break;
3950 
3951 	case PLUS:
3952 	  result = wi::add (pop0, pop1);
3953 	  break;
3954 
3955 	case MULT:
3956 	  result = wi::mul (pop0, pop1);
3957 	  break;
3958 
3959 	case DIV:
3960 	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3961 	  if (overflow)
3962 	    return NULL_RTX;
3963 	  break;
3964 
3965 	case MOD:
3966 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3967 	  if (overflow)
3968 	    return NULL_RTX;
3969 	  break;
3970 
3971 	case UDIV:
3972 	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3973 	  if (overflow)
3974 	    return NULL_RTX;
3975 	  break;
3976 
3977 	case UMOD:
3978 	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3979 	  if (overflow)
3980 	    return NULL_RTX;
3981 	  break;
3982 
3983 	case AND:
3984 	  result = wi::bit_and (pop0, pop1);
3985 	  break;
3986 
3987 	case IOR:
3988 	  result = wi::bit_or (pop0, pop1);
3989 	  break;
3990 
3991 	case XOR:
3992 	  result = wi::bit_xor (pop0, pop1);
3993 	  break;
3994 
3995 	case SMIN:
3996 	  result = wi::smin (pop0, pop1);
3997 	  break;
3998 
3999 	case SMAX:
4000 	  result = wi::smax (pop0, pop1);
4001 	  break;
4002 
4003 	case UMIN:
4004 	  result = wi::umin (pop0, pop1);
4005 	  break;
4006 
4007 	case UMAX:
4008 	  result = wi::umax (pop0, pop1);
4009 	  break;
4010 
4011 	case LSHIFTRT:
4012 	case ASHIFTRT:
4013 	case ASHIFT:
4014 	  {
4015 	    wide_int wop1 = pop1;
4016 	    if (SHIFT_COUNT_TRUNCATED)
4017 	      wop1 = wi::umod_trunc (wop1, width);
4018 	    else if (wi::geu_p (wop1, width))
4019 	      return NULL_RTX;
4020 
4021 	    switch (code)
4022 	      {
4023 	      case LSHIFTRT:
4024 		result = wi::lrshift (pop0, wop1);
4025 		break;
4026 
4027 	      case ASHIFTRT:
4028 		result = wi::arshift (pop0, wop1);
4029 		break;
4030 
4031 	      case ASHIFT:
4032 		result = wi::lshift (pop0, wop1);
4033 		break;
4034 
4035 	      default:
4036 		gcc_unreachable ();
4037 	      }
4038 	    break;
4039 	  }
4040 	case ROTATE:
4041 	case ROTATERT:
4042 	  {
4043 	    if (wi::neg_p (pop1))
4044 	      return NULL_RTX;
4045 
4046 	    switch (code)
4047 	      {
4048 	      case ROTATE:
4049 		result = wi::lrotate (pop0, pop1);
4050 		break;
4051 
4052 	      case ROTATERT:
4053 		result = wi::rrotate (pop0, pop1);
4054 		break;
4055 
4056 	      default:
4057 		gcc_unreachable ();
4058 	      }
4059 	    break;
4060 	  }
4061 	default:
4062 	  return NULL_RTX;
4063 	}
4064       return immed_wide_int_const (result, mode);
4065     }
4066 
4067   return NULL_RTX;
4068 }
4069 
4070 
4071 
4072 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4073    PLUS or MINUS.
4074 
4075    Rather than test for specific case, we do this by a brute-force method
4076    and do all possible simplifications until no more changes occur.  Then
4077    we rebuild the operation.  */
4078 
4079 struct simplify_plus_minus_op_data
4080 {
4081   rtx op;
4082   short neg;
4083 };
4084 
4085 static bool
4086 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4087 {
4088   int result;
4089 
4090   result = (commutative_operand_precedence (y)
4091 	    - commutative_operand_precedence (x));
4092   if (result)
4093     return result > 0;
4094 
4095   /* Group together equal REGs to do more simplification.  */
4096   if (REG_P (x) && REG_P (y))
4097     return REGNO (x) > REGNO (y);
4098   else
4099     return false;
4100 }
4101 
4102 static rtx
4103 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4104 		     rtx op1)
4105 {
4106   struct simplify_plus_minus_op_data ops[16];
4107   rtx result, tem;
4108   int n_ops = 2;
4109   int changed, n_constants, canonicalized = 0;
4110   int i, j;
4111 
4112   memset (ops, 0, sizeof ops);
4113 
4114   /* Set up the two operands and then expand them until nothing has been
4115      changed.  If we run out of room in our array, give up; this should
4116      almost never happen.  */
4117 
4118   ops[0].op = op0;
4119   ops[0].neg = 0;
4120   ops[1].op = op1;
4121   ops[1].neg = (code == MINUS);
4122 
4123   do
4124     {
4125       changed = 0;
4126       n_constants = 0;
4127 
4128       for (i = 0; i < n_ops; i++)
4129 	{
4130 	  rtx this_op = ops[i].op;
4131 	  int this_neg = ops[i].neg;
4132 	  enum rtx_code this_code = GET_CODE (this_op);
4133 
4134 	  switch (this_code)
4135 	    {
4136 	    case PLUS:
4137 	    case MINUS:
4138 	      if (n_ops == ARRAY_SIZE (ops))
4139 		return NULL_RTX;
4140 
4141 	      ops[n_ops].op = XEXP (this_op, 1);
4142 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4143 	      n_ops++;
4144 
4145 	      ops[i].op = XEXP (this_op, 0);
4146 	      changed = 1;
4147 	      canonicalized |= this_neg || i != n_ops - 2;
4148 	      break;
4149 
4150 	    case NEG:
4151 	      ops[i].op = XEXP (this_op, 0);
4152 	      ops[i].neg = ! this_neg;
4153 	      changed = 1;
4154 	      canonicalized = 1;
4155 	      break;
4156 
4157 	    case CONST:
4158 	      if (n_ops != ARRAY_SIZE (ops)
4159 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
4160 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4161 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4162 		{
4163 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
4164 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4165 		  ops[n_ops].neg = this_neg;
4166 		  n_ops++;
4167 		  changed = 1;
4168 	          canonicalized = 1;
4169 		}
4170 	      break;
4171 
4172 	    case NOT:
4173 	      /* ~a -> (-a - 1) */
4174 	      if (n_ops != ARRAY_SIZE (ops))
4175 		{
4176 		  ops[n_ops].op = CONSTM1_RTX (mode);
4177 		  ops[n_ops++].neg = this_neg;
4178 		  ops[i].op = XEXP (this_op, 0);
4179 		  ops[i].neg = !this_neg;
4180 		  changed = 1;
4181 	          canonicalized = 1;
4182 		}
4183 	      break;
4184 
4185 	    case CONST_INT:
4186 	      n_constants++;
4187 	      if (this_neg)
4188 		{
4189 		  ops[i].op = neg_const_int (mode, this_op);
4190 		  ops[i].neg = 0;
4191 		  changed = 1;
4192 	          canonicalized = 1;
4193 		}
4194 	      break;
4195 
4196 	    default:
4197 	      break;
4198 	    }
4199 	}
4200     }
4201   while (changed);
4202 
4203   if (n_constants > 1)
4204     canonicalized = 1;
4205 
4206   gcc_assert (n_ops >= 2);
4207 
4208   /* If we only have two operands, we can avoid the loops.  */
4209   if (n_ops == 2)
4210     {
4211       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4212       rtx lhs, rhs;
4213 
4214       /* Get the two operands.  Be careful with the order, especially for
4215 	 the cases where code == MINUS.  */
4216       if (ops[0].neg && ops[1].neg)
4217 	{
4218 	  lhs = gen_rtx_NEG (mode, ops[0].op);
4219 	  rhs = ops[1].op;
4220 	}
4221       else if (ops[0].neg)
4222 	{
4223 	  lhs = ops[1].op;
4224 	  rhs = ops[0].op;
4225 	}
4226       else
4227 	{
4228 	  lhs = ops[0].op;
4229 	  rhs = ops[1].op;
4230 	}
4231 
4232       return simplify_const_binary_operation (code, mode, lhs, rhs);
4233     }
4234 
4235   /* Now simplify each pair of operands until nothing changes.  */
4236   do
4237     {
4238       /* Insertion sort is good enough for a small array.  */
4239       for (i = 1; i < n_ops; i++)
4240         {
4241           struct simplify_plus_minus_op_data save;
4242           j = i - 1;
4243           if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4244 	    continue;
4245 
4246           canonicalized = 1;
4247           save = ops[i];
4248           do
4249 	    ops[j + 1] = ops[j];
4250           while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4251           ops[j + 1] = save;
4252         }
4253 
4254       changed = 0;
4255       for (i = n_ops - 1; i > 0; i--)
4256 	for (j = i - 1; j >= 0; j--)
4257 	  {
4258 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4259 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4260 
4261 	    if (lhs != 0 && rhs != 0)
4262 	      {
4263 		enum rtx_code ncode = PLUS;
4264 
4265 		if (lneg != rneg)
4266 		  {
4267 		    ncode = MINUS;
4268 		    if (lneg)
4269 		      tem = lhs, lhs = rhs, rhs = tem;
4270 		  }
4271 		else if (swap_commutative_operands_p (lhs, rhs))
4272 		  tem = lhs, lhs = rhs, rhs = tem;
4273 
4274 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4275 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4276 		  {
4277 		    rtx tem_lhs, tem_rhs;
4278 
4279 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4280 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4281 		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4282 
4283 		    if (tem && !CONSTANT_P (tem))
4284 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4285 		  }
4286 		else
4287 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4288 
4289 		if (tem)
4290 		  {
4291 		    /* Reject "simplifications" that just wrap the two
4292 		       arguments in a CONST.  Failure to do so can result
4293 		       in infinite recursion with simplify_binary_operation
4294 		       when it calls us to simplify CONST operations.
4295 		       Also, if we find such a simplification, don't try
4296 		       any more combinations with this rhs:  We must have
4297 		       something like symbol+offset, ie. one of the
4298 		       trivial CONST expressions we handle later.  */
4299 		    if (GET_CODE (tem) == CONST
4300 			&& GET_CODE (XEXP (tem, 0)) == ncode
4301 			&& XEXP (XEXP (tem, 0), 0) == lhs
4302 			&& XEXP (XEXP (tem, 0), 1) == rhs)
4303 		      break;
4304 		    lneg &= rneg;
4305 		    if (GET_CODE (tem) == NEG)
4306 		      tem = XEXP (tem, 0), lneg = !lneg;
4307 		    if (CONST_INT_P (tem) && lneg)
4308 		      tem = neg_const_int (mode, tem), lneg = 0;
4309 
4310 		    ops[i].op = tem;
4311 		    ops[i].neg = lneg;
4312 		    ops[j].op = NULL_RTX;
4313 		    changed = 1;
4314 		    canonicalized = 1;
4315 		  }
4316 	      }
4317 	  }
4318 
4319       /* If nothing changed, fail.  */
4320       if (!canonicalized)
4321         return NULL_RTX;
4322 
4323       /* Pack all the operands to the lower-numbered entries.  */
4324       for (i = 0, j = 0; j < n_ops; j++)
4325         if (ops[j].op)
4326           {
4327 	    ops[i] = ops[j];
4328 	    i++;
4329           }
4330       n_ops = i;
4331     }
4332   while (changed);
4333 
4334   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4335   if (n_ops == 2
4336       && CONST_INT_P (ops[1].op)
4337       && CONSTANT_P (ops[0].op)
4338       && ops[0].neg)
4339     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4340 
4341   /* We suppressed creation of trivial CONST expressions in the
4342      combination loop to avoid recursion.  Create one manually now.
4343      The combination loop should have ensured that there is exactly
4344      one CONST_INT, and the sort will have ensured that it is last
4345      in the array and that any other constant will be next-to-last.  */
4346 
4347   if (n_ops > 1
4348       && CONST_INT_P (ops[n_ops - 1].op)
4349       && CONSTANT_P (ops[n_ops - 2].op))
4350     {
4351       rtx value = ops[n_ops - 1].op;
4352       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4353 	value = neg_const_int (mode, value);
4354       ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4355 					 INTVAL (value));
4356       n_ops--;
4357     }
4358 
4359   /* Put a non-negated operand first, if possible.  */
4360 
4361   for (i = 0; i < n_ops && ops[i].neg; i++)
4362     continue;
4363   if (i == n_ops)
4364     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4365   else if (i != 0)
4366     {
4367       tem = ops[0].op;
4368       ops[0] = ops[i];
4369       ops[i].op = tem;
4370       ops[i].neg = 1;
4371     }
4372 
4373   /* Now make the result by performing the requested operations.  */
4374   result = ops[0].op;
4375   for (i = 1; i < n_ops; i++)
4376     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4377 			     mode, result, ops[i].op);
4378 
4379   return result;
4380 }
4381 
4382 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4383 static bool
4384 plus_minus_operand_p (const_rtx x)
4385 {
4386   return GET_CODE (x) == PLUS
4387          || GET_CODE (x) == MINUS
4388 	 || (GET_CODE (x) == CONST
4389 	     && GET_CODE (XEXP (x, 0)) == PLUS
4390 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4391 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4392 }
4393 
4394 /* Like simplify_binary_operation except used for relational operators.
4395    MODE is the mode of the result. If MODE is VOIDmode, both operands must
4396    not also be VOIDmode.
4397 
4398    CMP_MODE specifies in which mode the comparison is done in, so it is
4399    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4400    the operands or, if both are VOIDmode, the operands are compared in
4401    "infinite precision".  */
4402 rtx
4403 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4404 			       machine_mode cmp_mode, rtx op0, rtx op1)
4405 {
4406   rtx tem, trueop0, trueop1;
4407 
4408   if (cmp_mode == VOIDmode)
4409     cmp_mode = GET_MODE (op0);
4410   if (cmp_mode == VOIDmode)
4411     cmp_mode = GET_MODE (op1);
4412 
4413   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4414   if (tem)
4415     {
4416       if (SCALAR_FLOAT_MODE_P (mode))
4417 	{
4418           if (tem == const0_rtx)
4419             return CONST0_RTX (mode);
4420 #ifdef FLOAT_STORE_FLAG_VALUE
4421 	  {
4422 	    REAL_VALUE_TYPE val;
4423 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4424 	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4425 	  }
4426 #else
4427 	  return NULL_RTX;
4428 #endif
4429 	}
4430       if (VECTOR_MODE_P (mode))
4431 	{
4432 	  if (tem == const0_rtx)
4433 	    return CONST0_RTX (mode);
4434 #ifdef VECTOR_STORE_FLAG_VALUE
4435 	  {
4436 	    int i, units;
4437 	    rtvec v;
4438 
4439 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4440 	    if (val == NULL_RTX)
4441 	      return NULL_RTX;
4442 	    if (val == const1_rtx)
4443 	      return CONST1_RTX (mode);
4444 
4445 	    units = GET_MODE_NUNITS (mode);
4446 	    v = rtvec_alloc (units);
4447 	    for (i = 0; i < units; i++)
4448 	      RTVEC_ELT (v, i) = val;
4449 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
4450 	  }
4451 #else
4452 	  return NULL_RTX;
4453 #endif
4454 	}
4455 
4456       return tem;
4457     }
4458 
4459   /* For the following tests, ensure const0_rtx is op1.  */
4460   if (swap_commutative_operands_p (op0, op1)
4461       || (op0 == const0_rtx && op1 != const0_rtx))
4462     tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4463 
4464   /* If op0 is a compare, extract the comparison arguments from it.  */
4465   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4466     return simplify_gen_relational (code, mode, VOIDmode,
4467 				    XEXP (op0, 0), XEXP (op0, 1));
4468 
4469   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4470       || CC0_P (op0))
4471     return NULL_RTX;
4472 
4473   trueop0 = avoid_constant_pool_reference (op0);
4474   trueop1 = avoid_constant_pool_reference (op1);
4475   return simplify_relational_operation_1 (code, mode, cmp_mode,
4476 		  			  trueop0, trueop1);
4477 }
4478 
4479 /* This part of simplify_relational_operation is only used when CMP_MODE
4480    is not in class MODE_CC (i.e. it is a real comparison).
4481 
4482    MODE is the mode of the result, while CMP_MODE specifies in which
4483    mode the comparison is done in, so it is the mode of the operands.  */
4484 
4485 static rtx
4486 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4487 				 machine_mode cmp_mode, rtx op0, rtx op1)
4488 {
4489   enum rtx_code op0code = GET_CODE (op0);
4490 
4491   if (op1 == const0_rtx && COMPARISON_P (op0))
4492     {
4493       /* If op0 is a comparison, extract the comparison arguments
4494          from it.  */
4495       if (code == NE)
4496 	{
4497 	  if (GET_MODE (op0) == mode)
4498 	    return simplify_rtx (op0);
4499 	  else
4500 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4501 					    XEXP (op0, 0), XEXP (op0, 1));
4502 	}
4503       else if (code == EQ)
4504 	{
4505 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4506 	  if (new_code != UNKNOWN)
4507 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4508 					    XEXP (op0, 0), XEXP (op0, 1));
4509 	}
4510     }
4511 
4512   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4513      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
4514   if ((code == LTU || code == GEU)
4515       && GET_CODE (op0) == PLUS
4516       && CONST_INT_P (XEXP (op0, 1))
4517       && (rtx_equal_p (op1, XEXP (op0, 0))
4518 	  || rtx_equal_p (op1, XEXP (op0, 1)))
4519       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4520       && XEXP (op0, 1) != const0_rtx)
4521     {
4522       rtx new_cmp
4523 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4524       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4525 				      cmp_mode, XEXP (op0, 0), new_cmp);
4526     }
4527 
4528   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4529   if ((code == LTU || code == GEU)
4530       && GET_CODE (op0) == PLUS
4531       && rtx_equal_p (op1, XEXP (op0, 1))
4532       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4533       && !rtx_equal_p (op1, XEXP (op0, 0)))
4534     return simplify_gen_relational (code, mode, cmp_mode, op0,
4535 				    copy_rtx (XEXP (op0, 0)));
4536 
4537   if (op1 == const0_rtx)
4538     {
4539       /* Canonicalize (GTU x 0) as (NE x 0).  */
4540       if (code == GTU)
4541         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4542       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4543       if (code == LEU)
4544         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4545     }
4546   else if (op1 == const1_rtx)
4547     {
4548       switch (code)
4549         {
4550         case GE:
4551 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4552 	  return simplify_gen_relational (GT, mode, cmp_mode,
4553 					  op0, const0_rtx);
4554 	case GEU:
4555 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4556 	  return simplify_gen_relational (NE, mode, cmp_mode,
4557 					  op0, const0_rtx);
4558 	case LT:
4559 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4560 	  return simplify_gen_relational (LE, mode, cmp_mode,
4561 					  op0, const0_rtx);
4562 	case LTU:
4563 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4564 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4565 					  op0, const0_rtx);
4566 	default:
4567 	  break;
4568 	}
4569     }
4570   else if (op1 == constm1_rtx)
4571     {
4572       /* Canonicalize (LE x -1) as (LT x 0).  */
4573       if (code == LE)
4574         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4575       /* Canonicalize (GT x -1) as (GE x 0).  */
4576       if (code == GT)
4577         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4578     }
4579 
4580   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
4581   if ((code == EQ || code == NE)
4582       && (op0code == PLUS || op0code == MINUS)
4583       && CONSTANT_P (op1)
4584       && CONSTANT_P (XEXP (op0, 1))
4585       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4586     {
4587       rtx x = XEXP (op0, 0);
4588       rtx c = XEXP (op0, 1);
4589       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4590       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4591 
4592       /* Detect an infinite recursive condition, where we oscillate at this
4593 	 simplification case between:
4594 	    A + B == C  <--->  C - B == A,
4595 	 where A, B, and C are all constants with non-simplifiable expressions,
4596 	 usually SYMBOL_REFs.  */
4597       if (GET_CODE (tem) == invcode
4598 	  && CONSTANT_P (x)
4599 	  && rtx_equal_p (c, XEXP (tem, 1)))
4600 	return NULL_RTX;
4601 
4602       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4603     }
4604 
4605   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4606      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4607   if (code == NE
4608       && op1 == const0_rtx
4609       && GET_MODE_CLASS (mode) == MODE_INT
4610       && cmp_mode != VOIDmode
4611       /* ??? Work-around BImode bugs in the ia64 backend.  */
4612       && mode != BImode
4613       && cmp_mode != BImode
4614       && nonzero_bits (op0, cmp_mode) == 1
4615       && STORE_FLAG_VALUE == 1)
4616     return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4617 	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4618 	   : lowpart_subreg (mode, op0, cmp_mode);
4619 
4620   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4621   if ((code == EQ || code == NE)
4622       && op1 == const0_rtx
4623       && op0code == XOR)
4624     return simplify_gen_relational (code, mode, cmp_mode,
4625 				    XEXP (op0, 0), XEXP (op0, 1));
4626 
4627   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4628   if ((code == EQ || code == NE)
4629       && op0code == XOR
4630       && rtx_equal_p (XEXP (op0, 0), op1)
4631       && !side_effects_p (XEXP (op0, 0)))
4632     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4633 				    CONST0_RTX (mode));
4634 
4635   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4636   if ((code == EQ || code == NE)
4637       && op0code == XOR
4638       && rtx_equal_p (XEXP (op0, 1), op1)
4639       && !side_effects_p (XEXP (op0, 1)))
4640     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4641 				    CONST0_RTX (mode));
4642 
4643   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
4644   if ((code == EQ || code == NE)
4645       && op0code == XOR
4646       && CONST_SCALAR_INT_P (op1)
4647       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4648     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4649 				    simplify_gen_binary (XOR, cmp_mode,
4650 							 XEXP (op0, 1), op1));
4651 
4652   /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4653      can be implemented with a BICS instruction on some targets, or
4654      constant-folded if y is a constant.  */
4655   if ((code == EQ || code == NE)
4656       && op0code == AND
4657       && rtx_equal_p (XEXP (op0, 0), op1)
4658       && !side_effects_p (op1)
4659       && op1 != CONST0_RTX (cmp_mode))
4660     {
4661       rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4662       rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4663 
4664       return simplify_gen_relational (code, mode, cmp_mode, lhs,
4665 				      CONST0_RTX (cmp_mode));
4666     }
4667 
4668   /* Likewise for (eq/ne (and x y) y).  */
4669   if ((code == EQ || code == NE)
4670       && op0code == AND
4671       && rtx_equal_p (XEXP (op0, 1), op1)
4672       && !side_effects_p (op1)
4673       && op1 != CONST0_RTX (cmp_mode))
4674     {
4675       rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4676       rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4677 
4678       return simplify_gen_relational (code, mode, cmp_mode, lhs,
4679 				      CONST0_RTX (cmp_mode));
4680     }
4681 
4682   /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
4683   if ((code == EQ || code == NE)
4684       && GET_CODE (op0) == BSWAP
4685       && CONST_SCALAR_INT_P (op1))
4686     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4687 				    simplify_gen_unary (BSWAP, cmp_mode,
4688 							op1, cmp_mode));
4689 
4690   /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
4691   if ((code == EQ || code == NE)
4692       && GET_CODE (op0) == BSWAP
4693       && GET_CODE (op1) == BSWAP)
4694     return simplify_gen_relational (code, mode, cmp_mode,
4695 				    XEXP (op0, 0), XEXP (op1, 0));
4696 
4697   if (op0code == POPCOUNT && op1 == const0_rtx)
4698     switch (code)
4699       {
4700       case EQ:
4701       case LE:
4702       case LEU:
4703 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4704 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4705 					XEXP (op0, 0), const0_rtx);
4706 
4707       case NE:
4708       case GT:
4709       case GTU:
4710 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4711 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4712 					XEXP (op0, 0), const0_rtx);
4713 
4714       default:
4715 	break;
4716       }
4717 
4718   return NULL_RTX;
4719 }
4720 
4721 enum
4722 {
4723   CMP_EQ = 1,
4724   CMP_LT = 2,
4725   CMP_GT = 4,
4726   CMP_LTU = 8,
4727   CMP_GTU = 16
4728 };
4729 
4730 
4731 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4732    KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4733    For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4734    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4735    For floating-point comparisons, assume that the operands were ordered.  */
4736 
4737 static rtx
4738 comparison_result (enum rtx_code code, int known_results)
4739 {
4740   switch (code)
4741     {
4742     case EQ:
4743     case UNEQ:
4744       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4745     case NE:
4746     case LTGT:
4747       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4748 
4749     case LT:
4750     case UNLT:
4751       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4752     case GE:
4753     case UNGE:
4754       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4755 
4756     case GT:
4757     case UNGT:
4758       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4759     case LE:
4760     case UNLE:
4761       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4762 
4763     case LTU:
4764       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4765     case GEU:
4766       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4767 
4768     case GTU:
4769       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4770     case LEU:
4771       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4772 
4773     case ORDERED:
4774       return const_true_rtx;
4775     case UNORDERED:
4776       return const0_rtx;
4777     default:
4778       gcc_unreachable ();
4779     }
4780 }
4781 
4782 /* Check if the given comparison (done in the given MODE) is actually
4783    a tautology or a contradiction.  If the mode is VOID_mode, the
4784    comparison is done in "infinite precision".  If no simplification
4785    is possible, this function returns zero.  Otherwise, it returns
4786    either const_true_rtx or const0_rtx.  */
4787 
4788 rtx
4789 simplify_const_relational_operation (enum rtx_code code,
4790 				     machine_mode mode,
4791 				     rtx op0, rtx op1)
4792 {
4793   rtx tem;
4794   rtx trueop0;
4795   rtx trueop1;
4796 
4797   gcc_assert (mode != VOIDmode
4798 	      || (GET_MODE (op0) == VOIDmode
4799 		  && GET_MODE (op1) == VOIDmode));
4800 
4801   /* If op0 is a compare, extract the comparison arguments from it.  */
4802   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4803     {
4804       op1 = XEXP (op0, 1);
4805       op0 = XEXP (op0, 0);
4806 
4807       if (GET_MODE (op0) != VOIDmode)
4808 	mode = GET_MODE (op0);
4809       else if (GET_MODE (op1) != VOIDmode)
4810 	mode = GET_MODE (op1);
4811       else
4812 	return 0;
4813     }
4814 
4815   /* We can't simplify MODE_CC values since we don't know what the
4816      actual comparison is.  */
4817   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4818     return 0;
4819 
4820   /* Make sure the constant is second.  */
4821   if (swap_commutative_operands_p (op0, op1))
4822     {
4823       tem = op0, op0 = op1, op1 = tem;
4824       code = swap_condition (code);
4825     }
4826 
4827   trueop0 = avoid_constant_pool_reference (op0);
4828   trueop1 = avoid_constant_pool_reference (op1);
4829 
4830   /* For integer comparisons of A and B maybe we can simplify A - B and can
4831      then simplify a comparison of that with zero.  If A and B are both either
4832      a register or a CONST_INT, this can't help; testing for these cases will
4833      prevent infinite recursion here and speed things up.
4834 
4835      We can only do this for EQ and NE comparisons as otherwise we may
4836      lose or introduce overflow which we cannot disregard as undefined as
4837      we do not know the signedness of the operation on either the left or
4838      the right hand side of the comparison.  */
4839 
4840   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4841       && (code == EQ || code == NE)
4842       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4843 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
4844       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4845       /* We cannot do this if tem is a nonzero address.  */
4846       && ! nonzero_address_p (tem))
4847     return simplify_const_relational_operation (signed_condition (code),
4848 						mode, tem, const0_rtx);
4849 
4850   if (! HONOR_NANS (mode) && code == ORDERED)
4851     return const_true_rtx;
4852 
4853   if (! HONOR_NANS (mode) && code == UNORDERED)
4854     return const0_rtx;
4855 
4856   /* For modes without NaNs, if the two operands are equal, we know the
4857      result except if they have side-effects.  Even with NaNs we know
4858      the result of unordered comparisons and, if signaling NaNs are
4859      irrelevant, also the result of LT/GT/LTGT.  */
4860   if ((! HONOR_NANS (trueop0)
4861        || code == UNEQ || code == UNLE || code == UNGE
4862        || ((code == LT || code == GT || code == LTGT)
4863 	   && ! HONOR_SNANS (trueop0)))
4864       && rtx_equal_p (trueop0, trueop1)
4865       && ! side_effects_p (trueop0))
4866     return comparison_result (code, CMP_EQ);
4867 
4868   /* If the operands are floating-point constants, see if we can fold
4869      the result.  */
4870   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4871       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4872       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4873     {
4874       REAL_VALUE_TYPE d0, d1;
4875 
4876       REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4877       REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4878 
4879       /* Comparisons are unordered iff at least one of the values is NaN.  */
4880       if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4881 	switch (code)
4882 	  {
4883 	  case UNEQ:
4884 	  case UNLT:
4885 	  case UNGT:
4886 	  case UNLE:
4887 	  case UNGE:
4888 	  case NE:
4889 	  case UNORDERED:
4890 	    return const_true_rtx;
4891 	  case EQ:
4892 	  case LT:
4893 	  case GT:
4894 	  case LE:
4895 	  case GE:
4896 	  case LTGT:
4897 	  case ORDERED:
4898 	    return const0_rtx;
4899 	  default:
4900 	    return 0;
4901 	  }
4902 
4903       return comparison_result (code,
4904 				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4905 				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4906     }
4907 
4908   /* Otherwise, see if the operands are both integers.  */
4909   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4910       && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4911     {
4912       /* It would be nice if we really had a mode here.  However, the
4913 	 largest int representable on the target is as good as
4914 	 infinite.  */
4915       machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4916       rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4917       rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4918 
4919       if (wi::eq_p (ptrueop0, ptrueop1))
4920 	return comparison_result (code, CMP_EQ);
4921       else
4922 	{
4923 	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4924 	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4925 	  return comparison_result (code, cr);
4926 	}
4927     }
4928 
4929   /* Optimize comparisons with upper and lower bounds.  */
4930   if (HWI_COMPUTABLE_MODE_P (mode)
4931       && CONST_INT_P (trueop1)
4932       && !side_effects_p (trueop0))
4933     {
4934       int sign;
4935       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4936       HOST_WIDE_INT val = INTVAL (trueop1);
4937       HOST_WIDE_INT mmin, mmax;
4938 
4939       if (code == GEU
4940 	  || code == LEU
4941 	  || code == GTU
4942 	  || code == LTU)
4943 	sign = 0;
4944       else
4945 	sign = 1;
4946 
4947       /* Get a reduced range if the sign bit is zero.  */
4948       if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4949 	{
4950 	  mmin = 0;
4951 	  mmax = nonzero;
4952 	}
4953       else
4954 	{
4955 	  rtx mmin_rtx, mmax_rtx;
4956 	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4957 
4958 	  mmin = INTVAL (mmin_rtx);
4959 	  mmax = INTVAL (mmax_rtx);
4960 	  if (sign)
4961 	    {
4962 	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4963 
4964 	      mmin >>= (sign_copies - 1);
4965 	      mmax >>= (sign_copies - 1);
4966 	    }
4967 	}
4968 
4969       switch (code)
4970 	{
4971 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
4972 	case GEU:
4973 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4974 	    return const_true_rtx;
4975 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4976 	    return const0_rtx;
4977 	  break;
4978 	case GE:
4979 	  if (val <= mmin)
4980 	    return const_true_rtx;
4981 	  if (val > mmax)
4982 	    return const0_rtx;
4983 	  break;
4984 
4985 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
4986 	case LEU:
4987 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4988 	    return const_true_rtx;
4989 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4990 	    return const0_rtx;
4991 	  break;
4992 	case LE:
4993 	  if (val >= mmax)
4994 	    return const_true_rtx;
4995 	  if (val < mmin)
4996 	    return const0_rtx;
4997 	  break;
4998 
4999 	case EQ:
5000 	  /* x == y is always false for y out of range.  */
5001 	  if (val < mmin || val > mmax)
5002 	    return const0_rtx;
5003 	  break;
5004 
5005 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
5006 	case GTU:
5007 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5008 	    return const0_rtx;
5009 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5010 	    return const_true_rtx;
5011 	  break;
5012 	case GT:
5013 	  if (val >= mmax)
5014 	    return const0_rtx;
5015 	  if (val < mmin)
5016 	    return const_true_rtx;
5017 	  break;
5018 
5019 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
5020 	case LTU:
5021 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5022 	    return const0_rtx;
5023 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5024 	    return const_true_rtx;
5025 	  break;
5026 	case LT:
5027 	  if (val <= mmin)
5028 	    return const0_rtx;
5029 	  if (val > mmax)
5030 	    return const_true_rtx;
5031 	  break;
5032 
5033 	case NE:
5034 	  /* x != y is always true for y out of range.  */
5035 	  if (val < mmin || val > mmax)
5036 	    return const_true_rtx;
5037 	  break;
5038 
5039 	default:
5040 	  break;
5041 	}
5042     }
5043 
5044   /* Optimize integer comparisons with zero.  */
5045   if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5046     {
5047       /* Some addresses are known to be nonzero.  We don't know
5048 	 their sign, but equality comparisons are known.  */
5049       if (nonzero_address_p (trueop0))
5050 	{
5051 	  if (code == EQ || code == LEU)
5052 	    return const0_rtx;
5053 	  if (code == NE || code == GTU)
5054 	    return const_true_rtx;
5055 	}
5056 
5057       /* See if the first operand is an IOR with a constant.  If so, we
5058 	 may be able to determine the result of this comparison.  */
5059       if (GET_CODE (op0) == IOR)
5060 	{
5061 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5062 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5063 	    {
5064 	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5065 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5066 			      && (UINTVAL (inner_const)
5067 				  & ((unsigned HOST_WIDE_INT) 1
5068 				     << sign_bitnum)));
5069 
5070 	      switch (code)
5071 		{
5072 		case EQ:
5073 		case LEU:
5074 		  return const0_rtx;
5075 		case NE:
5076 		case GTU:
5077 		  return const_true_rtx;
5078 		case LT:
5079 		case LE:
5080 		  if (has_sign)
5081 		    return const_true_rtx;
5082 		  break;
5083 		case GT:
5084 		case GE:
5085 		  if (has_sign)
5086 		    return const0_rtx;
5087 		  break;
5088 		default:
5089 		  break;
5090 		}
5091 	    }
5092 	}
5093     }
5094 
5095   /* Optimize comparison of ABS with zero.  */
5096   if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5097       && (GET_CODE (trueop0) == ABS
5098 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
5099 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5100     {
5101       switch (code)
5102 	{
5103 	case LT:
5104 	  /* Optimize abs(x) < 0.0.  */
5105 	  if (!HONOR_SNANS (mode)
5106 	      && (!INTEGRAL_MODE_P (mode)
5107 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5108 	    {
5109 	      if (INTEGRAL_MODE_P (mode)
5110 		  && (issue_strict_overflow_warning
5111 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5112 		warning (OPT_Wstrict_overflow,
5113 			 ("assuming signed overflow does not occur when "
5114 			  "assuming abs (x) < 0 is false"));
5115 	       return const0_rtx;
5116 	    }
5117 	  break;
5118 
5119 	case GE:
5120 	  /* Optimize abs(x) >= 0.0.  */
5121 	  if (!HONOR_NANS (mode)
5122 	      && (!INTEGRAL_MODE_P (mode)
5123 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5124 	    {
5125 	      if (INTEGRAL_MODE_P (mode)
5126 	          && (issue_strict_overflow_warning
5127 	    	  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5128 	        warning (OPT_Wstrict_overflow,
5129 			 ("assuming signed overflow does not occur when "
5130 			  "assuming abs (x) >= 0 is true"));
5131 	      return const_true_rtx;
5132 	    }
5133 	  break;
5134 
5135 	case UNGE:
5136 	  /* Optimize ! (abs(x) < 0.0).  */
5137 	  return const_true_rtx;
5138 
5139 	default:
5140 	  break;
5141 	}
5142     }
5143 
5144   return 0;
5145 }
5146 
5147 /* Simplify CODE, an operation with result mode MODE and three operands,
5148    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
5149    a constant.  Return 0 if no simplifications is possible.  */
5150 
5151 rtx
5152 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5153 			    machine_mode op0_mode, rtx op0, rtx op1,
5154 			    rtx op2)
5155 {
5156   unsigned int width = GET_MODE_PRECISION (mode);
5157   bool any_change = false;
5158   rtx tem, trueop2;
5159 
5160   /* VOIDmode means "infinite" precision.  */
5161   if (width == 0)
5162     width = HOST_BITS_PER_WIDE_INT;
5163 
5164   switch (code)
5165     {
5166     case FMA:
5167       /* Simplify negations around the multiplication.  */
5168       /* -a * -b + c  =>  a * b + c.  */
5169       if (GET_CODE (op0) == NEG)
5170 	{
5171 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
5172 	  if (tem)
5173 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5174 	}
5175       else if (GET_CODE (op1) == NEG)
5176 	{
5177 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
5178 	  if (tem)
5179 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5180 	}
5181 
5182       /* Canonicalize the two multiplication operands.  */
5183       /* a * -b + c  =>  -b * a + c.  */
5184       if (swap_commutative_operands_p (op0, op1))
5185 	tem = op0, op0 = op1, op1 = tem, any_change = true;
5186 
5187       if (any_change)
5188 	return gen_rtx_FMA (mode, op0, op1, op2);
5189       return NULL_RTX;
5190 
5191     case SIGN_EXTRACT:
5192     case ZERO_EXTRACT:
5193       if (CONST_INT_P (op0)
5194 	  && CONST_INT_P (op1)
5195 	  && CONST_INT_P (op2)
5196 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5197 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5198 	{
5199 	  /* Extracting a bit-field from a constant */
5200 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
5201 	  HOST_WIDE_INT op1val = INTVAL (op1);
5202 	  HOST_WIDE_INT op2val = INTVAL (op2);
5203 	  if (BITS_BIG_ENDIAN)
5204 	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5205 	  else
5206 	    val >>= op2val;
5207 
5208 	  if (HOST_BITS_PER_WIDE_INT != op1val)
5209 	    {
5210 	      /* First zero-extend.  */
5211 	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5212 	      /* If desired, propagate sign bit.  */
5213 	      if (code == SIGN_EXTRACT
5214 		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5215 		     != 0)
5216 		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5217 	    }
5218 
5219 	  return gen_int_mode (val, mode);
5220 	}
5221       break;
5222 
5223     case IF_THEN_ELSE:
5224       if (CONST_INT_P (op0))
5225 	return op0 != const0_rtx ? op1 : op2;
5226 
5227       /* Convert c ? a : a into "a".  */
5228       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5229 	return op1;
5230 
5231       /* Convert a != b ? a : b into "a".  */
5232       if (GET_CODE (op0) == NE
5233 	  && ! side_effects_p (op0)
5234 	  && ! HONOR_NANS (mode)
5235 	  && ! HONOR_SIGNED_ZEROS (mode)
5236 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5237 	       && rtx_equal_p (XEXP (op0, 1), op2))
5238 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5239 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5240 	return op1;
5241 
5242       /* Convert a == b ? a : b into "b".  */
5243       if (GET_CODE (op0) == EQ
5244 	  && ! side_effects_p (op0)
5245 	  && ! HONOR_NANS (mode)
5246 	  && ! HONOR_SIGNED_ZEROS (mode)
5247 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5248 	       && rtx_equal_p (XEXP (op0, 1), op2))
5249 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5250 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5251 	return op2;
5252 
5253       if (COMPARISON_P (op0) && ! side_effects_p (op0))
5254 	{
5255 	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5256 					? GET_MODE (XEXP (op0, 1))
5257 					: GET_MODE (XEXP (op0, 0)));
5258 	  rtx temp;
5259 
5260 	  /* Look for happy constants in op1 and op2.  */
5261 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
5262 	    {
5263 	      HOST_WIDE_INT t = INTVAL (op1);
5264 	      HOST_WIDE_INT f = INTVAL (op2);
5265 
5266 	      if (t == STORE_FLAG_VALUE && f == 0)
5267 	        code = GET_CODE (op0);
5268 	      else if (t == 0 && f == STORE_FLAG_VALUE)
5269 		{
5270 		  enum rtx_code tmp;
5271 		  tmp = reversed_comparison_code (op0, NULL_RTX);
5272 		  if (tmp == UNKNOWN)
5273 		    break;
5274 		  code = tmp;
5275 		}
5276 	      else
5277 		break;
5278 
5279 	      return simplify_gen_relational (code, mode, cmp_mode,
5280 					      XEXP (op0, 0), XEXP (op0, 1));
5281 	    }
5282 
5283 	  if (cmp_mode == VOIDmode)
5284 	    cmp_mode = op0_mode;
5285 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5286 			  			cmp_mode, XEXP (op0, 0),
5287 						XEXP (op0, 1));
5288 
5289 	  /* See if any simplifications were possible.  */
5290 	  if (temp)
5291 	    {
5292 	      if (CONST_INT_P (temp))
5293 		return temp == const0_rtx ? op2 : op1;
5294 	      else if (temp)
5295 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5296 	    }
5297 	}
5298       break;
5299 
5300     case VEC_MERGE:
5301       gcc_assert (GET_MODE (op0) == mode);
5302       gcc_assert (GET_MODE (op1) == mode);
5303       gcc_assert (VECTOR_MODE_P (mode));
5304       trueop2 = avoid_constant_pool_reference (op2);
5305       if (CONST_INT_P (trueop2))
5306 	{
5307 	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5308 	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5309 	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5310 	  unsigned HOST_WIDE_INT mask;
5311 	  if (n_elts == HOST_BITS_PER_WIDE_INT)
5312 	    mask = -1;
5313 	  else
5314 	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5315 
5316 	  if (!(sel & mask) && !side_effects_p (op0))
5317 	    return op1;
5318 	  if ((sel & mask) == mask && !side_effects_p (op1))
5319 	    return op0;
5320 
5321 	  rtx trueop0 = avoid_constant_pool_reference (op0);
5322 	  rtx trueop1 = avoid_constant_pool_reference (op1);
5323 	  if (GET_CODE (trueop0) == CONST_VECTOR
5324 	      && GET_CODE (trueop1) == CONST_VECTOR)
5325 	    {
5326 	      rtvec v = rtvec_alloc (n_elts);
5327 	      unsigned int i;
5328 
5329 	      for (i = 0; i < n_elts; i++)
5330 		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5331 				    ? CONST_VECTOR_ELT (trueop0, i)
5332 				    : CONST_VECTOR_ELT (trueop1, i));
5333 	      return gen_rtx_CONST_VECTOR (mode, v);
5334 	    }
5335 
5336 	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5337 	     if no element from a appears in the result.  */
5338 	  if (GET_CODE (op0) == VEC_MERGE)
5339 	    {
5340 	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
5341 	      if (CONST_INT_P (tem))
5342 		{
5343 		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5344 		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5345 		    return simplify_gen_ternary (code, mode, mode,
5346 						 XEXP (op0, 1), op1, op2);
5347 		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5348 		    return simplify_gen_ternary (code, mode, mode,
5349 						 XEXP (op0, 0), op1, op2);
5350 		}
5351 	    }
5352 	  if (GET_CODE (op1) == VEC_MERGE)
5353 	    {
5354 	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
5355 	      if (CONST_INT_P (tem))
5356 		{
5357 		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5358 		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5359 		    return simplify_gen_ternary (code, mode, mode,
5360 						 op0, XEXP (op1, 1), op2);
5361 		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5362 		    return simplify_gen_ternary (code, mode, mode,
5363 						 op0, XEXP (op1, 0), op2);
5364 		}
5365 	    }
5366 
5367 	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5368 	     with a.  */
5369 	  if (GET_CODE (op0) == VEC_DUPLICATE
5370 	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5371 	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5372 	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5373 	    {
5374 	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5375 	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
5376 		{
5377 		  if (XEXP (XEXP (op0, 0), 0) == op1
5378 		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5379 		    return op1;
5380 		}
5381 	    }
5382 	}
5383 
5384       if (rtx_equal_p (op0, op1)
5385 	  && !side_effects_p (op2) && !side_effects_p (op1))
5386 	return op0;
5387 
5388       break;
5389 
5390     default:
5391       gcc_unreachable ();
5392     }
5393 
5394   return 0;
5395 }
5396 
5397 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5398    or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5399    CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5400 
5401    Works by unpacking OP into a collection of 8-bit values
5402    represented as a little-endian array of 'unsigned char', selecting by BYTE,
5403    and then repacking them again for OUTERMODE.  */
5404 
5405 static rtx
5406 simplify_immed_subreg (machine_mode outermode, rtx op,
5407 		       machine_mode innermode, unsigned int byte)
5408 {
5409   enum {
5410     value_bit = 8,
5411     value_mask = (1 << value_bit) - 1
5412   };
5413   unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5414   int value_start;
5415   int i;
5416   int elem;
5417 
5418   int num_elem;
5419   rtx * elems;
5420   int elem_bitsize;
5421   rtx result_s;
5422   rtvec result_v = NULL;
5423   enum mode_class outer_class;
5424   machine_mode outer_submode;
5425   int max_bitsize;
5426 
5427   /* Some ports misuse CCmode.  */
5428   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5429     return op;
5430 
5431   /* We have no way to represent a complex constant at the rtl level.  */
5432   if (COMPLEX_MODE_P (outermode))
5433     return NULL_RTX;
5434 
5435   /* We support any size mode.  */
5436   max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5437 		     GET_MODE_BITSIZE (innermode));
5438 
5439   /* Unpack the value.  */
5440 
5441   if (GET_CODE (op) == CONST_VECTOR)
5442     {
5443       num_elem = CONST_VECTOR_NUNITS (op);
5444       elems = &CONST_VECTOR_ELT (op, 0);
5445       elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5446     }
5447   else
5448     {
5449       num_elem = 1;
5450       elems = &op;
5451       elem_bitsize = max_bitsize;
5452     }
5453   /* If this asserts, it is too complicated; reducing value_bit may help.  */
5454   gcc_assert (BITS_PER_UNIT % value_bit == 0);
5455   /* I don't know how to handle endianness of sub-units.  */
5456   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5457 
5458   for (elem = 0; elem < num_elem; elem++)
5459     {
5460       unsigned char * vp;
5461       rtx el = elems[elem];
5462 
5463       /* Vectors are kept in target memory order.  (This is probably
5464 	 a mistake.)  */
5465       {
5466 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5467 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5468 			  / BITS_PER_UNIT);
5469 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5470 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5471 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5472 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5473 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5474       }
5475 
5476       switch (GET_CODE (el))
5477 	{
5478 	case CONST_INT:
5479 	  for (i = 0;
5480 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5481 	       i += value_bit)
5482 	    *vp++ = INTVAL (el) >> i;
5483 	  /* CONST_INTs are always logically sign-extended.  */
5484 	  for (; i < elem_bitsize; i += value_bit)
5485 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
5486 	  break;
5487 
5488 	case CONST_WIDE_INT:
5489 	  {
5490 	    rtx_mode_t val = std::make_pair (el, innermode);
5491 	    unsigned char extend = wi::sign_mask (val);
5492 
5493 	    for (i = 0; i < elem_bitsize; i += value_bit)
5494 	      *vp++ = wi::extract_uhwi (val, i, value_bit);
5495 	    for (; i < elem_bitsize; i += value_bit)
5496 	      *vp++ = extend;
5497 	  }
5498 	  break;
5499 
5500 	case CONST_DOUBLE:
5501 	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5502 	    {
5503 	      unsigned char extend = 0;
5504 	      /* If this triggers, someone should have generated a
5505 		 CONST_INT instead.  */
5506 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5507 
5508 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5509 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
5510 	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5511 		{
5512 		  *vp++
5513 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5514 		  i += value_bit;
5515 		}
5516 
5517 	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5518 		extend = -1;
5519 	      for (; i < elem_bitsize; i += value_bit)
5520 		*vp++ = extend;
5521 	    }
5522 	  else
5523 	    {
5524 	      /* This is big enough for anything on the platform.  */
5525 	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5526 	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5527 
5528 	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5529 	      gcc_assert (bitsize <= elem_bitsize);
5530 	      gcc_assert (bitsize % value_bit == 0);
5531 
5532 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5533 			      GET_MODE (el));
5534 
5535 	      /* real_to_target produces its result in words affected by
5536 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5537 		 and use WORDS_BIG_ENDIAN instead; see the documentation
5538 	         of SUBREG in rtl.texi.  */
5539 	      for (i = 0; i < bitsize; i += value_bit)
5540 		{
5541 		  int ibase;
5542 		  if (WORDS_BIG_ENDIAN)
5543 		    ibase = bitsize - 1 - i;
5544 		  else
5545 		    ibase = i;
5546 		  *vp++ = tmp[ibase / 32] >> i % 32;
5547 		}
5548 
5549 	      /* It shouldn't matter what's done here, so fill it with
5550 		 zero.  */
5551 	      for (; i < elem_bitsize; i += value_bit)
5552 		*vp++ = 0;
5553 	    }
5554 	  break;
5555 
5556         case CONST_FIXED:
5557 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5558 	    {
5559 	      for (i = 0; i < elem_bitsize; i += value_bit)
5560 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5561 	    }
5562 	  else
5563 	    {
5564 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5565 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5566               for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5567 		   i += value_bit)
5568 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
5569 			>> (i - HOST_BITS_PER_WIDE_INT);
5570 	      for (; i < elem_bitsize; i += value_bit)
5571 		*vp++ = 0;
5572 	    }
5573           break;
5574 
5575 	default:
5576 	  gcc_unreachable ();
5577 	}
5578     }
5579 
5580   /* Now, pick the right byte to start with.  */
5581   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
5582      case is paradoxical SUBREGs, which shouldn't be adjusted since they
5583      will already have offset 0.  */
5584   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5585     {
5586       unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5587 			- byte);
5588       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5589       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5590       byte = (subword_byte % UNITS_PER_WORD
5591 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5592     }
5593 
5594   /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
5595      so if it's become negative it will instead be very large.)  */
5596   gcc_assert (byte < GET_MODE_SIZE (innermode));
5597 
5598   /* Convert from bytes to chunks of size value_bit.  */
5599   value_start = byte * (BITS_PER_UNIT / value_bit);
5600 
5601   /* Re-pack the value.  */
5602 
5603   if (VECTOR_MODE_P (outermode))
5604     {
5605       num_elem = GET_MODE_NUNITS (outermode);
5606       result_v = rtvec_alloc (num_elem);
5607       elems = &RTVEC_ELT (result_v, 0);
5608       outer_submode = GET_MODE_INNER (outermode);
5609     }
5610   else
5611     {
5612       num_elem = 1;
5613       elems = &result_s;
5614       outer_submode = outermode;
5615     }
5616 
5617   outer_class = GET_MODE_CLASS (outer_submode);
5618   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5619 
5620   gcc_assert (elem_bitsize % value_bit == 0);
5621   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5622 
5623   for (elem = 0; elem < num_elem; elem++)
5624     {
5625       unsigned char *vp;
5626 
5627       /* Vectors are stored in target memory order.  (This is probably
5628 	 a mistake.)  */
5629       {
5630 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5631 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5632 			  / BITS_PER_UNIT);
5633 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5634 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5635 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5636 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5637 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5638       }
5639 
5640       switch (outer_class)
5641 	{
5642 	case MODE_INT:
5643 	case MODE_PARTIAL_INT:
5644 	  {
5645 	    int u;
5646 	    int base = 0;
5647 	    int units
5648 	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5649 	      / HOST_BITS_PER_WIDE_INT;
5650 	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5651 	    wide_int r;
5652 
5653 	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5654 	      return NULL_RTX;
5655 	    for (u = 0; u < units; u++)
5656 	      {
5657 		unsigned HOST_WIDE_INT buf = 0;
5658 		for (i = 0;
5659 		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5660 		     i += value_bit)
5661 		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5662 
5663 		tmp[u] = buf;
5664 		base += HOST_BITS_PER_WIDE_INT;
5665 	      }
5666 	    r = wide_int::from_array (tmp, units,
5667 				      GET_MODE_PRECISION (outer_submode));
5668 #if TARGET_SUPPORTS_WIDE_INT == 0
5669 	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
5670 	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5671 	      return NULL_RTX;
5672 #endif
5673 	    elems[elem] = immed_wide_int_const (r, outer_submode);
5674 	  }
5675 	  break;
5676 
5677 	case MODE_FLOAT:
5678 	case MODE_DECIMAL_FLOAT:
5679 	  {
5680 	    REAL_VALUE_TYPE r;
5681 	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5682 
5683 	    /* real_from_target wants its input in words affected by
5684 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5685 	       and use WORDS_BIG_ENDIAN instead; see the documentation
5686 	       of SUBREG in rtl.texi.  */
5687 	    for (i = 0; i < max_bitsize / 32; i++)
5688 	      tmp[i] = 0;
5689 	    for (i = 0; i < elem_bitsize; i += value_bit)
5690 	      {
5691 		int ibase;
5692 		if (WORDS_BIG_ENDIAN)
5693 		  ibase = elem_bitsize - 1 - i;
5694 		else
5695 		  ibase = i;
5696 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5697 	      }
5698 
5699 	    real_from_target (&r, tmp, outer_submode);
5700 	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5701 	  }
5702 	  break;
5703 
5704 	case MODE_FRACT:
5705 	case MODE_UFRACT:
5706 	case MODE_ACCUM:
5707 	case MODE_UACCUM:
5708 	  {
5709 	    FIXED_VALUE_TYPE f;
5710 	    f.data.low = 0;
5711 	    f.data.high = 0;
5712 	    f.mode = outer_submode;
5713 
5714 	    for (i = 0;
5715 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5716 		 i += value_bit)
5717 	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5718 	    for (; i < elem_bitsize; i += value_bit)
5719 	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5720 			     << (i - HOST_BITS_PER_WIDE_INT));
5721 
5722 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5723           }
5724           break;
5725 
5726 	default:
5727 	  gcc_unreachable ();
5728 	}
5729     }
5730   if (VECTOR_MODE_P (outermode))
5731     return gen_rtx_CONST_VECTOR (outermode, result_v);
5732   else
5733     return result_s;
5734 }
5735 
5736 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5737    Return 0 if no simplifications are possible.  */
5738 rtx
5739 simplify_subreg (machine_mode outermode, rtx op,
5740 		 machine_mode innermode, unsigned int byte)
5741 {
5742   /* Little bit of sanity checking.  */
5743   gcc_assert (innermode != VOIDmode);
5744   gcc_assert (outermode != VOIDmode);
5745   gcc_assert (innermode != BLKmode);
5746   gcc_assert (outermode != BLKmode);
5747 
5748   gcc_assert (GET_MODE (op) == innermode
5749 	      || GET_MODE (op) == VOIDmode);
5750 
5751   if ((byte % GET_MODE_SIZE (outermode)) != 0)
5752     return NULL_RTX;
5753 
5754   if (byte >= GET_MODE_SIZE (innermode))
5755     return NULL_RTX;
5756 
5757   if (outermode == innermode && !byte)
5758     return op;
5759 
5760   if (CONST_SCALAR_INT_P (op)
5761       || CONST_DOUBLE_AS_FLOAT_P (op)
5762       || GET_CODE (op) == CONST_FIXED
5763       || GET_CODE (op) == CONST_VECTOR)
5764     return simplify_immed_subreg (outermode, op, innermode, byte);
5765 
5766   /* Changing mode twice with SUBREG => just change it once,
5767      or not at all if changing back op starting mode.  */
5768   if (GET_CODE (op) == SUBREG)
5769     {
5770       machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5771       int final_offset = byte + SUBREG_BYTE (op);
5772       rtx newx;
5773 
5774       if (outermode == innermostmode
5775 	  && byte == 0 && SUBREG_BYTE (op) == 0)
5776 	return SUBREG_REG (op);
5777 
5778       /* The SUBREG_BYTE represents offset, as if the value were stored
5779 	 in memory.  Irritating exception is paradoxical subreg, where
5780 	 we define SUBREG_BYTE to be 0.  On big endian machines, this
5781 	 value should be negative.  For a moment, undo this exception.  */
5782       if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5783 	{
5784 	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5785 	  if (WORDS_BIG_ENDIAN)
5786 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5787 	  if (BYTES_BIG_ENDIAN)
5788 	    final_offset += difference % UNITS_PER_WORD;
5789 	}
5790       if (SUBREG_BYTE (op) == 0
5791 	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5792 	{
5793 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5794 	  if (WORDS_BIG_ENDIAN)
5795 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5796 	  if (BYTES_BIG_ENDIAN)
5797 	    final_offset += difference % UNITS_PER_WORD;
5798 	}
5799 
5800       /* See whether resulting subreg will be paradoxical.  */
5801       if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5802 	{
5803 	  /* In nonparadoxical subregs we can't handle negative offsets.  */
5804 	  if (final_offset < 0)
5805 	    return NULL_RTX;
5806 	  /* Bail out in case resulting subreg would be incorrect.  */
5807 	  if (final_offset % GET_MODE_SIZE (outermode)
5808 	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5809 	    return NULL_RTX;
5810 	}
5811       else
5812 	{
5813 	  int offset = 0;
5814 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5815 
5816 	  /* In paradoxical subreg, see if we are still looking on lower part.
5817 	     If so, our SUBREG_BYTE will be 0.  */
5818 	  if (WORDS_BIG_ENDIAN)
5819 	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5820 	  if (BYTES_BIG_ENDIAN)
5821 	    offset += difference % UNITS_PER_WORD;
5822 	  if (offset == final_offset)
5823 	    final_offset = 0;
5824 	  else
5825 	    return NULL_RTX;
5826 	}
5827 
5828       /* Recurse for further possible simplifications.  */
5829       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5830 			      final_offset);
5831       if (newx)
5832 	return newx;
5833       if (validate_subreg (outermode, innermostmode,
5834 			   SUBREG_REG (op), final_offset))
5835 	{
5836 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5837 	  if (SUBREG_PROMOTED_VAR_P (op)
5838 	      && SUBREG_PROMOTED_SIGN (op) >= 0
5839 	      && GET_MODE_CLASS (outermode) == MODE_INT
5840 	      && IN_RANGE (GET_MODE_SIZE (outermode),
5841 			   GET_MODE_SIZE (innermode),
5842 			   GET_MODE_SIZE (innermostmode))
5843 	      && subreg_lowpart_p (newx))
5844 	    {
5845 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
5846 	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5847 	    }
5848 	  return newx;
5849 	}
5850       return NULL_RTX;
5851     }
5852 
5853   /* SUBREG of a hard register => just change the register number
5854      and/or mode.  If the hard register is not valid in that mode,
5855      suppress this simplification.  If the hard register is the stack,
5856      frame, or argument pointer, leave this as a SUBREG.  */
5857 
5858   if (REG_P (op) && HARD_REGISTER_P (op))
5859     {
5860       unsigned int regno, final_regno;
5861 
5862       regno = REGNO (op);
5863       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5864       if (HARD_REGISTER_NUM_P (final_regno))
5865 	{
5866 	  rtx x;
5867 	  int final_offset = byte;
5868 
5869 	  /* Adjust offset for paradoxical subregs.  */
5870 	  if (byte == 0
5871 	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5872 	    {
5873 	      int difference = (GET_MODE_SIZE (innermode)
5874 				- GET_MODE_SIZE (outermode));
5875 	      if (WORDS_BIG_ENDIAN)
5876 		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5877 	      if (BYTES_BIG_ENDIAN)
5878 		final_offset += difference % UNITS_PER_WORD;
5879 	    }
5880 
5881 	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5882 
5883 	  /* Propagate original regno.  We don't have any way to specify
5884 	     the offset inside original regno, so do so only for lowpart.
5885 	     The information is used only by alias analysis that can not
5886 	     grog partial register anyway.  */
5887 
5888 	  if (subreg_lowpart_offset (outermode, innermode) == byte)
5889 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5890 	  return x;
5891 	}
5892     }
5893 
5894   /* If we have a SUBREG of a register that we are replacing and we are
5895      replacing it with a MEM, make a new MEM and try replacing the
5896      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
5897      or if we would be widening it.  */
5898 
5899   if (MEM_P (op)
5900       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5901       /* Allow splitting of volatile memory references in case we don't
5902          have instruction to move the whole thing.  */
5903       && (! MEM_VOLATILE_P (op)
5904 	  || ! have_insn_for (SET, innermode))
5905       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5906     return adjust_address_nv (op, outermode, byte);
5907 
5908   /* Handle complex values represented as CONCAT
5909      of real and imaginary part.  */
5910   if (GET_CODE (op) == CONCAT)
5911     {
5912       unsigned int part_size, final_offset;
5913       rtx part, res;
5914 
5915       part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5916       if (byte < part_size)
5917 	{
5918 	  part = XEXP (op, 0);
5919 	  final_offset = byte;
5920 	}
5921       else
5922 	{
5923 	  part = XEXP (op, 1);
5924 	  final_offset = byte - part_size;
5925 	}
5926 
5927       if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5928 	return NULL_RTX;
5929 
5930       res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5931       if (res)
5932 	return res;
5933       if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5934 	return gen_rtx_SUBREG (outermode, part, final_offset);
5935       return NULL_RTX;
5936     }
5937 
5938   /* A SUBREG resulting from a zero extension may fold to zero if
5939      it extracts higher bits that the ZERO_EXTEND's source bits.  */
5940   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5941     {
5942       unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5943       if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5944 	return CONST0_RTX (outermode);
5945     }
5946 
5947   if (SCALAR_INT_MODE_P (outermode)
5948       && SCALAR_INT_MODE_P (innermode)
5949       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5950       && byte == subreg_lowpart_offset (outermode, innermode))
5951     {
5952       rtx tem = simplify_truncation (outermode, op, innermode);
5953       if (tem)
5954 	return tem;
5955     }
5956 
5957   return NULL_RTX;
5958 }
5959 
5960 /* Make a SUBREG operation or equivalent if it folds.  */
5961 
5962 rtx
5963 simplify_gen_subreg (machine_mode outermode, rtx op,
5964 		     machine_mode innermode, unsigned int byte)
5965 {
5966   rtx newx;
5967 
5968   newx = simplify_subreg (outermode, op, innermode, byte);
5969   if (newx)
5970     return newx;
5971 
5972   if (GET_CODE (op) == SUBREG
5973       || GET_CODE (op) == CONCAT
5974       || GET_MODE (op) == VOIDmode)
5975     return NULL_RTX;
5976 
5977   if (validate_subreg (outermode, innermode, op, byte))
5978     return gen_rtx_SUBREG (outermode, op, byte);
5979 
5980   return NULL_RTX;
5981 }
5982 
5983 /* Simplify X, an rtx expression.
5984 
5985    Return the simplified expression or NULL if no simplifications
5986    were possible.
5987 
5988    This is the preferred entry point into the simplification routines;
5989    however, we still allow passes to call the more specific routines.
5990 
5991    Right now GCC has three (yes, three) major bodies of RTL simplification
5992    code that need to be unified.
5993 
5994 	1. fold_rtx in cse.c.  This code uses various CSE specific
5995 	   information to aid in RTL simplification.
5996 
5997 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
5998 	   it uses combine specific information to aid in RTL
5999 	   simplification.
6000 
6001 	3. The routines in this file.
6002 
6003 
6004    Long term we want to only have one body of simplification code; to
6005    get to that state I recommend the following steps:
6006 
6007 	1. Pour over fold_rtx & simplify_rtx and move any simplifications
6008 	   which are not pass dependent state into these routines.
6009 
6010 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
6011 	   use this routine whenever possible.
6012 
6013 	3. Allow for pass dependent state to be provided to these
6014 	   routines and add simplifications based on the pass dependent
6015 	   state.  Remove code from cse.c & combine.c that becomes
6016 	   redundant/dead.
6017 
6018     It will take time, but ultimately the compiler will be easier to
6019     maintain and improve.  It's totally silly that when we add a
6020     simplification that it needs to be added to 4 places (3 for RTL
6021     simplification and 1 for tree simplification.  */
6022 
6023 rtx
6024 simplify_rtx (const_rtx x)
6025 {
6026   const enum rtx_code code = GET_CODE (x);
6027   const machine_mode mode = GET_MODE (x);
6028 
6029   switch (GET_RTX_CLASS (code))
6030     {
6031     case RTX_UNARY:
6032       return simplify_unary_operation (code, mode,
6033 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6034     case RTX_COMM_ARITH:
6035       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6036 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6037 
6038       /* Fall through....  */
6039 
6040     case RTX_BIN_ARITH:
6041       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6042 
6043     case RTX_TERNARY:
6044     case RTX_BITFIELD_OPS:
6045       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6046 					 XEXP (x, 0), XEXP (x, 1),
6047 					 XEXP (x, 2));
6048 
6049     case RTX_COMPARE:
6050     case RTX_COMM_COMPARE:
6051       return simplify_relational_operation (code, mode,
6052                                             ((GET_MODE (XEXP (x, 0))
6053                                              != VOIDmode)
6054                                             ? GET_MODE (XEXP (x, 0))
6055                                             : GET_MODE (XEXP (x, 1))),
6056                                             XEXP (x, 0),
6057                                             XEXP (x, 1));
6058 
6059     case RTX_EXTRA:
6060       if (code == SUBREG)
6061 	return simplify_subreg (mode, SUBREG_REG (x),
6062 				GET_MODE (SUBREG_REG (x)),
6063 				SUBREG_BYTE (x));
6064       break;
6065 
6066     case RTX_OBJ:
6067       if (code == LO_SUM)
6068 	{
6069 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
6070 	  if (GET_CODE (XEXP (x, 0)) == HIGH
6071 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6072 	  return XEXP (x, 1);
6073 	}
6074       break;
6075 
6076     default:
6077       break;
6078     }
6079   return NULL;
6080 }
6081