1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987-2015 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "hash-set.h"
27 #include "machmode.h"
28 #include "vec.h"
29 #include "double-int.h"
30 #include "input.h"
31 #include "alias.h"
32 #include "symtab.h"
33 #include "wide-int.h"
34 #include "inchash.h"
35 #include "tree.h"
36 #include "fold-const.h"
37 #include "varasm.h"
38 #include "tm_p.h"
39 #include "regs.h"
40 #include "hard-reg-set.h"
41 #include "flags.h"
42 #include "insn-config.h"
43 #include "recog.h"
44 #include "function.h"
45 #include "insn-codes.h"
46 #include "optabs.h"
47 #include "hashtab.h"
48 #include "statistics.h"
49 #include "real.h"
50 #include "fixed-value.h"
51 #include "expmed.h"
52 #include "dojump.h"
53 #include "explow.h"
54 #include "calls.h"
55 #include "emit-rtl.h"
56 #include "stmt.h"
57 #include "expr.h"
58 #include "diagnostic-core.h"
59 #include "ggc.h"
60 #include "target.h"
61 #include "predict.h"
62 
63 /* Simplification and canonicalization of RTL.  */
64 
65 /* Much code operates on (low, high) pairs; the low value is an
66    unsigned wide int, the high value a signed wide int.  We
67    occasionally need to sign extend from low to high as if low were a
68    signed wide int.  */
69 #define HWI_SIGN_EXTEND(low) \
70  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
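
/* Illustrative sketch, not part of the original file: one way a caller
   might use HWI_SIGN_EXTEND to build a signed (low, high) pair from a
   single HOST_WIDE_INT.  The helper name is invented for the example and
   the block is compiled out.  */
#if 0
static void
example_extend_pair (HOST_WIDE_INT low,
		     unsigned HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh)
{
  /* The high word is all ones when LOW is negative, all zeros otherwise.  */
  *plow = (unsigned HOST_WIDE_INT) low;
  *phigh = HWI_SIGN_EXTEND (low);
}
#endif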
71 
72 static rtx neg_const_int (machine_mode, const_rtx);
73 static bool plus_minus_operand_p (const_rtx);
74 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
75 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
76 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
77 				  unsigned int);
78 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
79 					   rtx, rtx);
80 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
81 					    machine_mode, rtx, rtx);
82 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
83 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
84 					rtx, rtx, rtx, rtx);
85 
86 /* Negate a CONST_INT rtx, truncating (because a conversion from a
87    maximally negative number can overflow).  */
88 static rtx
89 neg_const_int (machine_mode mode, const_rtx i)
90 {
91   return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
92 }
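
/* Worked example, added for illustration (not in the original source):
   the unsigned cast above avoids signed overflow when INTVAL (I) is the
   most negative HOST_WIDE_INT, and gen_int_mode truncates the result to
   MODE.  So, for instance,
     neg_const_int (QImode, GEN_INT (-128))  => (const_int -128)
   because +128 does not fit in QImode and is truncated back to -128.  */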
93 
94 /* Test whether expression, X, is an immediate constant that represents
95    the most significant bit of machine mode MODE.  */
96 
97 bool
98 mode_signbit_p (machine_mode mode, const_rtx x)
99 {
100   unsigned HOST_WIDE_INT val;
101   unsigned int width;
102 
103   if (GET_MODE_CLASS (mode) != MODE_INT)
104     return false;
105 
106   width = GET_MODE_PRECISION (mode);
107   if (width == 0)
108     return false;
109 
110   if (width <= HOST_BITS_PER_WIDE_INT
111       && CONST_INT_P (x))
112     val = INTVAL (x);
113 #if TARGET_SUPPORTS_WIDE_INT
114   else if (CONST_WIDE_INT_P (x))
115     {
116       unsigned int i;
117       unsigned int elts = CONST_WIDE_INT_NUNITS (x);
118       if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
119 	return false;
120       for (i = 0; i < elts - 1; i++)
121 	if (CONST_WIDE_INT_ELT (x, i) != 0)
122 	  return false;
123       val = CONST_WIDE_INT_ELT (x, elts - 1);
124       width %= HOST_BITS_PER_WIDE_INT;
125       if (width == 0)
126 	width = HOST_BITS_PER_WIDE_INT;
127     }
128 #else
129   else if (width <= HOST_BITS_PER_DOUBLE_INT
130 	   && CONST_DOUBLE_AS_INT_P (x)
131 	   && CONST_DOUBLE_LOW (x) == 0)
132     {
133       val = CONST_DOUBLE_HIGH (x);
134       width -= HOST_BITS_PER_WIDE_INT;
135     }
136 #endif
137   else
138     /* X is not an integer constant.  */
139     return false;
140 
141   if (width < HOST_BITS_PER_WIDE_INT)
142     val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
143   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
144 }
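
/* Illustrative sketch, not part of the original file: mode_signbit_p
   recognizes exactly the sign-bit constant of MODE.  The checks below are
   compiled out and only show the intended usage.  */
#if 0
static void
example_mode_signbit_checks (void)
{
  rtx sign = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);
  gcc_checking_assert (mode_signbit_p (SImode, sign));
  gcc_checking_assert (!mode_signbit_p (SImode, const1_rtx));
}
#endif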
145 
146 /* Test whether VAL is equal to the most significant bit of mode MODE
147    (after masking with the mode mask of MODE).  Returns false if the
148    precision of MODE is too large to handle.  */
149 
150 bool
151 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
152 {
153   unsigned int width;
154 
155   if (GET_MODE_CLASS (mode) != MODE_INT)
156     return false;
157 
158   width = GET_MODE_PRECISION (mode);
159   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
160     return false;
161 
162   val &= GET_MODE_MASK (mode);
163   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
164 }
165 
166 /* Test whether the most significant bit of mode MODE is set in VAL.
167    Returns false if the precision of MODE is too large to handle.  */
168 bool
169 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 {
171   unsigned int width;
172 
173   if (GET_MODE_CLASS (mode) != MODE_INT)
174     return false;
175 
176   width = GET_MODE_PRECISION (mode);
177   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178     return false;
179 
180   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
181   return val != 0;
182 }
183 
184 /* Test whether the most significant bit of mode MODE is clear in VAL.
185    Returns false if the precision of MODE is too large to handle.  */
186 bool
187 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
188 {
189   unsigned int width;
190 
191   if (GET_MODE_CLASS (mode) != MODE_INT)
192     return false;
193 
194   width = GET_MODE_PRECISION (mode);
195   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
196     return false;
197 
198   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
199   return val == 0;
200 }
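
/* Worked example, added for illustration (not in the original source):
   for QImode the sign bit is 0x80, so
     val_signbit_p (QImode, 0x80)             => true
     val_signbit_known_set_p (QImode, 0xf0)   => true   (bit 7 is set)
     val_signbit_known_clear_p (QImode, 0x7f) => true   (bit 7 is clear)
   All three return false for modes wider than HOST_BITS_PER_WIDE_INT.  */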
201 
202 /* Make a binary operation by properly ordering the operands and
203    seeing if the expression folds.  */
204 
205 rtx
206 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
207 		     rtx op1)
208 {
209   rtx tem;
210 
211   /* If this simplifies, do it.  */
212   tem = simplify_binary_operation (code, mode, op0, op1);
213   if (tem)
214     return tem;
215 
216   /* Put complex operands first and constants second if commutative.  */
217   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
218       && swap_commutative_operands_p (op0, op1))
219     tem = op0, op0 = op1, op1 = tem;
220 
221   return gen_rtx_fmt_ee (code, mode, op0, op1);
222 }
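
/* Illustrative sketch, not part of the original file: typical behaviour of
   simplify_gen_binary.  Constant operands fold immediately; otherwise the
   operands are put into canonical order before the rtx is built.  The
   snippet is compiled out and the register number is arbitrary.  */
#if 0
static void
example_simplify_gen_binary (void)
{
  /* Folds to (const_int 5).  */
  rtx sum1 = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  /* Becomes (plus:SI (reg:SI 0) (const_int 4)): the constant is second.  */
  rtx sum2 = simplify_gen_binary (PLUS, SImode, GEN_INT (4),
				  gen_rtx_REG (SImode, 0));
}
#endif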
223 
224 /* If X is a MEM referencing the constant pool, return the real value.
225    Otherwise return X.  */
226 rtx
227 avoid_constant_pool_reference (rtx x)
228 {
229   rtx c, tmp, addr;
230   machine_mode cmode;
231   HOST_WIDE_INT offset = 0;
232 
233   switch (GET_CODE (x))
234     {
235     case MEM:
236       break;
237 
238     case FLOAT_EXTEND:
239       /* Handle float extensions of constant pool references.  */
240       tmp = XEXP (x, 0);
241       c = avoid_constant_pool_reference (tmp);
242       if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
243 	{
244 	  REAL_VALUE_TYPE d;
245 
246 	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
247 	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
248 	}
249       return x;
250 
251     default:
252       return x;
253     }
254 
255   if (GET_MODE (x) == BLKmode)
256     return x;
257 
258   addr = XEXP (x, 0);
259 
260   /* Call target hook to avoid the effects of -fpic etc....  */
261   addr = targetm.delegitimize_address (addr);
262 
263   /* Split the address into a base and integer offset.  */
264   if (GET_CODE (addr) == CONST
265       && GET_CODE (XEXP (addr, 0)) == PLUS
266       && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
267     {
268       offset = INTVAL (XEXP (XEXP (addr, 0), 1));
269       addr = XEXP (XEXP (addr, 0), 0);
270     }
271 
272   if (GET_CODE (addr) == LO_SUM)
273     addr = XEXP (addr, 1);
274 
275   /* If this is a constant pool reference, we can turn it into its
276      constant and hope that simplifications happen.  */
277   if (GET_CODE (addr) == SYMBOL_REF
278       && CONSTANT_POOL_ADDRESS_P (addr))
279     {
280       c = get_pool_constant (addr);
281       cmode = get_pool_mode (addr);
282 
283       /* If we're accessing the constant in a different mode than it was
284          originally stored, attempt to fix that up via subreg simplifications.
285          If that fails we have no choice but to return the original memory.  */
286       if (offset == 0 && cmode == GET_MODE (x))
287 	return c;
288       else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
289         {
290           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
291           if (tem && CONSTANT_P (tem))
292             return tem;
293         }
294     }
295 
296   return x;
297 }
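
/* Worked example, added for illustration (not in the original source,
   and the symbol name is hypothetical): if the constant pool entry behind
   "*.LC0" holds the SImode value 42, then a reference such as
     (mem:SI (symbol_ref/u ("*.LC0")))
   is rewritten to (const_int 42), while a MEM whose address cannot be
   traced back to the pool is returned unchanged.  */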
298 
299 /* Simplify a MEM based on its attributes.  This is the default
300    delegitimize_address target hook, and it's recommended that every
301    overrider call it.  */
302 
303 rtx
304 delegitimize_mem_from_attrs (rtx x)
305 {
306   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
307      use their base addresses as equivalent.  */
308   if (MEM_P (x)
309       && MEM_EXPR (x)
310       && MEM_OFFSET_KNOWN_P (x))
311     {
312       tree decl = MEM_EXPR (x);
313       machine_mode mode = GET_MODE (x);
314       HOST_WIDE_INT offset = 0;
315 
316       switch (TREE_CODE (decl))
317 	{
318 	default:
319 	  decl = NULL;
320 	  break;
321 
322 	case VAR_DECL:
323 	  break;
324 
325 	case ARRAY_REF:
326 	case ARRAY_RANGE_REF:
327 	case COMPONENT_REF:
328 	case BIT_FIELD_REF:
329 	case REALPART_EXPR:
330 	case IMAGPART_EXPR:
331 	case VIEW_CONVERT_EXPR:
332 	  {
333 	    HOST_WIDE_INT bitsize, bitpos;
334 	    tree toffset;
335 	    int unsignedp, volatilep = 0;
336 
337 	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
338 					&mode, &unsignedp, &volatilep, false);
339 	    if (bitsize != GET_MODE_BITSIZE (mode)
340 		|| (bitpos % BITS_PER_UNIT)
341 		|| (toffset && !tree_fits_shwi_p (toffset)))
342 	      decl = NULL;
343 	    else
344 	      {
345 		offset += bitpos / BITS_PER_UNIT;
346 		if (toffset)
347 		  offset += tree_to_shwi (toffset);
348 	      }
349 	    break;
350 	  }
351 	}
352 
353       if (decl
354 	  && mode == GET_MODE (x)
355 	  && TREE_CODE (decl) == VAR_DECL
356 	  && (TREE_STATIC (decl)
357 	      || DECL_THREAD_LOCAL_P (decl))
358 	  && DECL_RTL_SET_P (decl)
359 	  && MEM_P (DECL_RTL (decl)))
360 	{
361 	  rtx newx;
362 
363 	  offset += MEM_OFFSET (x);
364 
365 	  newx = DECL_RTL (decl);
366 
367 	  if (MEM_P (newx))
368 	    {
369 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
370 
371 	      /* Avoid creating a new MEM needlessly if we already had
372 		 the same address.  We do if there's no OFFSET and the
373 		 old address X is identical to NEWX, or if X is of the
374 		 form (plus NEWX OFFSET), or the NEWX is of the form
375 		 (plus Y (const_int Z)) and X is that with the offset
376 		 added: (plus Y (const_int Z+OFFSET)).  */
377 	      if (!((offset == 0
378 		     || (GET_CODE (o) == PLUS
379 			 && GET_CODE (XEXP (o, 1)) == CONST_INT
380 			 && (offset == INTVAL (XEXP (o, 1))
381 			     || (GET_CODE (n) == PLUS
382 				 && GET_CODE (XEXP (n, 1)) == CONST_INT
383 				 && (INTVAL (XEXP (n, 1)) + offset
384 				     == INTVAL (XEXP (o, 1)))
385 				 && (n = XEXP (n, 0))))
386 			 && (o = XEXP (o, 0))))
387 		    && rtx_equal_p (o, n)))
388 		x = adjust_address_nv (newx, mode, offset);
389 	    }
390 	  else if (GET_MODE (x) == GET_MODE (newx)
391 		   && offset == 0)
392 	    x = newx;
393 	}
394     }
395 
396   return x;
397 }
398 
399 /* Make a unary operation by first seeing if it folds and otherwise making
400    the specified operation.  */
401 
402 rtx
403 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
404 		    machine_mode op_mode)
405 {
406   rtx tem;
407 
408   /* If this simplifies, use it.  */
409   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
410     return tem;
411 
412   return gen_rtx_fmt_e (code, mode, op);
413 }
414 
415 /* Likewise for ternary operations.  */
416 
417 rtx
418 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
419 		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
420 {
421   rtx tem;
422 
423   /* If this simplifies, use it.  */
424   if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
425 					      op0, op1, op2)))
426     return tem;
427 
428   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
429 }
430 
431 /* Likewise, for relational operations.
432    CMP_MODE specifies the mode the comparison is done in.  */
433 
434 rtx
435 simplify_gen_relational (enum rtx_code code, machine_mode mode,
436 			 machine_mode cmp_mode, rtx op0, rtx op1)
437 {
438   rtx tem;
439 
440   if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
441 						 op0, op1)))
442     return tem;
443 
444   return gen_rtx_fmt_ee (code, mode, op0, op1);
445 }
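
/* Illustrative sketch, not part of the original file: the simplify_gen_*
   wrappers above share one pattern -- try the matching simplify_*_operation
   first and only build a raw rtx if that fails.  For example,
     simplify_gen_relational (EQ, SImode, SImode, GEN_INT (1), GEN_INT (1))
   folds to a constant (const_true_rtx), whereas comparing two registers
   just yields an (eq:SI ...) expression.  */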
446 
447 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
448    and simplify the result.  If FN is non-NULL, call this callback on each
449    X; if it returns non-NULL, replace X with its return value and simplify the
450    result.  */
451 
452 rtx
453 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
454 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
455 {
456   enum rtx_code code = GET_CODE (x);
457   machine_mode mode = GET_MODE (x);
458   machine_mode op_mode;
459   const char *fmt;
460   rtx op0, op1, op2, newx, op;
461   rtvec vec, newvec;
462   int i, j;
463 
464   if (__builtin_expect (fn != NULL, 0))
465     {
466       newx = fn (x, old_rtx, data);
467       if (newx)
468 	return newx;
469     }
470   else if (rtx_equal_p (x, old_rtx))
471     return copy_rtx ((rtx) data);
472 
473   switch (GET_RTX_CLASS (code))
474     {
475     case RTX_UNARY:
476       op0 = XEXP (x, 0);
477       op_mode = GET_MODE (op0);
478       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
479       if (op0 == XEXP (x, 0))
480 	return x;
481       return simplify_gen_unary (code, mode, op0, op_mode);
482 
483     case RTX_BIN_ARITH:
484     case RTX_COMM_ARITH:
485       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
486       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
488 	return x;
489       return simplify_gen_binary (code, mode, op0, op1);
490 
491     case RTX_COMPARE:
492     case RTX_COMM_COMPARE:
493       op0 = XEXP (x, 0);
494       op1 = XEXP (x, 1);
495       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
496       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
497       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
498       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
499 	return x;
500       return simplify_gen_relational (code, mode, op_mode, op0, op1);
501 
502     case RTX_TERNARY:
503     case RTX_BITFIELD_OPS:
504       op0 = XEXP (x, 0);
505       op_mode = GET_MODE (op0);
506       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
507       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
508       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
509       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
510 	return x;
511       if (op_mode == VOIDmode)
512 	op_mode = GET_MODE (op0);
513       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
514 
515     case RTX_EXTRA:
516       if (code == SUBREG)
517 	{
518 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
519 	  if (op0 == SUBREG_REG (x))
520 	    return x;
521 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
522 				     GET_MODE (SUBREG_REG (x)),
523 				     SUBREG_BYTE (x));
524 	  return op0 ? op0 : x;
525 	}
526       break;
527 
528     case RTX_OBJ:
529       if (code == MEM)
530 	{
531 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
532 	  if (op0 == XEXP (x, 0))
533 	    return x;
534 	  return replace_equiv_address_nv (x, op0);
535 	}
536       else if (code == LO_SUM)
537 	{
538 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
539 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
540 
541 	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
542 	  if (GET_CODE (op0) == HIGH)
543 	    {
544 	      rtx base0, base1, offset0, offset1;
545 	      split_const (XEXP (op0, 0), &base0, &offset0);
546 	      split_const (op1, &base1, &offset1);
547 	      if (rtx_equal_p (base0, base1))
548 		return op1;
549 	    }
550 
551 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
552 	    return x;
553 	  return gen_rtx_LO_SUM (mode, op0, op1);
554 	}
555       break;
556 
557     default:
558       break;
559     }
560 
561   newx = x;
562   fmt = GET_RTX_FORMAT (code);
563   for (i = 0; fmt[i]; i++)
564     switch (fmt[i])
565       {
566       case 'E':
567 	vec = XVEC (x, i);
568 	newvec = XVEC (newx, i);
569 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
570 	  {
571 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
572 					  old_rtx, fn, data);
573 	    if (op != RTVEC_ELT (vec, j))
574 	      {
575 		if (newvec == vec)
576 		  {
577 		    newvec = shallow_copy_rtvec (vec);
578 		    if (x == newx)
579 		      newx = shallow_copy_rtx (x);
580 		    XVEC (newx, i) = newvec;
581 		  }
582 		RTVEC_ELT (newvec, j) = op;
583 	      }
584 	  }
585 	break;
586 
587       case 'e':
588 	if (XEXP (x, i))
589 	  {
590 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
591 	    if (op != XEXP (x, i))
592 	      {
593 		if (x == newx)
594 		  newx = shallow_copy_rtx (x);
595 		XEXP (newx, i) = op;
596 	      }
597 	  }
598 	break;
599       }
600   return newx;
601 }
602 
603 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
604    resulting RTX.  Return a new RTX which is as simplified as possible.  */
605 
606 rtx
607 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
608 {
609   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
610 }
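
/* Illustrative sketch, not part of the original file: substituting a
   register and letting the rebuilt expression fold.  The snippet is
   compiled out and the register number is arbitrary.  */
#if 0
static void
example_simplify_replace_rtx (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx x = gen_rtx_PLUS (SImode, reg, GEN_INT (7));
  /* Rebuilding the PLUS through simplify_gen_binary folds the constants,
     so Y is (const_int 8).  */
  rtx y = simplify_replace_rtx (x, reg, GEN_INT (1));
}
#endif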
611 
612 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
613    Only handle cases where the truncated value is inherently an rvalue.
614 
615    RTL provides two ways of truncating a value:
616 
617    1. a lowpart subreg.  This form is only a truncation when both
618       the outer and inner modes (here MODE and OP_MODE respectively)
619       are scalar integers, and only then when the subreg is used as
620       an rvalue.
621 
622       It is only valid to form such truncating subregs if the
623       truncation requires no action by the target.  The onus for
624       proving this is on the creator of the subreg -- e.g. the
625       caller to simplify_subreg or simplify_gen_subreg -- and typically
626       involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
627 
628    2. a TRUNCATE.  This form handles both scalar and compound integers.
629 
630    The first form is preferred where valid.  However, the TRUNCATE
631    handling in simplify_unary_operation turns the second form into the
632    first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
633    so it is generally safe to form rvalue truncations using:
634 
635       simplify_gen_unary (TRUNCATE, ...)
636 
637    and leave simplify_unary_operation to work out which representation
638    should be used.
639 
640    Because of the proof requirements on (1), simplify_truncation must
641    also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
642    regardless of whether the outer truncation came from a SUBREG or a
643    TRUNCATE.  For example, if the caller has proven that an SImode
644    truncation of:
645 
646       (and:DI X Y)
647 
648    is a no-op and can be represented as a subreg, it does not follow
649    that SImode truncations of X and Y are also no-ops.  On a target
650    like 64-bit MIPS that requires SImode values to be stored in
651    sign-extended form, an SImode truncation of:
652 
653       (and:DI (reg:DI X) (const_int 63))
654 
655    is trivially a no-op because only the lower 6 bits can be set.
656    However, X is still an arbitrary 64-bit number and so we cannot
657    assume that truncating it too is a no-op.  */
658 
659 static rtx
660 simplify_truncation (machine_mode mode, rtx op,
661 		     machine_mode op_mode)
662 {
663   unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
664   unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
665   gcc_assert (precision <= op_precision);
666 
667   /* Optimize truncations of zero and sign extended values.  */
668   if (GET_CODE (op) == ZERO_EXTEND
669       || GET_CODE (op) == SIGN_EXTEND)
670     {
671       /* There are three possibilities.  If MODE is the same as the
672 	 origmode, we can omit both the extension and the subreg.
673 	 If MODE is not larger than the origmode, we can apply the
674 	 truncation without the extension.  Finally, if the outermode
675 	 is larger than the origmode, we can just extend to the appropriate
676 	 mode.  */
677       machine_mode origmode = GET_MODE (XEXP (op, 0));
678       if (mode == origmode)
679 	return XEXP (op, 0);
680       else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
681 	return simplify_gen_unary (TRUNCATE, mode,
682 				   XEXP (op, 0), origmode);
683       else
684 	return simplify_gen_unary (GET_CODE (op), mode,
685 				   XEXP (op, 0), origmode);
686     }
687 
688   /* If the machine can perform operations in the truncated mode, distribute
689      the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
690      (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
691   if (1
692 #ifdef WORD_REGISTER_OPERATIONS
693       && precision >= BITS_PER_WORD
694 #endif
695       && (GET_CODE (op) == PLUS
696 	  || GET_CODE (op) == MINUS
697 	  || GET_CODE (op) == MULT))
698     {
699       rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
700       if (op0)
701 	{
702 	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
703 	  if (op1)
704 	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
705 	}
706     }
707 
708   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
709      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
710      the outer subreg is effectively a truncation to the original mode.  */
711   if ((GET_CODE (op) == LSHIFTRT
712        || GET_CODE (op) == ASHIFTRT)
713       /* Ensure that OP_MODE is at least twice as wide as MODE
714 	 to avoid the possibility that an outer LSHIFTRT shifts by more
715 	 than the sign extension's sign_bit_copies and introduces zeros
716 	 into the high bits of the result.  */
717       && 2 * precision <= op_precision
718       && CONST_INT_P (XEXP (op, 1))
719       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
720       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
721       && UINTVAL (XEXP (op, 1)) < precision)
722     return simplify_gen_binary (ASHIFTRT, mode,
723 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
724 
725   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
726      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
727      the outer subreg is effectively a truncation to the original mode.  */
728   if ((GET_CODE (op) == LSHIFTRT
729        || GET_CODE (op) == ASHIFTRT)
730       && CONST_INT_P (XEXP (op, 1))
731       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
732       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
733       && UINTVAL (XEXP (op, 1)) < precision)
734     return simplify_gen_binary (LSHIFTRT, mode,
735 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
736 
737   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
738      (ashift:QI (x:QI) C), where C is a suitable small constant and
739      the outer subreg is effectively a truncation to the original mode.  */
740   if (GET_CODE (op) == ASHIFT
741       && CONST_INT_P (XEXP (op, 1))
742       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
743 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
744       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
745       && UINTVAL (XEXP (op, 1)) < precision)
746     return simplify_gen_binary (ASHIFT, mode,
747 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
748 
749   /* Recognize a word extraction from a multi-word subreg.  */
750   if ((GET_CODE (op) == LSHIFTRT
751        || GET_CODE (op) == ASHIFTRT)
752       && SCALAR_INT_MODE_P (mode)
753       && SCALAR_INT_MODE_P (op_mode)
754       && precision >= BITS_PER_WORD
755       && 2 * precision <= op_precision
756       && CONST_INT_P (XEXP (op, 1))
757       && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
758       && UINTVAL (XEXP (op, 1)) < op_precision)
759     {
760       int byte = subreg_lowpart_offset (mode, op_mode);
761       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
762       return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
763 				  (WORDS_BIG_ENDIAN
764 				   ? byte - shifted_bytes
765 				   : byte + shifted_bytes));
766     }
767 
768   /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
769      and try replacing the TRUNCATE and shift with it.  Don't do this
770      if the MEM has a mode-dependent address.  */
771   if ((GET_CODE (op) == LSHIFTRT
772        || GET_CODE (op) == ASHIFTRT)
773       && SCALAR_INT_MODE_P (op_mode)
774       && MEM_P (XEXP (op, 0))
775       && CONST_INT_P (XEXP (op, 1))
776       && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
777       && INTVAL (XEXP (op, 1)) > 0
778       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
779       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
780 				     MEM_ADDR_SPACE (XEXP (op, 0)))
781       && ! MEM_VOLATILE_P (XEXP (op, 0))
782       && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
783 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
784     {
785       int byte = subreg_lowpart_offset (mode, op_mode);
786       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
787       return adjust_address_nv (XEXP (op, 0), mode,
788 				(WORDS_BIG_ENDIAN
789 				 ? byte - shifted_bytes
790 				 : byte + shifted_bytes));
791     }
792 
793   /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
794      (OP:SI foo:SI) if OP is NEG or ABS.  */
795   if ((GET_CODE (op) == ABS
796        || GET_CODE (op) == NEG)
797       && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
798 	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
799       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
800     return simplify_gen_unary (GET_CODE (op), mode,
801 			       XEXP (XEXP (op, 0), 0), mode);
802 
803   /* (truncate:A (subreg:B (truncate:C X) 0)) is
804      (truncate:A X).  */
805   if (GET_CODE (op) == SUBREG
806       && SCALAR_INT_MODE_P (mode)
807       && SCALAR_INT_MODE_P (op_mode)
808       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
809       && GET_CODE (SUBREG_REG (op)) == TRUNCATE
810       && subreg_lowpart_p (op))
811     {
812       rtx inner = XEXP (SUBREG_REG (op), 0);
813       if (GET_MODE_PRECISION (mode)
814 	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
815 	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
816       else
817 	/* If subreg above is paradoxical and C is narrower
818 	   than A, return (subreg:A (truncate:C X) 0).  */
819 	return simplify_gen_subreg (mode, SUBREG_REG (op),
820 				    GET_MODE (SUBREG_REG (op)), 0);
821     }
822 
823   /* (truncate:A (truncate:B X)) is (truncate:A X).  */
824   if (GET_CODE (op) == TRUNCATE)
825     return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
826 			       GET_MODE (XEXP (op, 0)));
827 
828   return NULL_RTX;
829 }
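
/* Worked example, added for illustration (not in the original source):
   the "distribute the truncation" case above in concrete form.  When the
   target can do QImode arithmetic,
     (truncate:QI (plus:SI (reg:SI x) (reg:SI y)))
       => (plus:QI (truncate:QI (reg:SI x)) (truncate:QI (reg:SI y)))
   and each inner TRUNCATE may in turn become a lowpart subreg when
   TRULY_NOOP_TRUNCATION_MODES_P allows it.  */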
830 
831 /* Try to simplify a unary operation CODE whose output mode is to be
832    MODE with input operand OP whose mode was originally OP_MODE.
833    Return zero if no simplification can be made.  */
834 rtx
835 simplify_unary_operation (enum rtx_code code, machine_mode mode,
836 			  rtx op, machine_mode op_mode)
837 {
838   rtx trueop, tem;
839 
840   trueop = avoid_constant_pool_reference (op);
841 
842   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
843   if (tem)
844     return tem;
845 
846   return simplify_unary_operation_1 (code, mode, op);
847 }
848 
849 /* Perform some simplifications we can do even if the operands
850    aren't constant.  */
851 static rtx
852 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
853 {
854   enum rtx_code reversed;
855   rtx temp;
856 
857   switch (code)
858     {
859     case NOT:
860       /* (not (not X)) == X.  */
861       if (GET_CODE (op) == NOT)
862 	return XEXP (op, 0);
863 
864       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
865 	 comparison is all ones.   */
866       if (COMPARISON_P (op)
867 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
868 	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
869 	return simplify_gen_relational (reversed, mode, VOIDmode,
870 					XEXP (op, 0), XEXP (op, 1));
871 
872       /* (not (plus X -1)) can become (neg X).  */
873       if (GET_CODE (op) == PLUS
874 	  && XEXP (op, 1) == constm1_rtx)
875 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
876 
877       /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
878 	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
879 	 and MODE_VECTOR_INT.  */
880       if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
881 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
882 				    CONSTM1_RTX (mode));
883 
884       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
885       if (GET_CODE (op) == XOR
886 	  && CONST_INT_P (XEXP (op, 1))
887 	  && (temp = simplify_unary_operation (NOT, mode,
888 					       XEXP (op, 1), mode)) != 0)
889 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
890 
891       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
892       if (GET_CODE (op) == PLUS
893 	  && CONST_INT_P (XEXP (op, 1))
894 	  && mode_signbit_p (mode, XEXP (op, 1))
895 	  && (temp = simplify_unary_operation (NOT, mode,
896 					       XEXP (op, 1), mode)) != 0)
897 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
898 
899 
900       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
901 	 operands other than 1, but that is not valid.  We could do a
902 	 similar simplification for (not (lshiftrt C X)) where C is
903 	 just the sign bit, but this doesn't seem common enough to
904 	 bother with.  */
905       if (GET_CODE (op) == ASHIFT
906 	  && XEXP (op, 0) == const1_rtx)
907 	{
908 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
909 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
910 	}
911 
912       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
913 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
914 	 so we can perform the above simplification.  */
915       if (STORE_FLAG_VALUE == -1
916 	  && GET_CODE (op) == ASHIFTRT
917 	  && CONST_INT_P (XEXP (op, 1))
918 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
919 	return simplify_gen_relational (GE, mode, VOIDmode,
920 					XEXP (op, 0), const0_rtx);
921 
922 
923       if (GET_CODE (op) == SUBREG
924 	  && subreg_lowpart_p (op)
925 	  && (GET_MODE_SIZE (GET_MODE (op))
926 	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
927 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
928 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
929 	{
930 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
931 	  rtx x;
932 
933 	  x = gen_rtx_ROTATE (inner_mode,
934 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
935 						  inner_mode),
936 			      XEXP (SUBREG_REG (op), 1));
937 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
938 	  if (temp)
939 	    return temp;
940 	}
941 
942       /* Apply De Morgan's laws to reduce number of patterns for machines
943 	 with negating logical insns (and-not, nand, etc.).  If result has
944 	 only one NOT, put it first, since that is how the patterns are
945 	 coded.  */
946       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
947 	{
948 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
949 	  machine_mode op_mode;
950 
951 	  op_mode = GET_MODE (in1);
952 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
953 
954 	  op_mode = GET_MODE (in2);
955 	  if (op_mode == VOIDmode)
956 	    op_mode = mode;
957 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
958 
959 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
960 	    {
961 	      rtx tem = in2;
962 	      in2 = in1; in1 = tem;
963 	    }
964 
965 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
966 				 mode, in1, in2);
967 	}
968 
969       /* (not (bswap x)) -> (bswap (not x)).  */
970       if (GET_CODE (op) == BSWAP)
971 	{
972 	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
973 	  return simplify_gen_unary (BSWAP, mode, x, mode);
974 	}
975       break;
976 
977     case NEG:
978       /* (neg (neg X)) == X.  */
979       if (GET_CODE (op) == NEG)
980 	return XEXP (op, 0);
981 
982       /* (neg (plus X 1)) can become (not X).  */
983       if (GET_CODE (op) == PLUS
984 	  && XEXP (op, 1) == const1_rtx)
985 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
986 
987       /* Similarly, (neg (not X)) is (plus X 1).  */
988       if (GET_CODE (op) == NOT)
989 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
990 				    CONST1_RTX (mode));
991 
992       /* (neg (minus X Y)) can become (minus Y X).  This transformation
993 	 isn't safe for modes with signed zeros, since if X and Y are
994 	 both +0, (minus Y X) is the same as (minus X Y).  If the
995 	 rounding mode is towards +infinity (or -infinity) then the two
996 	 expressions will be rounded differently.  */
997       if (GET_CODE (op) == MINUS
998 	  && !HONOR_SIGNED_ZEROS (mode)
999 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1000 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1001 
1002       if (GET_CODE (op) == PLUS
1003 	  && !HONOR_SIGNED_ZEROS (mode)
1004 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1005 	{
1006 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
1007 	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
1008 	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1009 	    {
1010 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1011 	      if (temp)
1012 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1013 	    }
1014 
1015 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
1016 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1017 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1018 	}
1019 
1020       /* (neg (mult A B)) becomes (mult A (neg B)).
1021 	 This works even for floating-point values.  */
1022       if (GET_CODE (op) == MULT
1023 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1024 	{
1025 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1026 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1027 	}
1028 
1029       /* NEG commutes with ASHIFT since it is multiplication.  Only do
1030 	 this if we can then eliminate the NEG (e.g., if the operand
1031 	 is a constant).  */
1032       if (GET_CODE (op) == ASHIFT)
1033 	{
1034 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1035 	  if (temp)
1036 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1037 	}
1038 
1039       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1040 	 C is equal to the width of MODE minus 1.  */
1041       if (GET_CODE (op) == ASHIFTRT
1042 	  && CONST_INT_P (XEXP (op, 1))
1043 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1044 	return simplify_gen_binary (LSHIFTRT, mode,
1045 				    XEXP (op, 0), XEXP (op, 1));
1046 
1047       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1048 	 C is equal to the width of MODE minus 1.  */
1049       if (GET_CODE (op) == LSHIFTRT
1050 	  && CONST_INT_P (XEXP (op, 1))
1051 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1052 	return simplify_gen_binary (ASHIFTRT, mode,
1053 				    XEXP (op, 0), XEXP (op, 1));
1054 
1055       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
1056       if (GET_CODE (op) == XOR
1057 	  && XEXP (op, 1) == const1_rtx
1058 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
1059 	return plus_constant (mode, XEXP (op, 0), -1);
1060 
1061       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
1062       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
1063       if (GET_CODE (op) == LT
1064 	  && XEXP (op, 1) == const0_rtx
1065 	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1066 	{
1067 	  machine_mode inner = GET_MODE (XEXP (op, 0));
1068 	  int isize = GET_MODE_PRECISION (inner);
1069 	  if (STORE_FLAG_VALUE == 1)
1070 	    {
1071 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1072 					  GEN_INT (isize - 1));
1073 	      if (mode == inner)
1074 		return temp;
1075 	      if (GET_MODE_PRECISION (mode) > isize)
1076 		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1077 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1078 	    }
1079 	  else if (STORE_FLAG_VALUE == -1)
1080 	    {
1081 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1082 					  GEN_INT (isize - 1));
1083 	      if (mode == inner)
1084 		return temp;
1085 	      if (GET_MODE_PRECISION (mode) > isize)
1086 		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1087 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1088 	    }
1089 	}
1090       break;
1091 
1092     case TRUNCATE:
1093       /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1094 	 with the umulXi3_highpart patterns.  */
1095       if (GET_CODE (op) == LSHIFTRT
1096 	  && GET_CODE (XEXP (op, 0)) == MULT)
1097 	break;
1098 
1099       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1100 	{
1101 	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1102 	    {
1103 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1104 	      if (temp)
1105 		return temp;
1106 	    }
1107 	  /* We can't handle truncation to a partial integer mode here
1108 	     because we don't know the real bitsize of the partial
1109 	     integer mode.  */
1110 	  break;
1111 	}
1112 
1113       if (GET_MODE (op) != VOIDmode)
1114 	{
1115 	  temp = simplify_truncation (mode, op, GET_MODE (op));
1116 	  if (temp)
1117 	    return temp;
1118 	}
1119 
1120       /* If we know that the value is already truncated, we can
1121 	 replace the TRUNCATE with a SUBREG.  */
1122       if (GET_MODE_NUNITS (mode) == 1
1123 	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1124 	      || truncated_to_mode (mode, op)))
1125 	{
1126 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1127 	  if (temp)
1128 	    return temp;
1129 	}
1130 
1131       /* A truncate of a comparison can be replaced with a subreg if
1132          STORE_FLAG_VALUE permits.  This is like the previous test,
1133          but it works even if the comparison is done in a mode larger
1134          than HOST_BITS_PER_WIDE_INT.  */
1135       if (HWI_COMPUTABLE_MODE_P (mode)
1136 	  && COMPARISON_P (op)
1137 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1138 	{
1139 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1140 	  if (temp)
1141 	    return temp;
1142 	}
1143 
1144       /* A truncate of a memory is just loading the low part of the memory
1145 	 if we are not changing the meaning of the address. */
1146       if (GET_CODE (op) == MEM
1147 	  && !VECTOR_MODE_P (mode)
1148 	  && !MEM_VOLATILE_P (op)
1149 	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1150 	{
1151 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1152 	  if (temp)
1153 	    return temp;
1154 	}
1155 
1156       break;
1157 
1158     case FLOAT_TRUNCATE:
1159       if (DECIMAL_FLOAT_MODE_P (mode))
1160 	break;
1161 
1162       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
1163       if (GET_CODE (op) == FLOAT_EXTEND
1164 	  && GET_MODE (XEXP (op, 0)) == mode)
1165 	return XEXP (op, 0);
1166 
1167       /* (float_truncate:SF (float_truncate:DF foo:XF))
1168          = (float_truncate:SF foo:XF).
1169 	 This may eliminate double rounding, so it is unsafe.
1170 
1171          (float_truncate:SF (float_extend:XF foo:DF))
1172          = (float_truncate:SF foo:DF).
1173 
1174          (float_truncate:DF (float_extend:XF foo:SF))
1175          = (float_extend:DF foo:SF).  */
1176       if ((GET_CODE (op) == FLOAT_TRUNCATE
1177 	   && flag_unsafe_math_optimizations)
1178 	  || GET_CODE (op) == FLOAT_EXTEND)
1179 	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1180 							    0)))
1181 				   > GET_MODE_SIZE (mode)
1182 				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1183 				   mode,
1184 				   XEXP (op, 0), mode);
1185 
1186       /*  (float_truncate (float x)) is (float x)  */
1187       if (GET_CODE (op) == FLOAT
1188 	  && (flag_unsafe_math_optimizations
1189 	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1190 		  && ((unsigned)significand_size (GET_MODE (op))
1191 		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1192 			  - num_sign_bit_copies (XEXP (op, 0),
1193 						 GET_MODE (XEXP (op, 0))))))))
1194 	return simplify_gen_unary (FLOAT, mode,
1195 				   XEXP (op, 0),
1196 				   GET_MODE (XEXP (op, 0)));
1197 
1198       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1199 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
1200       if ((GET_CODE (op) == ABS
1201 	   || GET_CODE (op) == NEG)
1202 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1203 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1204 	return simplify_gen_unary (GET_CODE (op), mode,
1205 				   XEXP (XEXP (op, 0), 0), mode);
1206 
1207       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1208 	 is (float_truncate:SF x).  */
1209       if (GET_CODE (op) == SUBREG
1210 	  && subreg_lowpart_p (op)
1211 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1212 	return SUBREG_REG (op);
1213       break;
1214 
1215     case FLOAT_EXTEND:
1216       if (DECIMAL_FLOAT_MODE_P (mode))
1217 	break;
1218 
1219       /*  (float_extend (float_extend x)) is (float_extend x)
1220 
1221 	  (float_extend (float x)) is (float x) assuming that double
1222 	  rounding can't happen.
1223           */
1224       if (GET_CODE (op) == FLOAT_EXTEND
1225 	  || (GET_CODE (op) == FLOAT
1226 	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1227 	      && ((unsigned)significand_size (GET_MODE (op))
1228 		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1229 		      - num_sign_bit_copies (XEXP (op, 0),
1230 					     GET_MODE (XEXP (op, 0)))))))
1231 	return simplify_gen_unary (GET_CODE (op), mode,
1232 				   XEXP (op, 0),
1233 				   GET_MODE (XEXP (op, 0)));
1234 
1235       break;
1236 
1237     case ABS:
1238       /* (abs (neg <foo>)) -> (abs <foo>) */
1239       if (GET_CODE (op) == NEG)
1240 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1241 				   GET_MODE (XEXP (op, 0)));
1242 
1243       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1244          do nothing.  */
1245       if (GET_MODE (op) == VOIDmode)
1246 	break;
1247 
1248       /* If operand is something known to be positive, ignore the ABS.  */
1249       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1250 	  || val_signbit_known_clear_p (GET_MODE (op),
1251 					nonzero_bits (op, GET_MODE (op))))
1252 	return op;
1253 
1254       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
1255       if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1256 	return gen_rtx_NEG (mode, op);
1257 
1258       break;
1259 
1260     case FFS:
1261       /* (ffs (*_extend <X>)) = (ffs <X>) */
1262       if (GET_CODE (op) == SIGN_EXTEND
1263 	  || GET_CODE (op) == ZERO_EXTEND)
1264 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1265 				   GET_MODE (XEXP (op, 0)));
1266       break;
1267 
1268     case POPCOUNT:
1269       switch (GET_CODE (op))
1270 	{
1271 	case BSWAP:
1272 	case ZERO_EXTEND:
1273 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
1274 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1275 				     GET_MODE (XEXP (op, 0)));
1276 
1277 	case ROTATE:
1278 	case ROTATERT:
1279 	  /* Rotations don't affect popcount.  */
1280 	  if (!side_effects_p (XEXP (op, 1)))
1281 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1282 				       GET_MODE (XEXP (op, 0)));
1283 	  break;
1284 
1285 	default:
1286 	  break;
1287 	}
1288       break;
1289 
1290     case PARITY:
1291       switch (GET_CODE (op))
1292 	{
1293 	case NOT:
1294 	case BSWAP:
1295 	case ZERO_EXTEND:
1296 	case SIGN_EXTEND:
1297 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1298 				     GET_MODE (XEXP (op, 0)));
1299 
1300 	case ROTATE:
1301 	case ROTATERT:
1302 	  /* Rotations don't affect parity.  */
1303 	  if (!side_effects_p (XEXP (op, 1)))
1304 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1305 				       GET_MODE (XEXP (op, 0)));
1306 	  break;
1307 
1308 	default:
1309 	  break;
1310 	}
1311       break;
1312 
1313     case BSWAP:
1314       /* (bswap (bswap x)) -> x.  */
1315       if (GET_CODE (op) == BSWAP)
1316 	return XEXP (op, 0);
1317       break;
1318 
1319     case FLOAT:
1320       /* (float (sign_extend <X>)) = (float <X>).  */
1321       if (GET_CODE (op) == SIGN_EXTEND)
1322 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1323 				   GET_MODE (XEXP (op, 0)));
1324       break;
1325 
1326     case SIGN_EXTEND:
1327       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1328 	 becomes just the MINUS if its mode is MODE.  This allows
1329 	 folding switch statements on machines using casesi (such as
1330 	 the VAX).  */
1331       if (GET_CODE (op) == TRUNCATE
1332 	  && GET_MODE (XEXP (op, 0)) == mode
1333 	  && GET_CODE (XEXP (op, 0)) == MINUS
1334 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1335 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1336 	return XEXP (op, 0);
1337 
1338       /* Extending a widening multiplication should be canonicalized to
1339 	 a wider widening multiplication.  */
1340       if (GET_CODE (op) == MULT)
1341 	{
1342 	  rtx lhs = XEXP (op, 0);
1343 	  rtx rhs = XEXP (op, 1);
1344 	  enum rtx_code lcode = GET_CODE (lhs);
1345 	  enum rtx_code rcode = GET_CODE (rhs);
1346 
1347 	  /* Widening multiplies usually extend both operands, but sometimes
1348 	     they use a shift to extract a portion of a register.  */
1349 	  if ((lcode == SIGN_EXTEND
1350 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1351 	      && (rcode == SIGN_EXTEND
1352 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1353 	    {
1354 	      machine_mode lmode = GET_MODE (lhs);
1355 	      machine_mode rmode = GET_MODE (rhs);
1356 	      int bits;
1357 
1358 	      if (lcode == ASHIFTRT)
1359 		/* Number of bits not shifted off the end.  */
1360 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1361 	      else /* lcode == SIGN_EXTEND */
1362 		/* Size of inner mode.  */
1363 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1364 
1365 	      if (rcode == ASHIFTRT)
1366 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1367 	      else /* rcode == SIGN_EXTEND */
1368 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1369 
1370 	      /* We can only widen multiplies if the result is mathematically
1371 		 equivalent.  I.e. if overflow was impossible.  */
1372 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1373 		return simplify_gen_binary
1374 			 (MULT, mode,
1375 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1376 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1377 	    }
1378 	}
1379 
1380       /* Check for a sign extension of a subreg of a promoted
1381 	 variable, where the promotion is sign-extended, and the
1382 	 target mode is the same as the variable's promotion.  */
1383       if (GET_CODE (op) == SUBREG
1384 	  && SUBREG_PROMOTED_VAR_P (op)
1385 	  && SUBREG_PROMOTED_SIGNED_P (op)
1386 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1387 	{
1388 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1389 	  if (temp)
1390 	    return temp;
1391 	}
1392 
1393       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1394 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1395       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1396 	{
1397 	  gcc_assert (GET_MODE_PRECISION (mode)
1398 		      > GET_MODE_PRECISION (GET_MODE (op)));
1399 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1400 				     GET_MODE (XEXP (op, 0)));
1401 	}
1402 
1403       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1404 	 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1405 	 GET_MODE_BITSIZE (N) - I bits.
1406 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1407 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1408       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1409 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1410 	  && CONST_INT_P (XEXP (op, 1))
1411 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1412 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1413 	{
1414 	  machine_mode tmode
1415 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1416 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1417 	  gcc_assert (GET_MODE_BITSIZE (mode)
1418 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1419 	  if (tmode != BLKmode)
1420 	    {
1421 	      rtx inner =
1422 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1423 	      if (inner)
1424 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1425 					   ? SIGN_EXTEND : ZERO_EXTEND,
1426 					   mode, inner, tmode);
1427 	    }
1428 	}
1429 
1430 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1431       /* As we do not know which address space the pointer is referring to,
1432 	 we can do this only if the target does not support different pointer
1433 	 or address modes depending on the address space.  */
1434       if (target_default_pointer_address_modes_p ()
1435 	  && ! POINTERS_EXTEND_UNSIGNED
1436 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1437 	  && (CONSTANT_P (op)
1438 	      || (GET_CODE (op) == SUBREG
1439 		  && REG_P (SUBREG_REG (op))
1440 		  && REG_POINTER (SUBREG_REG (op))
1441 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1442 	{
1443 	  temp
1444 	    = convert_memory_address_addr_space_1 (Pmode, op,
1445 						   ADDR_SPACE_GENERIC, false,
1446 						   true);
1447 	  if (temp)
1448 	    return temp;
1449 	}
1450 #endif
1451       break;
1452 
1453     case ZERO_EXTEND:
1454       /* Check for a zero extension of a subreg of a promoted
1455 	 variable, where the promotion is zero-extended, and the
1456 	 target mode is the same as the variable's promotion.  */
1457       if (GET_CODE (op) == SUBREG
1458 	  && SUBREG_PROMOTED_VAR_P (op)
1459 	  && SUBREG_PROMOTED_UNSIGNED_P (op)
1460 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1461 	{
1462 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1463 	  if (temp)
1464 	    return temp;
1465 	}
1466 
1467       /* Extending a widening multiplication should be canonicalized to
1468 	 a wider widening multiplication.  */
1469       if (GET_CODE (op) == MULT)
1470 	{
1471 	  rtx lhs = XEXP (op, 0);
1472 	  rtx rhs = XEXP (op, 1);
1473 	  enum rtx_code lcode = GET_CODE (lhs);
1474 	  enum rtx_code rcode = GET_CODE (rhs);
1475 
1476 	  /* Widening multiplies usually extend both operands, but sometimes
1477 	     they use a shift to extract a portion of a register.  */
1478 	  if ((lcode == ZERO_EXTEND
1479 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1480 	      && (rcode == ZERO_EXTEND
1481 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1482 	    {
1483 	      machine_mode lmode = GET_MODE (lhs);
1484 	      machine_mode rmode = GET_MODE (rhs);
1485 	      int bits;
1486 
1487 	      if (lcode == LSHIFTRT)
1488 		/* Number of bits not shifted off the end.  */
1489 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1490 	      else /* lcode == ZERO_EXTEND */
1491 		/* Size of inner mode.  */
1492 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1493 
1494 	      if (rcode == LSHIFTRT)
1495 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1496 	      else /* rcode == ZERO_EXTEND */
1497 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1498 
1499 	      /* We can only widen multiplies if the result is mathematically
1500 		 equivalent.  I.e. if overflow was impossible.  */
1501 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1502 		return simplify_gen_binary
1503 			 (MULT, mode,
1504 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1505 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1506 	    }
1507 	}
1508 
1509       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1510       if (GET_CODE (op) == ZERO_EXTEND)
1511 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1512 				   GET_MODE (XEXP (op, 0)));
1513 
1514       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1515 	 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1516 	 GET_MODE_PRECISION (N) - I bits.  */
1517       if (GET_CODE (op) == LSHIFTRT
1518 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1519 	  && CONST_INT_P (XEXP (op, 1))
1520 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1521 	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1522 	{
1523 	  machine_mode tmode
1524 	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1525 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1526 	  if (tmode != BLKmode)
1527 	    {
1528 	      rtx inner =
1529 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1530 	      if (inner)
1531 		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1532 	    }
1533 	}
1534 
1535       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1536 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1537 	 of mode N.  E.g.
1538 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1539 	 (and:SI (reg:SI) (const_int 63)).  */
1540       if (GET_CODE (op) == SUBREG
1541 	  && GET_MODE_PRECISION (GET_MODE (op))
1542 	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1543 	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1544 	     <= HOST_BITS_PER_WIDE_INT
1545 	  && GET_MODE_PRECISION (mode)
1546 	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1547 	  && subreg_lowpart_p (op)
1548 	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1549 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1550 	{
1551 	  if (GET_MODE_PRECISION (mode)
1552 	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1553 	    return SUBREG_REG (op);
1554 	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1555 				     GET_MODE (SUBREG_REG (op)));
1556 	}
1557 
1558 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1559       /* As we do not know which address space the pointer is referring to,
1560 	 we can do this only if the target does not support different pointer
1561 	 or address modes depending on the address space.  */
1562       if (target_default_pointer_address_modes_p ()
1563 	  && POINTERS_EXTEND_UNSIGNED > 0
1564 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1565 	  && (CONSTANT_P (op)
1566 	      || (GET_CODE (op) == SUBREG
1567 		  && REG_P (SUBREG_REG (op))
1568 		  && REG_POINTER (SUBREG_REG (op))
1569 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1570 	{
1571 	  temp
1572 	    = convert_memory_address_addr_space_1 (Pmode, op,
1573 						   ADDR_SPACE_GENERIC, false,
1574 						   true);
1575 	  if (temp)
1576 	    return temp;
1577 	}
1578 #endif
1579       break;
1580 
1581     default:
1582       break;
1583     }
1584 
1585   return 0;
1586 }
1587 
1588 /* Try to compute the value of a unary operation CODE whose output mode is to
1589    be MODE with input operand OP whose mode was originally OP_MODE.
1590    Return zero if the value cannot be computed.  */
1591 rtx
1592 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1593 				rtx op, machine_mode op_mode)
1594 {
1595   unsigned int width = GET_MODE_PRECISION (mode);
1596 
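  /* A VEC_DUPLICATE of a constant folds to a CONST_VECTOR, e.g.
     (vec_duplicate:V4SI (const_int 42)) becomes the constant vector
     [42, 42, 42, 42]; duplicating a narrower CONST_VECTOR repeats its
     elements instead.  */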
1597   if (code == VEC_DUPLICATE)
1598     {
1599       gcc_assert (VECTOR_MODE_P (mode));
1600       if (GET_MODE (op) != VOIDmode)
1601       {
1602 	if (!VECTOR_MODE_P (GET_MODE (op)))
1603 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1604 	else
1605 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1606 						(GET_MODE (op)));
1607       }
1608       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1609 	  || GET_CODE (op) == CONST_VECTOR)
1610 	{
1611           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1612           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1613 	  rtvec v = rtvec_alloc (n_elts);
1614 	  unsigned int i;
1615 
1616 	  if (GET_CODE (op) != CONST_VECTOR)
1617 	    for (i = 0; i < n_elts; i++)
1618 	      RTVEC_ELT (v, i) = op;
1619 	  else
1620 	    {
1621 	      machine_mode inmode = GET_MODE (op);
1622               int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1623               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1624 
1625 	      gcc_assert (in_n_elts < n_elts);
1626 	      gcc_assert ((n_elts % in_n_elts) == 0);
1627 	      for (i = 0; i < n_elts; i++)
1628 	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1629 	    }
1630 	  return gen_rtx_CONST_VECTOR (mode, v);
1631 	}
1632     }
1633 
1634   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1635     {
1636       int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1637       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1638       machine_mode opmode = GET_MODE (op);
1639       int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1640       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1641       rtvec v = rtvec_alloc (n_elts);
1642       unsigned int i;
1643 
1644       gcc_assert (op_n_elts == n_elts);
1645       for (i = 0; i < n_elts; i++)
1646 	{
1647 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1648 					    CONST_VECTOR_ELT (op, i),
1649 					    GET_MODE_INNER (opmode));
1650 	  if (!x)
1651 	    return 0;
1652 	  RTVEC_ELT (v, i) = x;
1653 	}
1654       return gen_rtx_CONST_VECTOR (mode, v);
1655     }
1656 
1657   /* The order of these tests is critical so that, for example, we don't
1658      check the wrong mode (input vs. output) for a conversion operation,
1659      such as FIX.  At some point, this should be simplified.  */
1660 
1661   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1662     {
1663       REAL_VALUE_TYPE d;
1664 
1665       if (op_mode == VOIDmode)
1666 	{
1667 	  /* A CONST_INT has VOIDmode as its mode.  We assume that all
1668 	     the bits of the constant are significant, though this is a
1669 	     dangerous assumption, as CONST_INTs are often created and
1670 	     used with garbage in the bits outside of the precision of
1671 	     the implied mode of the const_int.  */
1672 	  op_mode = MAX_MODE_INT;
1673 	}
1674 
1675       real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1676       d = real_value_truncate (mode, d);
1677       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1678     }
1679   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1680     {
1681       REAL_VALUE_TYPE d;
1682 
1683       if (op_mode == VOIDmode)
1684 	{
1685 	  /* A CONST_INT has VOIDmode as its mode.  We assume that all
1686 	     the bits of the constant are significant, though this is a
1687 	     dangerous assumption, as CONST_INTs are often created and
1688 	     used with garbage in the bits outside of the precision of
1689 	     the implied mode of the const_int.  */
1690 	  op_mode = MAX_MODE_INT;
1691 	}
1692 
1693       real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1694       d = real_value_truncate (mode, d);
1695       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1696     }
1697 
1698   if (CONST_SCALAR_INT_P (op) && width > 0)
1699     {
1700       wide_int result;
1701       machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1702       rtx_mode_t op0 = std::make_pair (op, imode);
1703       int int_value;
1704 
1705 #if TARGET_SUPPORTS_WIDE_INT == 0
1706       /* This assert keeps the simplification from producing a result
1707 	 that cannot be represented in a CONST_DOUBLE.  A lot of upstream
1708 	 callers expect that this function never fails to simplify
1709 	 something, so if this check were added to the test above, the
1710 	 code would just die later anyway.  If this assert triggers, you
1711 	 need to make the port support wide int.  */
1712       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1713 #endif
1714 
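      /* E.g. (popcount:SI (const_int 7)) folds to (const_int 3) and
	 (bswap:SI (const_int 0x12345678)) to (const_int 0x78563412).  */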
1715       switch (code)
1716 	{
1717 	case NOT:
1718 	  result = wi::bit_not (op0);
1719 	  break;
1720 
1721 	case NEG:
1722 	  result = wi::neg (op0);
1723 	  break;
1724 
1725 	case ABS:
1726 	  result = wi::abs (op0);
1727 	  break;
1728 
1729 	case FFS:
1730 	  result = wi::shwi (wi::ffs (op0), mode);
1731 	  break;
1732 
1733 	case CLZ:
1734 	  if (wi::ne_p (op0, 0))
1735 	    int_value = wi::clz (op0);
1736 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1737 	    int_value = GET_MODE_PRECISION (mode);
1738 	  result = wi::shwi (int_value, mode);
1739 	  break;
1740 
1741 	case CLRSB:
1742 	  result = wi::shwi (wi::clrsb (op0), mode);
1743 	  break;
1744 
1745 	case CTZ:
1746 	  if (wi::ne_p (op0, 0))
1747 	    int_value = wi::ctz (op0);
1748 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1749 	    int_value = GET_MODE_PRECISION (mode);
1750 	  result = wi::shwi (int_value, mode);
1751 	  break;
1752 
1753 	case POPCOUNT:
1754 	  result = wi::shwi (wi::popcount (op0), mode);
1755 	  break;
1756 
1757 	case PARITY:
1758 	  result = wi::shwi (wi::parity (op0), mode);
1759 	  break;
1760 
1761 	case BSWAP:
1762 	  result = wide_int (op0).bswap ();
1763 	  break;
1764 
1765 	case TRUNCATE:
1766 	case ZERO_EXTEND:
1767 	  result = wide_int::from (op0, width, UNSIGNED);
1768 	  break;
1769 
1770 	case SIGN_EXTEND:
1771 	  result = wide_int::from (op0, width, SIGNED);
1772 	  break;
1773 
1774 	case SQRT:
1775 	default:
1776 	  return 0;
1777 	}
1778 
1779       return immed_wide_int_const (result, mode);
1780     }
1781 
1782   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1783 	   && SCALAR_FLOAT_MODE_P (mode)
1784 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1785     {
1786       REAL_VALUE_TYPE d;
1787       REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1788 
1789       switch (code)
1790 	{
1791 	case SQRT:
1792 	  return 0;
1793 	case ABS:
1794 	  d = real_value_abs (&d);
1795 	  break;
1796 	case NEG:
1797 	  d = real_value_negate (&d);
1798 	  break;
1799 	case FLOAT_TRUNCATE:
1800 	  d = real_value_truncate (mode, d);
1801 	  break;
1802 	case FLOAT_EXTEND:
1803 	  /* All this does is change the mode, unless the mode
1804 	     class is changing.  */
1805 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1806 	    real_convert (&d, mode, &d);
1807 	  break;
1808 	case FIX:
1809 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1810 	  break;
1811 	case NOT:
1812 	  {
1813 	    long tmp[4];
1814 	    int i;
1815 
1816 	    real_to_target (tmp, &d, GET_MODE (op));
1817 	    for (i = 0; i < 4; i++)
1818 	      tmp[i] = ~tmp[i];
1819 	    real_from_target (&d, tmp, mode);
1820 	    break;
1821 	  }
1822 	default:
1823 	  gcc_unreachable ();
1824 	}
1825       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1826     }
1827   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1828 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1829 	   && GET_MODE_CLASS (mode) == MODE_INT
1830 	   && width > 0)
1831     {
1832       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1833 	 operators are intentionally left unspecified (to ease implementation
1834 	 by target backends), for consistency, this routine implements the
1835 	 same semantics for constant folding as used by the middle-end.  */
1836 
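      /* E.g. with QImode as the result, FIX of the DFmode constant 300.0
	 saturates to the signed maximum 127, FIX of -1.5 truncates
	 towards zero to -1, and UNSIGNED_FIX of a negative or NaN input
	 folds to 0.  */
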
1837       /* This was formerly used only for non-IEEE float.
1838 	 eggert@twinsun.com says it is safe for IEEE also.  */
1839       REAL_VALUE_TYPE x, t;
1840       REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1841       wide_int wmax, wmin;
1842       /* FAIL is part of the interface to real_to_integer, but we check
1843 	 things before making this call.  */
1844       bool fail;
1845 
1846       switch (code)
1847 	{
1848 	case FIX:
1849 	  if (REAL_VALUE_ISNAN (x))
1850 	    return const0_rtx;
1851 
1852 	  /* Test against the signed upper bound.  */
1853 	  wmax = wi::max_value (width, SIGNED);
1854 	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
1855 	  if (REAL_VALUES_LESS (t, x))
1856 	    return immed_wide_int_const (wmax, mode);
1857 
1858 	  /* Test against the signed lower bound.  */
1859 	  wmin = wi::min_value (width, SIGNED);
1860 	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
1861 	  if (REAL_VALUES_LESS (x, t))
1862 	    return immed_wide_int_const (wmin, mode);
1863 
1864 	  return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1865 	  break;
1866 
1867 	case UNSIGNED_FIX:
1868 	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1869 	    return const0_rtx;
1870 
1871 	  /* Test against the unsigned upper bound.  */
1872 	  wmax = wi::max_value (width, UNSIGNED);
1873 	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1874 	  if (REAL_VALUES_LESS (t, x))
1875 	    return immed_wide_int_const (wmax, mode);
1876 
1877 	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
1878 				       mode);
1879 	  break;
1880 
1881 	default:
1882 	  gcc_unreachable ();
1883 	}
1884     }
1885 
1886   return NULL_RTX;
1887 }
1888 
1889 /* Subroutine of simplify_binary_operation to simplify a binary operation
1890    CODE that can commute with byte swapping, with result mode MODE and
1891    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
1892    Return zero if no simplification or canonicalization is possible.  */
1893 
1894 static rtx
1895 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1896 				  rtx op0, rtx op1)
1897 {
1898   rtx tem;
1899 
1900   /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
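  /* E.g. (and:SI (bswap:SI (reg:SI)) (const_int 0xff)) becomes
     (bswap:SI (and:SI (reg:SI) (const_int 0xff000000))).  */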
1901   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1902     {
1903       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1904 				 simplify_gen_unary (BSWAP, mode, op1, mode));
1905       return simplify_gen_unary (BSWAP, mode, tem, mode);
1906     }
1907 
1908   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
1909   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1910     {
1911       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1912       return simplify_gen_unary (BSWAP, mode, tem, mode);
1913     }
1914 
1915   return NULL_RTX;
1916 }
1917 
1918 /* Subroutine of simplify_binary_operation to simplify a commutative,
1919    associative binary operation CODE with result mode MODE, operating
1920    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1921    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
1922    canonicalization is possible.  */
1923 
1924 static rtx
1925 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1926 				rtx op0, rtx op1)
1927 {
1928   rtx tem;
1929 
1930   /* Linearize the operator to the left.  */
1931   if (GET_CODE (op1) == code)
1932     {
1933       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
1934       if (GET_CODE (op0) == code)
1935 	{
1936 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1937 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1938 	}
1939 
1940       /* "a op (b op c)" becomes "(b op c) op a".  */
1941       if (! swap_commutative_operands_p (op1, op0))
1942 	return simplify_gen_binary (code, mode, op1, op0);
1943 
1944       tem = op0;
1945       op0 = op1;
1946       op1 = tem;
1947     }
1948 
1949   if (GET_CODE (op0) == code)
1950     {
1951       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
1952       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1953 	{
1954 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1955 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1956 	}
1957 
1958       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
1959       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1960       if (tem != 0)
1961         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1962 
1963       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
1964       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1965       if (tem != 0)
1966         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1967     }
1968 
1969   return 0;
1970 }
1971 
1972 
1973 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1974    and OP1.  Return 0 if no simplification is possible.
1975 
1976    Don't use this for relational operations such as EQ or LT.
1977    Use simplify_relational_operation instead.  */
1978 rtx
1979 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1980 			   rtx op0, rtx op1)
1981 {
1982   rtx trueop0, trueop1;
1983   rtx tem;
1984 
1985   /* Relational operations don't work here.  We must know the mode
1986      of the operands in order to do the comparison correctly.
1987      Assuming a full word can give incorrect results.
1988      Consider comparing 128 with -128 in QImode.  */
1989   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1990   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1991 
1992   /* Make sure the constant is second.  */
1993   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1994       && swap_commutative_operands_p (op0, op1))
1995     {
1996       tem = op0, op0 = op1, op1 = tem;
1997     }
1998 
1999   trueop0 = avoid_constant_pool_reference (op0);
2000   trueop1 = avoid_constant_pool_reference (op1);
2001 
2002   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2003   if (tem)
2004     return tem;
2005   return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2006 }
2007 
2008 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2009    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2010    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2011    actual constants.  */
2012 
2013 static rtx
2014 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2015 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2016 {
2017   rtx tem, reversed, opleft, opright;
2018   HOST_WIDE_INT val;
2019   unsigned int width = GET_MODE_PRECISION (mode);
2020 
2021   /* Even if we can't compute a constant result,
2022      there are some cases worth simplifying.  */
2023 
2024   switch (code)
2025     {
2026     case PLUS:
2027       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2028 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2029 	 when x is -0 and the rounding mode is not towards -infinity,
2030 	 since (-0) + 0 is then 0.  */
2031       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2032 	return op0;
2033 
2034       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2035 	 transformations are safe even for IEEE.  */
2036       if (GET_CODE (op0) == NEG)
2037 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2038       else if (GET_CODE (op1) == NEG)
2039 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2040 
2041       /* (~a) + 1 -> -a */
2042       if (INTEGRAL_MODE_P (mode)
2043 	  && GET_CODE (op0) == NOT
2044 	  && trueop1 == const1_rtx)
2045 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2046 
2047       /* Handle both-operands-constant cases.  We can only add
2048 	 CONST_INTs to constants since the sum of relocatable symbols
2049 	 can't be handled by most assemblers.  Don't add CONST_INT
2050 	 to CONST_INT since overflow won't be computed properly if wider
2051 	 than HOST_BITS_PER_WIDE_INT.  */
2052 
2053       if ((GET_CODE (op0) == CONST
2054 	   || GET_CODE (op0) == SYMBOL_REF
2055 	   || GET_CODE (op0) == LABEL_REF)
2056 	  && CONST_INT_P (op1))
2057 	return plus_constant (mode, op0, INTVAL (op1));
2058       else if ((GET_CODE (op1) == CONST
2059 		|| GET_CODE (op1) == SYMBOL_REF
2060 		|| GET_CODE (op1) == LABEL_REF)
2061 	       && CONST_INT_P (op0))
2062 	return plus_constant (mode, op1, INTVAL (op0));
2063 
2064       /* See if this is something like X * C - X or vice versa or
2065 	 if the multiplication is written as a shift.  If so, we can
2066 	 distribute and make a new multiply, shift, or maybe just
2067 	 have X (if C is 2 in the example above).  But don't make
2068 	 something more expensive than we had before.  */
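      /* E.g. (plus (mult (reg) (const_int 3)) (reg)) collapses to
	 (mult (reg) (const_int 4)), and (plus (ashift (reg) (const_int 2))
	 (reg)) to (mult (reg) (const_int 5)), provided the new multiply is
	 not more expensive than the original expression.  */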
2069 
2070       if (SCALAR_INT_MODE_P (mode))
2071 	{
2072 	  rtx lhs = op0, rhs = op1;
2073 
2074 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2075 	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2076 
2077 	  if (GET_CODE (lhs) == NEG)
2078 	    {
2079 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2080 	      lhs = XEXP (lhs, 0);
2081 	    }
2082 	  else if (GET_CODE (lhs) == MULT
2083 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2084 	    {
2085 	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2086 	      lhs = XEXP (lhs, 0);
2087 	    }
2088 	  else if (GET_CODE (lhs) == ASHIFT
2089 		   && CONST_INT_P (XEXP (lhs, 1))
2090                    && INTVAL (XEXP (lhs, 1)) >= 0
2091 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2092 	    {
2093 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2094 					    GET_MODE_PRECISION (mode));
2095 	      lhs = XEXP (lhs, 0);
2096 	    }
2097 
2098 	  if (GET_CODE (rhs) == NEG)
2099 	    {
2100 	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2101 	      rhs = XEXP (rhs, 0);
2102 	    }
2103 	  else if (GET_CODE (rhs) == MULT
2104 		   && CONST_INT_P (XEXP (rhs, 1)))
2105 	    {
2106 	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2107 	      rhs = XEXP (rhs, 0);
2108 	    }
2109 	  else if (GET_CODE (rhs) == ASHIFT
2110 		   && CONST_INT_P (XEXP (rhs, 1))
2111 		   && INTVAL (XEXP (rhs, 1)) >= 0
2112 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2113 	    {
2114 	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2115 					    GET_MODE_PRECISION (mode));
2116 	      rhs = XEXP (rhs, 0);
2117 	    }
2118 
2119 	  if (rtx_equal_p (lhs, rhs))
2120 	    {
2121 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
2122 	      rtx coeff;
2123 	      bool speed = optimize_function_for_speed_p (cfun);
2124 
2125 	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2126 
2127 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2128 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2129 		? tem : 0;
2130 	    }
2131 	}
2132 
2133       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
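      /* E.g. in QImode, (plus (xor (reg) (const_int 18)) (const_int -128))
	 becomes (xor (reg) (const_int -110)): adding the sign bit is the
	 same as flipping it, and 0x12 ^ 0x80 is 0x92, i.e. -110.  */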
2134       if (CONST_SCALAR_INT_P (op1)
2135 	  && GET_CODE (op0) == XOR
2136 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2137 	  && mode_signbit_p (mode, op1))
2138 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2139 				    simplify_gen_binary (XOR, mode, op1,
2140 							 XEXP (op0, 1)));
2141 
2142       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2143       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2144 	  && GET_CODE (op0) == MULT
2145 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2146 	{
2147 	  rtx in1, in2;
2148 
2149 	  in1 = XEXP (XEXP (op0, 0), 0);
2150 	  in2 = XEXP (op0, 1);
2151 	  return simplify_gen_binary (MINUS, mode, op1,
2152 				      simplify_gen_binary (MULT, mode,
2153 							   in1, in2));
2154 	}
2155 
2156       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2157 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2158 	 is 1.  */
2159       if (COMPARISON_P (op0)
2160 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2161 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2162 	  && (reversed = reversed_comparison (op0, mode)))
2163 	return
2164 	  simplify_gen_unary (NEG, mode, reversed, mode);
2165 
2166       /* If one of the operands is a PLUS or a MINUS, see if we can
2167 	 simplify this by the associative law.
2168 	 Don't use the associative law for floating point.
2169 	 The inaccuracy makes it nonassociative,
2170 	 and subtle programs can break if operations are associated.  */
2171 
2172       if (INTEGRAL_MODE_P (mode)
2173 	  && (plus_minus_operand_p (op0)
2174 	      || plus_minus_operand_p (op1))
2175 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2176 	return tem;
2177 
2178       /* Reassociate floating point addition only when the user
2179 	 specifies associative math operations.  */
2180       if (FLOAT_MODE_P (mode)
2181 	  && flag_associative_math)
2182 	{
2183 	  tem = simplify_associative_operation (code, mode, op0, op1);
2184 	  if (tem)
2185 	    return tem;
2186 	}
2187       break;
2188 
2189     case COMPARE:
2190       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2191       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2192 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2193 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2194 	{
2195 	  rtx xop00 = XEXP (op0, 0);
2196 	  rtx xop10 = XEXP (op1, 0);
2197 
2198 #ifdef HAVE_cc0
2199 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2200 #else
2201 	    if (REG_P (xop00) && REG_P (xop10)
2202 		&& GET_MODE (xop00) == GET_MODE (xop10)
2203 		&& REGNO (xop00) == REGNO (xop10)
2204 		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2205 		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2206 #endif
2207 	      return xop00;
2208 	}
2209       break;
2210 
2211     case MINUS:
2212       /* We can't assume x-x is 0 even with non-IEEE floating point,
2213 	 but since it is zero except in very strange circumstances, we
2214 	 will treat it as zero with -ffinite-math-only.  */
2215       if (rtx_equal_p (trueop0, trueop1)
2216 	  && ! side_effects_p (op0)
2217 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2218 	return CONST0_RTX (mode);
2219 
2220       /* Change subtraction from zero into negation.  (0 - x) is the
2221 	 same as -x when x is NaN, infinite, or finite and nonzero.
2222 	 But if the mode has signed zeros, and does not round towards
2223 	 -infinity, then 0 - 0 is 0, not -0.  */
2224       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2225 	return simplify_gen_unary (NEG, mode, op1, mode);
2226 
2227       /* (-1 - a) is ~a.  */
2228       if (trueop0 == constm1_rtx)
2229 	return simplify_gen_unary (NOT, mode, op1, mode);
2230 
2231       /* Subtracting 0 has no effect unless the mode has signed zeros
2232 	 and supports rounding towards -infinity.  In such a case,
2233 	 0 - 0 is -0.  */
2234       if (!(HONOR_SIGNED_ZEROS (mode)
2235 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2236 	  && trueop1 == CONST0_RTX (mode))
2237 	return op0;
2238 
2239       /* See if this is something like X * C - X or vice versa or
2240 	 if the multiplication is written as a shift.  If so, we can
2241 	 distribute and make a new multiply, shift, or maybe just
2242 	 have X (if C is 2 in the example above).  But don't make
2243 	 something more expensive than we had before.  */
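      /* E.g. (minus (mult (reg) (const_int 3)) (reg)) collapses to
	 (mult (reg) (const_int 2)), again provided the result is not
	 more expensive than the original expression.  */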
2244 
2245       if (SCALAR_INT_MODE_P (mode))
2246 	{
2247 	  rtx lhs = op0, rhs = op1;
2248 
2249 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2250 	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2251 
2252 	  if (GET_CODE (lhs) == NEG)
2253 	    {
2254 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2255 	      lhs = XEXP (lhs, 0);
2256 	    }
2257 	  else if (GET_CODE (lhs) == MULT
2258 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2259 	    {
2260 	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2261 	      lhs = XEXP (lhs, 0);
2262 	    }
2263 	  else if (GET_CODE (lhs) == ASHIFT
2264 		   && CONST_INT_P (XEXP (lhs, 1))
2265 		   && INTVAL (XEXP (lhs, 1)) >= 0
2266 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2267 	    {
2268 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2269 					    GET_MODE_PRECISION (mode));
2270 	      lhs = XEXP (lhs, 0);
2271 	    }
2272 
2273 	  if (GET_CODE (rhs) == NEG)
2274 	    {
2275 	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2276 	      rhs = XEXP (rhs, 0);
2277 	    }
2278 	  else if (GET_CODE (rhs) == MULT
2279 		   && CONST_INT_P (XEXP (rhs, 1)))
2280 	    {
2281 	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2282 	      rhs = XEXP (rhs, 0);
2283 	    }
2284 	  else if (GET_CODE (rhs) == ASHIFT
2285 		   && CONST_INT_P (XEXP (rhs, 1))
2286 		   && INTVAL (XEXP (rhs, 1)) >= 0
2287 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2288 	    {
2289 	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2290 					       GET_MODE_PRECISION (mode));
2291 	      negcoeff1 = -negcoeff1;
2292 	      rhs = XEXP (rhs, 0);
2293 	    }
2294 
2295 	  if (rtx_equal_p (lhs, rhs))
2296 	    {
2297 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2298 	      rtx coeff;
2299 	      bool speed = optimize_function_for_speed_p (cfun);
2300 
2301 	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2302 
2303 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2304 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2305 		? tem : 0;
2306 	    }
2307 	}
2308 
2309       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2310       if (GET_CODE (op1) == NEG)
2311 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2312 
2313       /* (-x - c) may be simplified as (-c - x).  */
2314       if (GET_CODE (op0) == NEG
2315 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2316 	{
2317 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2318 	  if (tem)
2319 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2320 	}
2321 
2322       /* Don't let a relocatable value get a negative coeff.  */
2323       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2324 	return simplify_gen_binary (PLUS, mode,
2325 				    op0,
2326 				    neg_const_int (mode, op1));
2327 
2328       /* (x - (x & y)) -> (x & ~y) */
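      /* E.g. (minus (reg) (and (reg) (const_int 15))) becomes
	 (and (reg) (const_int -16)).  */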
2329       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2330 	{
2331 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2332 	    {
2333 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2334 					GET_MODE (XEXP (op1, 1)));
2335 	      return simplify_gen_binary (AND, mode, op0, tem);
2336 	    }
2337 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2338 	    {
2339 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2340 					GET_MODE (XEXP (op1, 0)));
2341 	      return simplify_gen_binary (AND, mode, op0, tem);
2342 	    }
2343 	}
2344 
2345       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2346 	 by reversing the comparison code if valid.  */
2347       if (STORE_FLAG_VALUE == 1
2348 	  && trueop0 == const1_rtx
2349 	  && COMPARISON_P (op1)
2350 	  && (reversed = reversed_comparison (op1, mode)))
2351 	return reversed;
2352 
2353       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2354       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2355 	  && GET_CODE (op1) == MULT
2356 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2357 	{
2358 	  rtx in1, in2;
2359 
2360 	  in1 = XEXP (XEXP (op1, 0), 0);
2361 	  in2 = XEXP (op1, 1);
2362 	  return simplify_gen_binary (PLUS, mode,
2363 				      simplify_gen_binary (MULT, mode,
2364 							   in1, in2),
2365 				      op0);
2366 	}
2367 
2368       /* Canonicalize (minus (neg A) (mult B C)) to
2369 	 (minus (mult (neg B) C) A).  */
2370       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2371 	  && GET_CODE (op1) == MULT
2372 	  && GET_CODE (op0) == NEG)
2373 	{
2374 	  rtx in1, in2;
2375 
2376 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2377 	  in2 = XEXP (op1, 1);
2378 	  return simplify_gen_binary (MINUS, mode,
2379 				      simplify_gen_binary (MULT, mode,
2380 							   in1, in2),
2381 				      XEXP (op0, 0));
2382 	}
2383 
2384       /* If one of the operands is a PLUS or a MINUS, see if we can
2385 	 simplify this by the associative law.  This will, for example,
2386          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2387 	 Don't use the associative law for floating point.
2388 	 The inaccuracy makes it nonassociative,
2389 	 and subtle programs can break if operations are associated.  */
2390 
2391       if (INTEGRAL_MODE_P (mode)
2392 	  && (plus_minus_operand_p (op0)
2393 	      || plus_minus_operand_p (op1))
2394 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2395 	return tem;
2396       break;
2397 
2398     case MULT:
2399       if (trueop1 == constm1_rtx)
2400 	return simplify_gen_unary (NEG, mode, op0, mode);
2401 
2402       if (GET_CODE (op0) == NEG)
2403 	{
2404 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2405 	  /* If op1 is a MULT as well and simplify_unary_operation
2406 	     just moved the NEG to the second operand, simplify_gen_binary
2407 	     below could, through simplify_associative_operation, move
2408 	     the NEG around again and recurse endlessly.  */
2409 	  if (temp
2410 	      && GET_CODE (op1) == MULT
2411 	      && GET_CODE (temp) == MULT
2412 	      && XEXP (op1, 0) == XEXP (temp, 0)
2413 	      && GET_CODE (XEXP (temp, 1)) == NEG
2414 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2415 	    temp = NULL_RTX;
2416 	  if (temp)
2417 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2418 	}
2419       if (GET_CODE (op1) == NEG)
2420 	{
2421 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2422 	  /* If op0 is a MULT as well and simplify_unary_operation
2423 	     just moved the NEG to the second operand, simplify_gen_binary
2424 	     below could, through simplify_associative_operation, move
2425 	     the NEG around again and recurse endlessly.  */
2426 	  if (temp
2427 	      && GET_CODE (op0) == MULT
2428 	      && GET_CODE (temp) == MULT
2429 	      && XEXP (op0, 0) == XEXP (temp, 0)
2430 	      && GET_CODE (XEXP (temp, 1)) == NEG
2431 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2432 	    temp = NULL_RTX;
2433 	  if (temp)
2434 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2435 	}
2436 
2437       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2438 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2439 	 when the mode has signed zeros, since multiplying a negative
2440 	 number by 0 will give -0, not 0.  */
2441       if (!HONOR_NANS (mode)
2442 	  && !HONOR_SIGNED_ZEROS (mode)
2443 	  && trueop1 == CONST0_RTX (mode)
2444 	  && ! side_effects_p (op0))
2445 	return op1;
2446 
2447       /* In IEEE floating point, x*1 is not equivalent to x for
2448 	 signalling NaNs.  */
2449       if (!HONOR_SNANS (mode)
2450 	  && trueop1 == CONST1_RTX (mode))
2451 	return op0;
2452 
2453       /* Convert multiply by constant power of two into shift.  */
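      /* E.g. (mult:SI (reg:SI) (const_int 8)) becomes
	 (ashift:SI (reg:SI) (const_int 3)).  */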
2454       if (CONST_SCALAR_INT_P (trueop1))
2455 	{
2456 	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
2457 	  if (val >= 0)
2458 	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2459 	}
2460 
2461       /* x*2 is x+x and x*(-1) is -x */
2462       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2463 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2464 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2465 	  && GET_MODE (op0) == mode)
2466 	{
2467 	  REAL_VALUE_TYPE d;
2468 	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2469 
2470 	  if (REAL_VALUES_EQUAL (d, dconst2))
2471 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2472 
2473 	  if (!HONOR_SNANS (mode)
2474 	      && REAL_VALUES_EQUAL (d, dconstm1))
2475 	    return simplify_gen_unary (NEG, mode, op0, mode);
2476 	}
2477 
2478       /* Optimize -x * -x as x * x.  */
2479       if (FLOAT_MODE_P (mode)
2480 	  && GET_CODE (op0) == NEG
2481 	  && GET_CODE (op1) == NEG
2482 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2483 	  && !side_effects_p (XEXP (op0, 0)))
2484 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2485 
2486       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2487       if (SCALAR_FLOAT_MODE_P (mode)
2488 	  && GET_CODE (op0) == ABS
2489 	  && GET_CODE (op1) == ABS
2490 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2491 	  && !side_effects_p (XEXP (op0, 0)))
2492 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2493 
2494       /* Reassociate multiplication, but for floating point MULTs
2495 	 only when the user specifies unsafe math optimizations.  */
2496       if (! FLOAT_MODE_P (mode)
2497 	  || flag_unsafe_math_optimizations)
2498 	{
2499 	  tem = simplify_associative_operation (code, mode, op0, op1);
2500 	  if (tem)
2501 	    return tem;
2502 	}
2503       break;
2504 
2505     case IOR:
2506       if (trueop1 == CONST0_RTX (mode))
2507 	return op0;
2508       if (INTEGRAL_MODE_P (mode)
2509 	  && trueop1 == CONSTM1_RTX (mode)
2510 	  && !side_effects_p (op0))
2511 	return op1;
2512       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2513 	return op0;
2514       /* A | (~A) -> -1 */
2515       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2516 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2517 	  && ! side_effects_p (op0)
2518 	  && SCALAR_INT_MODE_P (mode))
2519 	return constm1_rtx;
2520 
2521       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2522       if (CONST_INT_P (op1)
2523 	  && HWI_COMPUTABLE_MODE_P (mode)
2524 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2525 	  && !side_effects_p (op0))
2526 	return op1;
2527 
2528       /* Canonicalize (X & C1) | C2.  */
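      /* E.g. (ior (and (reg) (const_int 0xff)) (const_int 0x0f)) becomes
	 (ior (and (reg) (const_int 0xf0)) (const_int 0x0f)), since bits of
	 C1 that are already set in C2 are redundant.  */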
2529       if (GET_CODE (op0) == AND
2530 	  && CONST_INT_P (trueop1)
2531 	  && CONST_INT_P (XEXP (op0, 1)))
2532 	{
2533 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2534 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2535 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2536 
2537 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
2538 	  if ((c1 & c2) == c1
2539 	      && !side_effects_p (XEXP (op0, 0)))
2540 	    return trueop1;
2541 
2542 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2543 	  if (((c1|c2) & mask) == mask)
2544 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2545 
2546 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
2547 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2548 	    {
2549 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2550 					 gen_int_mode (c1 & ~c2, mode));
2551 	      return simplify_gen_binary (IOR, mode, tem, op1);
2552 	    }
2553 	}
2554 
2555       /* Convert (A & B) | A to A.  */
2556       if (GET_CODE (op0) == AND
2557 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2558 	      || rtx_equal_p (XEXP (op0, 1), op1))
2559 	  && ! side_effects_p (XEXP (op0, 0))
2560 	  && ! side_effects_p (XEXP (op0, 1)))
2561 	return op1;
2562 
2563       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2564          mode size to (rotate A CX).  */
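      /* E.g. (ior (ashift:SI (reg:SI) (const_int 8))
		   (lshiftrt:SI (reg:SI) (const_int 24)))
	 becomes (rotate:SI (reg:SI) (const_int 8)), since 8 + 24 == 32.  */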
2565 
2566       if (GET_CODE (op1) == ASHIFT
2567           || GET_CODE (op1) == SUBREG)
2568         {
2569 	  opleft = op1;
2570 	  opright = op0;
2571 	}
2572       else
2573         {
2574 	  opright = op1;
2575 	  opleft = op0;
2576 	}
2577 
2578       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2579           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2580           && CONST_INT_P (XEXP (opleft, 1))
2581           && CONST_INT_P (XEXP (opright, 1))
2582           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2583               == GET_MODE_PRECISION (mode)))
2584         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2585 
2586       /* Same, but for ashift that has been "simplified" to a wider mode
2587         by simplify_shift_const.  */
2588 
2589       if (GET_CODE (opleft) == SUBREG
2590           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2591           && GET_CODE (opright) == LSHIFTRT
2592           && GET_CODE (XEXP (opright, 0)) == SUBREG
2593           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2594           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2595           && (GET_MODE_SIZE (GET_MODE (opleft))
2596               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2597           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2598                           SUBREG_REG (XEXP (opright, 0)))
2599           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2600           && CONST_INT_P (XEXP (opright, 1))
2601           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2602               == GET_MODE_PRECISION (mode)))
2603         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2604                                XEXP (SUBREG_REG (opleft), 1));
2605 
2606       /* If we have (ior (and X C1) C2), simplify this by making
2607 	 C1 as small as possible if C1 actually changes.  */
2608       if (CONST_INT_P (op1)
2609 	  && (HWI_COMPUTABLE_MODE_P (mode)
2610 	      || INTVAL (op1) > 0)
2611 	  && GET_CODE (op0) == AND
2612 	  && CONST_INT_P (XEXP (op0, 1))
2613 	  && CONST_INT_P (op1)
2614 	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2615 	{
2616 	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2617 					 gen_int_mode (UINTVAL (XEXP (op0, 1))
2618 						       & ~UINTVAL (op1),
2619 						       mode));
2620 	  return simplify_gen_binary (IOR, mode, tmp, op1);
2621 	}
2622 
2623       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2624          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2625 	 the PLUS does not affect any of the bits in OP1: then we can do
2626 	 the IOR as a PLUS and we can associate.  This is valid if OP1
2627          can be safely shifted left C bits.  */
2628       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2629           && GET_CODE (XEXP (op0, 0)) == PLUS
2630           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2631           && CONST_INT_P (XEXP (op0, 1))
2632           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2633         {
2634           int count = INTVAL (XEXP (op0, 1));
2635           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2636 
2637           if (mask >> count == INTVAL (trueop1)
2638 	      && trunc_int_for_mode (mask, mode) == mask
2639               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2640 	    return simplify_gen_binary (ASHIFTRT, mode,
2641 					plus_constant (mode, XEXP (op0, 0),
2642 						       mask),
2643 					XEXP (op0, 1));
2644         }
2645 
2646       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2647       if (tem)
2648 	return tem;
2649 
2650       tem = simplify_associative_operation (code, mode, op0, op1);
2651       if (tem)
2652 	return tem;
2653       break;
2654 
2655     case XOR:
2656       if (trueop1 == CONST0_RTX (mode))
2657 	return op0;
2658       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2659 	return simplify_gen_unary (NOT, mode, op0, mode);
2660       if (rtx_equal_p (trueop0, trueop1)
2661 	  && ! side_effects_p (op0)
2662 	  && GET_MODE_CLASS (mode) != MODE_CC)
2663 	 return CONST0_RTX (mode);
2664 
2665       /* Canonicalize XOR of the most significant bit to PLUS.  */
2666       if (CONST_SCALAR_INT_P (op1)
2667 	  && mode_signbit_p (mode, op1))
2668 	return simplify_gen_binary (PLUS, mode, op0, op1);
2669       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2670       if (CONST_SCALAR_INT_P (op1)
2671 	  && GET_CODE (op0) == PLUS
2672 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2673 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2674 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2675 				    simplify_gen_binary (XOR, mode, op1,
2676 							 XEXP (op0, 1)));
2677 
2678       /* If we are XORing two things that have no bits in common,
2679 	 convert them into an IOR.  This helps to detect rotation encoded
2680 	 using those methods and possibly other simplifications.  */
2681 
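      /* E.g. (xor (and (reg) (const_int -16)) (const_int 7)) becomes
	 (ior (and (reg) (const_int -16)) (const_int 7)), as the two
	 operands have no nonzero bits in common.  */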
2682       if (HWI_COMPUTABLE_MODE_P (mode)
2683 	  && (nonzero_bits (op0, mode)
2684 	      & nonzero_bits (op1, mode)) == 0)
2685 	return (simplify_gen_binary (IOR, mode, op0, op1));
2686 
2687       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2688 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2689 	 (NOT y).  */
2690       {
2691 	int num_negated = 0;
2692 
2693 	if (GET_CODE (op0) == NOT)
2694 	  num_negated++, op0 = XEXP (op0, 0);
2695 	if (GET_CODE (op1) == NOT)
2696 	  num_negated++, op1 = XEXP (op1, 0);
2697 
2698 	if (num_negated == 2)
2699 	  return simplify_gen_binary (XOR, mode, op0, op1);
2700 	else if (num_negated == 1)
2701 	  return simplify_gen_unary (NOT, mode,
2702 				     simplify_gen_binary (XOR, mode, op0, op1),
2703 				     mode);
2704       }
2705 
2706       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2707 	 correspond to a machine insn or result in further simplifications
2708 	 if B is a constant.  */
2709 
2710       if (GET_CODE (op0) == AND
2711 	  && rtx_equal_p (XEXP (op0, 1), op1)
2712 	  && ! side_effects_p (op1))
2713 	return simplify_gen_binary (AND, mode,
2714 				    simplify_gen_unary (NOT, mode,
2715 							XEXP (op0, 0), mode),
2716 				    op1);
2717 
2718       else if (GET_CODE (op0) == AND
2719 	       && rtx_equal_p (XEXP (op0, 0), op1)
2720 	       && ! side_effects_p (op1))
2721 	return simplify_gen_binary (AND, mode,
2722 				    simplify_gen_unary (NOT, mode,
2723 							XEXP (op0, 1), mode),
2724 				    op1);
2725 
2726       /* Given (xor (ior (xor A B) C) D), where B, C and D are
2727 	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2728 	 out bits inverted twice and not set by C.  Similarly, given
2729 	 (xor (and (xor A B) C) D), simplify without inverting C in
2730 	 the xor operand: (xor (and A C) (B&C)^D).
2731       */
2732       else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2733 	       && GET_CODE (XEXP (op0, 0)) == XOR
2734 	       && CONST_INT_P (op1)
2735 	       && CONST_INT_P (XEXP (op0, 1))
2736 	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2737 	{
2738 	  enum rtx_code op = GET_CODE (op0);
2739 	  rtx a = XEXP (XEXP (op0, 0), 0);
2740 	  rtx b = XEXP (XEXP (op0, 0), 1);
2741 	  rtx c = XEXP (op0, 1);
2742 	  rtx d = op1;
2743 	  HOST_WIDE_INT bval = INTVAL (b);
2744 	  HOST_WIDE_INT cval = INTVAL (c);
2745 	  HOST_WIDE_INT dval = INTVAL (d);
2746 	  HOST_WIDE_INT xcval;
2747 
2748 	  if (op == IOR)
2749 	    xcval = ~cval;
2750 	  else
2751 	    xcval = cval;
2752 
2753 	  return simplify_gen_binary (XOR, mode,
2754 				      simplify_gen_binary (op, mode, a, c),
2755 				      gen_int_mode ((bval & xcval) ^ dval,
2756 						    mode));
2757 	}
2758 
2759       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2760 	 we can transform like this:
2761             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2762                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2763                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2764 	 Attempt a few simplifications when B and C are both constants.  */
2765       if (GET_CODE (op0) == AND
2766 	  && CONST_INT_P (op1)
2767 	  && CONST_INT_P (XEXP (op0, 1)))
2768 	{
2769 	  rtx a = XEXP (op0, 0);
2770 	  rtx b = XEXP (op0, 1);
2771 	  rtx c = op1;
2772 	  HOST_WIDE_INT bval = INTVAL (b);
2773 	  HOST_WIDE_INT cval = INTVAL (c);
2774 
2775 	  /* Instead of computing ~A&C, we compute its negation,
2776 	     A|~C.  If that yields -1, ~A&C is zero, so we can
2777 	     optimize for sure.  If it does not simplify, we still try
2778 	     to compute ~A&C below, but since that always allocates
2779 	     RTL, we don't try that before committing to returning a
2780 	     simplified expression.  */
2781 	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2782 						  GEN_INT (~cval));
2783 
2784 	  if ((~cval & bval) == 0)
2785 	    {
2786 	      rtx na_c = NULL_RTX;
2787 	      if (n_na_c)
2788 		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2789 	      else
2790 		{
2791 		  /* If ~A does not simplify, don't bother: we don't
2792 		     want to simplify 2 operations into 3, and if na_c
2793 		     were to simplify with na, n_na_c would have
2794 		     simplified as well.  */
2795 		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
2796 		  if (na)
2797 		    na_c = simplify_gen_binary (AND, mode, na, c);
2798 		}
2799 
2800 	      /* Try to simplify ~A&C | ~B&C.  */
2801 	      if (na_c != NULL_RTX)
2802 		return simplify_gen_binary (IOR, mode, na_c,
2803 					    gen_int_mode (~bval & cval, mode));
2804 	    }
2805 	  else
2806 	    {
2807 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2808 	      if (n_na_c == CONSTM1_RTX (mode))
2809 		{
2810 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2811 						    gen_int_mode (~cval & bval,
2812 								  mode));
2813 		  return simplify_gen_binary (IOR, mode, a_nc_b,
2814 					      gen_int_mode (~bval & cval,
2815 							    mode));
2816 		}
2817 	    }
2818 	}
2819 
2820       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2821 	 comparison if STORE_FLAG_VALUE is 1.  */
2822       if (STORE_FLAG_VALUE == 1
2823 	  && trueop1 == const1_rtx
2824 	  && COMPARISON_P (op0)
2825 	  && (reversed = reversed_comparison (op0, mode)))
2826 	return reversed;
2827 
2828       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2829 	 is (lt foo (const_int 0)), so we can perform the above
2830 	 simplification if STORE_FLAG_VALUE is 1.  */
2831 
2832       if (STORE_FLAG_VALUE == 1
2833 	  && trueop1 == const1_rtx
2834 	  && GET_CODE (op0) == LSHIFTRT
2835 	  && CONST_INT_P (XEXP (op0, 1))
2836 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2837 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2838 
2839       /* (xor (comparison foo bar) (const_int sign-bit))
2840 	 when STORE_FLAG_VALUE is the sign bit.  */
2841       if (val_signbit_p (mode, STORE_FLAG_VALUE)
2842 	  && trueop1 == const_true_rtx
2843 	  && COMPARISON_P (op0)
2844 	  && (reversed = reversed_comparison (op0, mode)))
2845 	return reversed;
2846 
2847       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2848       if (tem)
2849 	return tem;
2850 
2851       tem = simplify_associative_operation (code, mode, op0, op1);
2852       if (tem)
2853 	return tem;
2854       break;
2855 
2856     case AND:
2857       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2858 	return trueop1;
2859       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2860 	return op0;
2861       if (HWI_COMPUTABLE_MODE_P (mode))
2862 	{
2863 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2864 	  HOST_WIDE_INT nzop1;
2865 	  if (CONST_INT_P (trueop1))
2866 	    {
2867 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
2868 	      /* If we are turning off bits already known off in OP0, we need
2869 		 not do an AND.  */
2870 	      if ((nzop0 & ~val1) == 0)
2871 		return op0;
2872 	    }
2873 	  nzop1 = nonzero_bits (trueop1, mode);
2874 	  /* If we are clearing all the nonzero bits, the result is zero.  */
2875 	  if ((nzop1 & nzop0) == 0
2876 	      && !side_effects_p (op0) && !side_effects_p (op1))
2877 	    return CONST0_RTX (mode);
2878 	}
2879       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2880 	  && GET_MODE_CLASS (mode) != MODE_CC)
2881 	return op0;
2882       /* A & (~A) -> 0 */
2883       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2884 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2885 	  && ! side_effects_p (op0)
2886 	  && GET_MODE_CLASS (mode) != MODE_CC)
2887 	return CONST0_RTX (mode);
2888 
2889       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2890 	 there are no nonzero bits of C outside of X's mode.  */
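      /* E.g. (and:SI (sign_extend:SI (reg:QI)) (const_int 0x7f)) becomes
	 (zero_extend:SI (and:QI (reg:QI) (const_int 0x7f))).  */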
2891       if ((GET_CODE (op0) == SIGN_EXTEND
2892 	   || GET_CODE (op0) == ZERO_EXTEND)
2893 	  && CONST_INT_P (trueop1)
2894 	  && HWI_COMPUTABLE_MODE_P (mode)
2895 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2896 	      & UINTVAL (trueop1)) == 0)
2897 	{
2898 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
2899 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2900 				     gen_int_mode (INTVAL (trueop1),
2901 						   imode));
2902 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2903 	}
2904 
2905       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2906 	 we might be able to further simplify the AND with X and potentially
2907 	 remove the truncation altogether.  */
2908       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2909 	{
2910 	  rtx x = XEXP (op0, 0);
2911 	  machine_mode xmode = GET_MODE (x);
2912 	  tem = simplify_gen_binary (AND, xmode, x,
2913 				     gen_int_mode (INTVAL (trueop1), xmode));
2914 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2915 	}
2916 
2917       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
2918       if (GET_CODE (op0) == IOR
2919 	  && CONST_INT_P (trueop1)
2920 	  && CONST_INT_P (XEXP (op0, 1)))
2921 	{
2922 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2923 	  return simplify_gen_binary (IOR, mode,
2924 				      simplify_gen_binary (AND, mode,
2925 							   XEXP (op0, 0), op1),
2926 				      gen_int_mode (tmp, mode));
2927 	}
2928 
2929       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2930 	 insn (and may simplify more).  */
2931       if (GET_CODE (op0) == XOR
2932 	  && rtx_equal_p (XEXP (op0, 0), op1)
2933 	  && ! side_effects_p (op1))
2934 	return simplify_gen_binary (AND, mode,
2935 				    simplify_gen_unary (NOT, mode,
2936 							XEXP (op0, 1), mode),
2937 				    op1);
2938 
2939       if (GET_CODE (op0) == XOR
2940 	  && rtx_equal_p (XEXP (op0, 1), op1)
2941 	  && ! side_effects_p (op1))
2942 	return simplify_gen_binary (AND, mode,
2943 				    simplify_gen_unary (NOT, mode,
2944 							XEXP (op0, 0), mode),
2945 				    op1);
2946 
2947       /* Similarly for (~(A ^ B)) & A.  */
2948       if (GET_CODE (op0) == NOT
2949 	  && GET_CODE (XEXP (op0, 0)) == XOR
2950 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2951 	  && ! side_effects_p (op1))
2952 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2953 
2954       if (GET_CODE (op0) == NOT
2955 	  && GET_CODE (XEXP (op0, 0)) == XOR
2956 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2957 	  && ! side_effects_p (op1))
2958 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2959 
2960       /* Convert (A | B) & A to A.  */
2961       if (GET_CODE (op0) == IOR
2962 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2963 	      || rtx_equal_p (XEXP (op0, 1), op1))
2964 	  && ! side_effects_p (XEXP (op0, 0))
2965 	  && ! side_effects_p (XEXP (op0, 1)))
2966 	return op1;
2967 
2968       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2969 	 ((A & N) + B) & M -> (A + B) & M
2970 	 Similarly if (N & M) == 0,
2971 	 ((A | N) + B) & M -> (A + B) & M
2972 	 and for - instead of + and/or ^ instead of |.
2973          Also, if (N & M) == 0, then
2974 	 (A +- N) & M -> A & M.  */
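      /* E.g. with M == 0xff and N == 0xffff,
	 (and (plus (and (reg) (const_int 0xffff)) (reg)) (const_int 0xff))
	 becomes (and (plus (reg) (reg)) (const_int 0xff)), since carries
	 never propagate from high bits down into the low byte.  */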
2975       if (CONST_INT_P (trueop1)
2976 	  && HWI_COMPUTABLE_MODE_P (mode)
2977 	  && ~UINTVAL (trueop1)
2978 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2979 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2980 	{
2981 	  rtx pmop[2];
2982 	  int which;
2983 
2984 	  pmop[0] = XEXP (op0, 0);
2985 	  pmop[1] = XEXP (op0, 1);
2986 
2987 	  if (CONST_INT_P (pmop[1])
2988 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2989 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
2990 
2991 	  for (which = 0; which < 2; which++)
2992 	    {
2993 	      tem = pmop[which];
2994 	      switch (GET_CODE (tem))
2995 		{
2996 		case AND:
2997 		  if (CONST_INT_P (XEXP (tem, 1))
2998 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2999 		      == UINTVAL (trueop1))
3000 		    pmop[which] = XEXP (tem, 0);
3001 		  break;
3002 		case IOR:
3003 		case XOR:
3004 		  if (CONST_INT_P (XEXP (tem, 1))
3005 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3006 		    pmop[which] = XEXP (tem, 0);
3007 		  break;
3008 		default:
3009 		  break;
3010 		}
3011 	    }
3012 
3013 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3014 	    {
3015 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3016 					 pmop[0], pmop[1]);
3017 	      return simplify_gen_binary (code, mode, tem, op1);
3018 	    }
3019 	}
3020 
3021       /* (and X (ior (not X) Y)) -> (and X Y) */
3022       if (GET_CODE (op1) == IOR
3023 	  && GET_CODE (XEXP (op1, 0)) == NOT
3024 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3025        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3026 
3027       /* (and (ior (not X) Y) X) -> (and X Y) */
3028       if (GET_CODE (op0) == IOR
3029 	  && GET_CODE (XEXP (op0, 0)) == NOT
3030 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3031 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3032 
3033       /* (and X (ior Y (not X))) -> (and X Y) */
3034       if (GET_CODE (op1) == IOR
3035 	  && GET_CODE (XEXP (op1, 1)) == NOT
3036 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3037        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3038 
3039       /* (and (ior Y (not X)) X) -> (and X Y) */
3040       if (GET_CODE (op0) == IOR
3041 	  && GET_CODE (XEXP (op0, 1)) == NOT
3042 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3043 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3044 
3045       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3046       if (tem)
3047 	return tem;
3048 
3049       tem = simplify_associative_operation (code, mode, op0, op1);
3050       if (tem)
3051 	return tem;
3052       break;
3053 
3054     case UDIV:
3055       /* 0/x is 0 (or x&0 if x has side-effects).  */
3056       if (trueop0 == CONST0_RTX (mode))
3057 	{
3058 	  if (side_effects_p (op1))
3059 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3060 	  return trueop0;
3061 	}
3062       /* x/1 is x.  */
3063       if (trueop1 == CONST1_RTX (mode))
3064 	{
3065 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3066 	  if (tem)
3067 	    return tem;
3068 	}
3069       /* Convert divide by power of two into shift.  */
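      /* E.g. (udiv:SI (reg:SI) (const_int 16)) becomes
	 (lshiftrt:SI (reg:SI) (const_int 4)).  */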
3070       if (CONST_INT_P (trueop1)
3071 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3072 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3073       break;
3074 
3075     case DIV:
3076       /* Handle floating point and integers separately.  */
3077       if (SCALAR_FLOAT_MODE_P (mode))
3078 	{
3079 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3080 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3081 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3082 	     zeros, since dividing 0 by a negative number gives -0.0  */
3083 	  if (trueop0 == CONST0_RTX (mode)
3084 	      && !HONOR_NANS (mode)
3085 	      && !HONOR_SIGNED_ZEROS (mode)
3086 	      && ! side_effects_p (op1))
3087 	    return op0;
3088 	  /* x/1.0 is x.  */
3089 	  if (trueop1 == CONST1_RTX (mode)
3090 	      && !HONOR_SNANS (mode))
3091 	    return op0;
3092 
3093 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3094 	      && trueop1 != CONST0_RTX (mode))
3095 	    {
3096 	      REAL_VALUE_TYPE d;
3097 	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3098 
3099 	      /* x/-1.0 is -x.  */
3100 	      if (REAL_VALUES_EQUAL (d, dconstm1)
3101 		  && !HONOR_SNANS (mode))
3102 		return simplify_gen_unary (NEG, mode, op0, mode);
3103 
3104 	      /* Change FP division by a constant into multiplication.
3105 		 Only do this with -freciprocal-math.  */
3106 	      if (flag_reciprocal_math
3107 		  && !REAL_VALUES_EQUAL (d, dconst0))
3108 		{
3109 		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3110 		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3111 		  return simplify_gen_binary (MULT, mode, op0, tem);
3112 		}
3113 	    }
3114 	}
3115       else if (SCALAR_INT_MODE_P (mode))
3116 	{
3117 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3118 	  if (trueop0 == CONST0_RTX (mode)
3119 	      && !cfun->can_throw_non_call_exceptions)
3120 	    {
3121 	      if (side_effects_p (op1))
3122 		return simplify_gen_binary (AND, mode, op1, trueop0);
3123 	      return trueop0;
3124 	    }
3125 	  /* x/1 is x.  */
3126 	  if (trueop1 == CONST1_RTX (mode))
3127 	    {
3128 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3129 	      if (tem)
3130 		return tem;
3131 	    }
3132 	  /* x/-1 is -x.  */
3133 	  if (trueop1 == constm1_rtx)
3134 	    {
3135 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3136 	      if (x)
3137 		return simplify_gen_unary (NEG, mode, x, mode);
3138 	    }
3139 	}
3140       break;
3141 
3142     case UMOD:
3143       /* 0%x is 0 (or x&0 if x has side-effects).  */
3144       if (trueop0 == CONST0_RTX (mode))
3145 	{
3146 	  if (side_effects_p (op1))
3147 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3148 	  return trueop0;
3149 	}
3150       /* x%1 is 0 (or x&0 if x has side-effects).  */
3151       if (trueop1 == CONST1_RTX (mode))
3152 	{
3153 	  if (side_effects_p (op0))
3154 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3155 	  return CONST0_RTX (mode);
3156 	}
3157       /* Implement modulus by power of two as AND.  */
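      /* For example, (umod:M x (const_int 16)) becomes
	 (and:M x (const_int 15)).  */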
3158       if (CONST_INT_P (trueop1)
3159 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3160 	return simplify_gen_binary (AND, mode, op0,
3161 				    gen_int_mode (INTVAL (op1) - 1, mode));
3162       break;
3163 
3164     case MOD:
3165       /* 0%x is 0 (or x&0 if x has side-effects).  */
3166       if (trueop0 == CONST0_RTX (mode))
3167 	{
3168 	  if (side_effects_p (op1))
3169 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3170 	  return trueop0;
3171 	}
3172       /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
3173       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3174 	{
3175 	  if (side_effects_p (op0))
3176 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3177 	  return CONST0_RTX (mode);
3178 	}
3179       break;
3180 
3181     case ROTATERT:
3182     case ROTATE:
3183       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3184 	 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3185 	 bitsize - 1, use the other rotate direction with an amount of
3186 	 1 .. bitsize / 2 - 1 instead.  */
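      /* For example, in a 32-bit mode (rotate x (const_int 20)) becomes
	 (rotatert x (const_int 12)), and (rotatert x (const_int 16)) becomes
	 (rotate x (const_int 16)).  */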
3187 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3188       if (CONST_INT_P (trueop1)
3189 	  && IN_RANGE (INTVAL (trueop1),
3190 		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3191 		       GET_MODE_PRECISION (mode) - 1))
3192 	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3193 				    mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3194 							- INTVAL (trueop1)));
3195 #endif
3196       /* FALLTHRU */
3197     case ASHIFTRT:
3198       if (trueop1 == CONST0_RTX (mode))
3199 	return op0;
3200       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3201 	return op0;
3202       /* Rotating ~0 always results in ~0.  */
3203       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3204 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3205 	  && ! side_effects_p (op1))
3206 	return op0;
3207       /* Given:
3208 	 scalar modes M1, M2
3209 	 scalar constants c1, c2
3210 	 size (M2) > size (M1)
3211 	 c1 == size (M2) - size (M1)
3212 	 optimize:
3213 	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3214 				 <low_part>)
3215 		      (const_int <c2>))
3216 	 to:
3217 	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3218 		    <low_part>).  */
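      /* As a concrete (illustrative) instance, with M1 == SImode,
	 M2 == DImode, c1 == 32 and c2 == 3 this rewrites
	 (ashiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32))
				 <low_part>)
		      (const_int 3))
	 as
	 (subreg:SI (ashiftrt:DI (reg:DI x) (const_int 35)) <low_part>).  */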
3219       if (code == ASHIFTRT
3220 	  && !VECTOR_MODE_P (mode)
3221 	  && SUBREG_P (op0)
3222 	  && CONST_INT_P (op1)
3223 	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3224 	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3225 	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3226 	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3227 	      > GET_MODE_BITSIZE (mode))
3228 	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3229 	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3230 		  - GET_MODE_BITSIZE (mode)))
3231 	  && subreg_lowpart_p (op0))
3232 	{
3233 	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3234 			     + INTVAL (op1));
3235 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3236 	  tmp = simplify_gen_binary (ASHIFTRT,
3237 				     GET_MODE (SUBREG_REG (op0)),
3238 				     XEXP (SUBREG_REG (op0), 0),
3239 				     tmp);
3240 	  return simplify_gen_subreg (mode, tmp, inner_mode,
3241 				      subreg_lowpart_offset (mode,
3242 							     inner_mode));
3243 	}
3244     canonicalize_shift:
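      /* On targets where SHIFT_COUNT_TRUNCATED is set, reduce the count
	 modulo the precision; e.g. in a 32-bit mode
	 (ashift x (const_int 33)) is canonicalized to
	 (ashift x (const_int 1)).  */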
3245       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3246 	{
3247 	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3248 	  if (val != INTVAL (op1))
3249 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3250 	}
3251       break;
3252 
3253     case ASHIFT:
3254     case SS_ASHIFT:
3255     case US_ASHIFT:
3256       if (trueop1 == CONST0_RTX (mode))
3257 	return op0;
3258       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3259 	return op0;
3260       goto canonicalize_shift;
3261 
3262     case LSHIFTRT:
3263       if (trueop1 == CONST0_RTX (mode))
3264 	return op0;
3265       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3266 	return op0;
3267       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
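      /* For example, if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for a 32-bit X,
	 (clz X) is at most 31 for nonzero X and exactly 32 for X == 0, so
	 (lshiftrt (clz X) 5) is 1 precisely when X == 0; with
	 STORE_FLAG_VALUE == 1 that is (eq X 0).  */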
3268       if (GET_CODE (op0) == CLZ
3269 	  && CONST_INT_P (trueop1)
3270 	  && STORE_FLAG_VALUE == 1
3271 	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3272 	{
3273 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
3274 	  unsigned HOST_WIDE_INT zero_val = 0;
3275 
3276 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3277 	      && zero_val == GET_MODE_PRECISION (imode)
3278 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3279 	    return simplify_gen_relational (EQ, mode, imode,
3280 					    XEXP (op0, 0), const0_rtx);
3281 	}
3282       goto canonicalize_shift;
3283 
3284     case SMIN:
3285       if (width <= HOST_BITS_PER_WIDE_INT
3286 	  && mode_signbit_p (mode, trueop1)
3287 	  && ! side_effects_p (op0))
3288 	return op1;
3289       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3290 	return op0;
3291       tem = simplify_associative_operation (code, mode, op0, op1);
3292       if (tem)
3293 	return tem;
3294       break;
3295 
3296     case SMAX:
3297       if (width <= HOST_BITS_PER_WIDE_INT
3298 	  && CONST_INT_P (trueop1)
3299 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3300 	  && ! side_effects_p (op0))
3301 	return op1;
3302       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3303 	return op0;
3304       tem = simplify_associative_operation (code, mode, op0, op1);
3305       if (tem)
3306 	return tem;
3307       break;
3308 
3309     case UMIN:
3310       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3311 	return op1;
3312       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3313 	return op0;
3314       tem = simplify_associative_operation (code, mode, op0, op1);
3315       if (tem)
3316 	return tem;
3317       break;
3318 
3319     case UMAX:
3320       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3321 	return op1;
3322       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3323 	return op0;
3324       tem = simplify_associative_operation (code, mode, op0, op1);
3325       if (tem)
3326 	return tem;
3327       break;
3328 
3329     case SS_PLUS:
3330     case US_PLUS:
3331     case SS_MINUS:
3332     case US_MINUS:
3333     case SS_MULT:
3334     case US_MULT:
3335     case SS_DIV:
3336     case US_DIV:
3337       /* ??? There are simplifications that can be done.  */
3338       return 0;
3339 
3340     case VEC_SELECT:
3341       if (!VECTOR_MODE_P (mode))
3342 	{
3343 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3344 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3345 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3346 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3347 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3348 
3349 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3350 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3351 						      (trueop1, 0, 0)));
3352 
3353 	  /* Extract a scalar element from a nested VEC_SELECT expression
3354 	     (with an optional nested VEC_CONCAT expression).  Some targets
3355 	     (i386) extract a scalar element from a vector using a chain of
3356 	     nested VEC_SELECT expressions.  When the input operand is a
3357 	     memory operand, this operation can be simplified to a simple
3358 	     scalar load from an offset memory address.  */
3359 	  if (GET_CODE (trueop0) == VEC_SELECT)
3360 	    {
3361 	      rtx op0 = XEXP (trueop0, 0);
3362 	      rtx op1 = XEXP (trueop0, 1);
3363 
3364 	      machine_mode opmode = GET_MODE (op0);
3365 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3366 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3367 
3368 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3369 	      int elem;
3370 
3371 	      rtvec vec;
3372 	      rtx tmp_op, tmp;
3373 
3374 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3375 	      gcc_assert (i < n_elts);
3376 
3377 	      /* Select the element pointed to by the nested selector.  */
3378 	      elem = INTVAL (XVECEXP (op1, 0, i));
3379 
3380 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3381 	      if (GET_CODE (op0) == VEC_CONCAT)
3382 		{
3383 		  rtx op00 = XEXP (op0, 0);
3384 		  rtx op01 = XEXP (op0, 1);
3385 
3386 		  machine_mode mode00, mode01;
3387 		  int n_elts00, n_elts01;
3388 
3389 		  mode00 = GET_MODE (op00);
3390 		  mode01 = GET_MODE (op01);
3391 
3392 		  /* Find out number of elements of each operand.  */
3393 		  if (VECTOR_MODE_P (mode00))
3394 		    {
3395 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3396 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3397 		    }
3398 		  else
3399 		    n_elts00 = 1;
3400 
3401 		  if (VECTOR_MODE_P (mode01))
3402 		    {
3403 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3404 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3405 		    }
3406 		  else
3407 		    n_elts01 = 1;
3408 
3409 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3410 
3411 		  /* Select the correct operand of the VEC_CONCAT
3412 		     and adjust the selector.  */
3413 		  if (elem < n_elts01)
3414 		    tmp_op = op00;
3415 		  else
3416 		    {
3417 		      tmp_op = op01;
3418 		      elem -= n_elts00;
3419 		    }
3420 		}
3421 	      else
3422 		tmp_op = op0;
3423 
3424 	      vec = rtvec_alloc (1);
3425 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3426 
3427 	      tmp = gen_rtx_fmt_ee (code, mode,
3428 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3429 	      return tmp;
3430 	    }
3431 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
3432 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
3433 	    return XEXP (trueop0, 0);
3434 	}
3435       else
3436 	{
3437 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3438 	  gcc_assert (GET_MODE_INNER (mode)
3439 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3440 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3441 
3442 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3443 	    {
3444 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3445 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3446 	      rtvec v = rtvec_alloc (n_elts);
3447 	      unsigned int i;
3448 
3449 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3450 	      for (i = 0; i < n_elts; i++)
3451 		{
3452 		  rtx x = XVECEXP (trueop1, 0, i);
3453 
3454 		  gcc_assert (CONST_INT_P (x));
3455 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3456 						       INTVAL (x));
3457 		}
3458 
3459 	      return gen_rtx_CONST_VECTOR (mode, v);
3460 	    }
3461 
3462 	  /* Recognize the identity.  */
3463 	  if (GET_MODE (trueop0) == mode)
3464 	    {
3465 	      bool maybe_ident = true;
3466 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3467 		{
3468 		  rtx j = XVECEXP (trueop1, 0, i);
3469 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
3470 		    {
3471 		      maybe_ident = false;
3472 		      break;
3473 		    }
3474 		}
3475 	      if (maybe_ident)
3476 		return trueop0;
3477 	    }
3478 
3479 	  /* If we build {a,b} then permute it, build the result directly.  */
3480 	  if (XVECLEN (trueop1, 0) == 2
3481 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3482 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3483 	      && GET_CODE (trueop0) == VEC_CONCAT
3484 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3485 	      && GET_MODE (XEXP (trueop0, 0)) == mode
3486 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3487 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
3488 	    {
3489 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3490 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3491 	      rtx subop0, subop1;
3492 
3493 	      gcc_assert (i0 < 4 && i1 < 4);
3494 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3495 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3496 
3497 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3498 	    }
3499 
3500 	  if (XVECLEN (trueop1, 0) == 2
3501 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3502 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3503 	      && GET_CODE (trueop0) == VEC_CONCAT
3504 	      && GET_MODE (trueop0) == mode)
3505 	    {
3506 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3507 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3508 	      rtx subop0, subop1;
3509 
3510 	      gcc_assert (i0 < 2 && i1 < 2);
3511 	      subop0 = XEXP (trueop0, i0);
3512 	      subop1 = XEXP (trueop0, i1);
3513 
3514 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3515 	    }
3516 
3517 	  /* If we select one half of a vec_concat, return that.  */
3518 	  if (GET_CODE (trueop0) == VEC_CONCAT
3519 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3520 	    {
3521 	      rtx subop0 = XEXP (trueop0, 0);
3522 	      rtx subop1 = XEXP (trueop0, 1);
3523 	      machine_mode mode0 = GET_MODE (subop0);
3524 	      machine_mode mode1 = GET_MODE (subop1);
3525 	      int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3526 	      int l0 = GET_MODE_SIZE (mode0) / li;
3527 	      int l1 = GET_MODE_SIZE (mode1) / li;
3528 	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3529 	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3530 		{
3531 		  bool success = true;
3532 		  for (int i = 1; i < l0; ++i)
3533 		    {
3534 		      rtx j = XVECEXP (trueop1, 0, i);
3535 		      if (!CONST_INT_P (j) || INTVAL (j) != i)
3536 			{
3537 			  success = false;
3538 			  break;
3539 			}
3540 		    }
3541 		  if (success)
3542 		    return subop0;
3543 		}
3544 	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3545 		{
3546 		  bool success = true;
3547 		  for (int i = 1; i < l1; ++i)
3548 		    {
3549 		      rtx j = XVECEXP (trueop1, 0, i);
3550 		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3551 			{
3552 			  success = false;
3553 			  break;
3554 			}
3555 		    }
3556 		  if (success)
3557 		    return subop1;
3558 		}
3559 	    }
3560 	}
3561 
3562       if (XVECLEN (trueop1, 0) == 1
3563 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3564 	  && GET_CODE (trueop0) == VEC_CONCAT)
3565 	{
3566 	  rtx vec = trueop0;
3567 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3568 
3569 	  /* Try to find the element in the VEC_CONCAT.  */
3570 	  while (GET_MODE (vec) != mode
3571 		 && GET_CODE (vec) == VEC_CONCAT)
3572 	    {
3573 	      HOST_WIDE_INT vec_size;
3574 
3575 	      if (CONST_INT_P (XEXP (vec, 0)))
3576 	        {
3577 	          /* vec_concat of two const_ints doesn't make sense with
3578 	             respect to modes.  */
3579 	          if (CONST_INT_P (XEXP (vec, 1)))
3580 	            return 0;
3581 
3582 	          vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3583 	                     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3584 	        }
3585 	      else
3586 	        vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3587 
3588 	      if (offset < vec_size)
3589 		vec = XEXP (vec, 0);
3590 	      else
3591 		{
3592 		  offset -= vec_size;
3593 		  vec = XEXP (vec, 1);
3594 		}
3595 	      vec = avoid_constant_pool_reference (vec);
3596 	    }
3597 
3598 	  if (GET_MODE (vec) == mode)
3599 	    return vec;
3600 	}
3601 
3602       /* If we select elements in a vec_merge that all come from the same
3603 	 operand, select from that operand directly.  */
3604       if (GET_CODE (op0) == VEC_MERGE)
3605 	{
3606 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3607 	  if (CONST_INT_P (trueop02))
3608 	    {
3609 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3610 	      bool all_operand0 = true;
3611 	      bool all_operand1 = true;
3612 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3613 		{
3614 		  rtx j = XVECEXP (trueop1, 0, i);
3615 		  if (sel & (1 << UINTVAL (j)))
3616 		    all_operand1 = false;
3617 		  else
3618 		    all_operand0 = false;
3619 		}
3620 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3621 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3622 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3623 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3624 	    }
3625 	}
3626 
3627       /* If we have two nested selects that are inverses of each
3628 	 other, replace them with the source operand.  */
3629       if (GET_CODE (trueop0) == VEC_SELECT
3630 	  && GET_MODE (XEXP (trueop0, 0)) == mode)
3631 	{
3632 	  rtx op0_subop1 = XEXP (trueop0, 1);
3633 	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3634 	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3635 
3636 	  /* Apply the outer ordering vector to the inner one.  (The inner
3637 	     ordering vector is expressly permitted to be of a different
3638 	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
3639 	     then the two VEC_SELECTs cancel.  */
3640 	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3641 	    {
3642 	      rtx x = XVECEXP (trueop1, 0, i);
3643 	      if (!CONST_INT_P (x))
3644 		return 0;
3645 	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3646 	      if (!CONST_INT_P (y) || i != INTVAL (y))
3647 		return 0;
3648 	    }
3649 	  return XEXP (trueop0, 0);
3650 	}
3651 
3652       return 0;
3653     case VEC_CONCAT:
3654       {
3655 	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3656 				      ? GET_MODE (trueop0)
3657 				      : GET_MODE_INNER (mode));
3658 	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3659 				      ? GET_MODE (trueop1)
3660 				      : GET_MODE_INNER (mode));
3661 
3662 	gcc_assert (VECTOR_MODE_P (mode));
3663 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3664 		    == GET_MODE_SIZE (mode));
3665 
3666 	if (VECTOR_MODE_P (op0_mode))
3667 	  gcc_assert (GET_MODE_INNER (mode)
3668 		      == GET_MODE_INNER (op0_mode));
3669 	else
3670 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3671 
3672 	if (VECTOR_MODE_P (op1_mode))
3673 	  gcc_assert (GET_MODE_INNER (mode)
3674 		      == GET_MODE_INNER (op1_mode));
3675 	else
3676 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3677 
3678 	if ((GET_CODE (trueop0) == CONST_VECTOR
3679 	     || CONST_SCALAR_INT_P (trueop0)
3680 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3681 	    && (GET_CODE (trueop1) == CONST_VECTOR
3682 		|| CONST_SCALAR_INT_P (trueop1)
3683 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3684 	  {
3685 	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3686 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3687 	    rtvec v = rtvec_alloc (n_elts);
3688 	    unsigned int i;
3689 	    unsigned in_n_elts = 1;
3690 
3691 	    if (VECTOR_MODE_P (op0_mode))
3692 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3693 	    for (i = 0; i < n_elts; i++)
3694 	      {
3695 		if (i < in_n_elts)
3696 		  {
3697 		    if (!VECTOR_MODE_P (op0_mode))
3698 		      RTVEC_ELT (v, i) = trueop0;
3699 		    else
3700 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3701 		  }
3702 		else
3703 		  {
3704 		    if (!VECTOR_MODE_P (op1_mode))
3705 		      RTVEC_ELT (v, i) = trueop1;
3706 		    else
3707 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3708 							   i - in_n_elts);
3709 		  }
3710 	      }
3711 
3712 	    return gen_rtx_CONST_VECTOR (mode, v);
3713 	  }
3714 
3715 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
3716 	   Restrict the transformation to avoid generating a VEC_SELECT with a
3717 	   mode unrelated to its operand.  */
3718 	if (GET_CODE (trueop0) == VEC_SELECT
3719 	    && GET_CODE (trueop1) == VEC_SELECT
3720 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3721 	    && GET_MODE (XEXP (trueop0, 0)) == mode)
3722 	  {
3723 	    rtx par0 = XEXP (trueop0, 1);
3724 	    rtx par1 = XEXP (trueop1, 1);
3725 	    int len0 = XVECLEN (par0, 0);
3726 	    int len1 = XVECLEN (par1, 0);
3727 	    rtvec vec = rtvec_alloc (len0 + len1);
3728 	    for (int i = 0; i < len0; i++)
3729 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3730 	    for (int i = 0; i < len1; i++)
3731 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3732 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3733 					gen_rtx_PARALLEL (VOIDmode, vec));
3734 	  }
3735       }
3736       return 0;
3737 
3738     default:
3739       gcc_unreachable ();
3740     }
3741 
3742   return 0;
3743 }
3744 
3745 rtx
3746 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3747 				 rtx op0, rtx op1)
3748 {
3749   unsigned int width = GET_MODE_PRECISION (mode);
3750 
3751   if (VECTOR_MODE_P (mode)
3752       && code != VEC_CONCAT
3753       && GET_CODE (op0) == CONST_VECTOR
3754       && GET_CODE (op1) == CONST_VECTOR)
3755     {
3756       unsigned n_elts = GET_MODE_NUNITS (mode);
3757       machine_mode op0mode = GET_MODE (op0);
3758       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3759       machine_mode op1mode = GET_MODE (op1);
3760       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3761       rtvec v = rtvec_alloc (n_elts);
3762       unsigned int i;
3763 
3764       gcc_assert (op0_n_elts == n_elts);
3765       gcc_assert (op1_n_elts == n_elts);
3766       for (i = 0; i < n_elts; i++)
3767 	{
3768 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3769 					     CONST_VECTOR_ELT (op0, i),
3770 					     CONST_VECTOR_ELT (op1, i));
3771 	  if (!x)
3772 	    return 0;
3773 	  RTVEC_ELT (v, i) = x;
3774 	}
3775 
3776       return gen_rtx_CONST_VECTOR (mode, v);
3777     }
3778 
3779   if (VECTOR_MODE_P (mode)
3780       && code == VEC_CONCAT
3781       && (CONST_SCALAR_INT_P (op0)
3782 	  || GET_CODE (op0) == CONST_FIXED
3783 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
3784       && (CONST_SCALAR_INT_P (op1)
3785 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
3786 	  || GET_CODE (op1) == CONST_FIXED))
3787     {
3788       unsigned n_elts = GET_MODE_NUNITS (mode);
3789       rtvec v = rtvec_alloc (n_elts);
3790 
3791       gcc_assert (n_elts >= 2);
3792       if (n_elts == 2)
3793 	{
3794 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3795 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3796 
3797 	  RTVEC_ELT (v, 0) = op0;
3798 	  RTVEC_ELT (v, 1) = op1;
3799 	}
3800       else
3801 	{
3802 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3803 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3804 	  unsigned i;
3805 
3806 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3807 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3808 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3809 
3810 	  for (i = 0; i < op0_n_elts; ++i)
3811 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3812 	  for (i = 0; i < op1_n_elts; ++i)
3813 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3814 	}
3815 
3816       return gen_rtx_CONST_VECTOR (mode, v);
3817     }
3818 
3819   if (SCALAR_FLOAT_MODE_P (mode)
3820       && CONST_DOUBLE_AS_FLOAT_P (op0)
3821       && CONST_DOUBLE_AS_FLOAT_P (op1)
3822       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3823     {
3824       if (code == AND
3825 	  || code == IOR
3826 	  || code == XOR)
3827 	{
3828 	  long tmp0[4];
3829 	  long tmp1[4];
3830 	  REAL_VALUE_TYPE r;
3831 	  int i;
3832 
3833 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3834 			  GET_MODE (op0));
3835 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3836 			  GET_MODE (op1));
3837 	  for (i = 0; i < 4; i++)
3838 	    {
3839 	      switch (code)
3840 	      {
3841 	      case AND:
3842 		tmp0[i] &= tmp1[i];
3843 		break;
3844 	      case IOR:
3845 		tmp0[i] |= tmp1[i];
3846 		break;
3847 	      case XOR:
3848 		tmp0[i] ^= tmp1[i];
3849 		break;
3850 	      default:
3851 		gcc_unreachable ();
3852 	      }
3853 	    }
3854 	   real_from_target (&r, tmp0, mode);
3855 	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3856 	}
3857       else
3858 	{
3859 	  REAL_VALUE_TYPE f0, f1, value, result;
3860 	  bool inexact;
3861 
3862 	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3863 	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3864 	  real_convert (&f0, mode, &f0);
3865 	  real_convert (&f1, mode, &f1);
3866 
3867 	  if (HONOR_SNANS (mode)
3868 	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3869 	    return 0;
3870 
3871 	  if (code == DIV
3872 	      && REAL_VALUES_EQUAL (f1, dconst0)
3873 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3874 	    return 0;
3875 
3876 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3877 	      && flag_trapping_math
3878 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3879 	    {
3880 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3881 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3882 
3883 	      switch (code)
3884 		{
3885 		case PLUS:
3886 		  /* Inf + -Inf = NaN plus exception.  */
3887 		  if (s0 != s1)
3888 		    return 0;
3889 		  break;
3890 		case MINUS:
3891 		  /* Inf - Inf = NaN plus exception.  */
3892 		  if (s0 == s1)
3893 		    return 0;
3894 		  break;
3895 		case DIV:
3896 		  /* Inf / Inf = NaN plus exception.  */
3897 		  return 0;
3898 		default:
3899 		  break;
3900 		}
3901 	    }
3902 
3903 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3904 	      && flag_trapping_math
3905 	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3906 		  || (REAL_VALUE_ISINF (f1)
3907 		      && REAL_VALUES_EQUAL (f0, dconst0))))
3908 	    /* Inf * 0 = NaN plus exception.  */
3909 	    return 0;
3910 
3911 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3912 				     &f0, &f1);
3913 	  real_convert (&result, mode, &value);
3914 
3915 	  /* Don't constant fold this floating point operation if
3916 	     the result has overflowed and flag_trapping_math is set.  */
3917 
3918 	  if (flag_trapping_math
3919 	      && MODE_HAS_INFINITIES (mode)
3920 	      && REAL_VALUE_ISINF (result)
3921 	      && !REAL_VALUE_ISINF (f0)
3922 	      && !REAL_VALUE_ISINF (f1))
3923 	    /* Overflow plus exception.  */
3924 	    return 0;
3925 
3926 	  /* Don't constant fold this floating point operation if the
3927 	     result may depend upon the run-time rounding mode and
3928 	     flag_rounding_math is set, or if GCC's software emulation
3929 	     is unable to accurately represent the result.  */
3930 
3931 	  if ((flag_rounding_math
3932 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3933 	      && (inexact || !real_identical (&result, &value)))
3934 	    return NULL_RTX;
3935 
3936 	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3937 	}
3938     }
3939 
3940   /* We can fold some multi-word operations.  */
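  /* For example, on a target with a 128-bit TImode, adding two
     CONST_WIDE_INT operands is folded here through wi::add and returned
     via immed_wide_int_const, even though the values do not fit in a
     single HOST_WIDE_INT.  */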
3941   if ((GET_MODE_CLASS (mode) == MODE_INT
3942        || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3943       && CONST_SCALAR_INT_P (op0)
3944       && CONST_SCALAR_INT_P (op1))
3945     {
3946       wide_int result;
3947       bool overflow;
3948       rtx_mode_t pop0 = std::make_pair (op0, mode);
3949       rtx_mode_t pop1 = std::make_pair (op1, mode);
3950 
3951 #if TARGET_SUPPORTS_WIDE_INT == 0
3952       /* This assert keeps the simplification from producing a result
3953 	 that cannot be represented in a CONST_DOUBLE, but a lot of
3954 	 upstream callers expect that this function never fails to
3955 	 simplify something, so if you added this to the test above
3956 	 the code would die later anyway.  If this assert fires, you
3957 	 just need to make the port support wide int.  */
3958       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3959 #endif
3960       switch (code)
3961 	{
3962 	case MINUS:
3963 	  result = wi::sub (pop0, pop1);
3964 	  break;
3965 
3966 	case PLUS:
3967 	  result = wi::add (pop0, pop1);
3968 	  break;
3969 
3970 	case MULT:
3971 	  result = wi::mul (pop0, pop1);
3972 	  break;
3973 
3974 	case DIV:
3975 	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3976 	  if (overflow)
3977 	    return NULL_RTX;
3978 	  break;
3979 
3980 	case MOD:
3981 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3982 	  if (overflow)
3983 	    return NULL_RTX;
3984 	  break;
3985 
3986 	case UDIV:
3987 	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3988 	  if (overflow)
3989 	    return NULL_RTX;
3990 	  break;
3991 
3992 	case UMOD:
3993 	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3994 	  if (overflow)
3995 	    return NULL_RTX;
3996 	  break;
3997 
3998 	case AND:
3999 	  result = wi::bit_and (pop0, pop1);
4000 	  break;
4001 
4002 	case IOR:
4003 	  result = wi::bit_or (pop0, pop1);
4004 	  break;
4005 
4006 	case XOR:
4007 	  result = wi::bit_xor (pop0, pop1);
4008 	  break;
4009 
4010 	case SMIN:
4011 	  result = wi::smin (pop0, pop1);
4012 	  break;
4013 
4014 	case SMAX:
4015 	  result = wi::smax (pop0, pop1);
4016 	  break;
4017 
4018 	case UMIN:
4019 	  result = wi::umin (pop0, pop1);
4020 	  break;
4021 
4022 	case UMAX:
4023 	  result = wi::umax (pop0, pop1);
4024 	  break;
4025 
4026 	case LSHIFTRT:
4027 	case ASHIFTRT:
4028 	case ASHIFT:
4029 	  {
4030 	    wide_int wop1 = pop1;
4031 	    if (SHIFT_COUNT_TRUNCATED)
4032 	      wop1 = wi::umod_trunc (wop1, width);
4033 	    else if (wi::geu_p (wop1, width))
4034 	      return NULL_RTX;
4035 
4036 	    switch (code)
4037 	      {
4038 	      case LSHIFTRT:
4039 		result = wi::lrshift (pop0, wop1);
4040 		break;
4041 
4042 	      case ASHIFTRT:
4043 		result = wi::arshift (pop0, wop1);
4044 		break;
4045 
4046 	      case ASHIFT:
4047 		result = wi::lshift (pop0, wop1);
4048 		break;
4049 
4050 	      default:
4051 		gcc_unreachable ();
4052 	      }
4053 	    break;
4054 	  }
4055 	case ROTATE:
4056 	case ROTATERT:
4057 	  {
4058 	    if (wi::neg_p (pop1))
4059 	      return NULL_RTX;
4060 
4061 	    switch (code)
4062 	      {
4063 	      case ROTATE:
4064 		result = wi::lrotate (pop0, pop1);
4065 		break;
4066 
4067 	      case ROTATERT:
4068 		result = wi::rrotate (pop0, pop1);
4069 		break;
4070 
4071 	      default:
4072 		gcc_unreachable ();
4073 	      }
4074 	    break;
4075 	  }
4076 	default:
4077 	  return NULL_RTX;
4078 	}
4079       return immed_wide_int_const (result, mode);
4080     }
4081 
4082   return NULL_RTX;
4083 }
4084 
4085 
4086 
4087 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4088    PLUS or MINUS.
4089 
4090    Rather than test for specific cases, we do this by a brute-force method
4091    and do all possible simplifications until no more changes occur.  Then
4092    we rebuild the operation.  */
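
/* For example, simplifying (minus (plus a b) (plus b c)) first flattens
   the expression into the operand list { a, -b, +b, -c }; the pairwise
   combination loop then cancels b against -b, and the remaining operands
   are rebuilt as (minus a c).  */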
4093 
4094 struct simplify_plus_minus_op_data
4095 {
4096   rtx op;
4097   short neg;
4098 };
4099 
4100 static bool
4101 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4102 {
4103   int result;
4104 
4105   result = (commutative_operand_precedence (y)
4106 	    - commutative_operand_precedence (x));
4107   if (result)
4108     return result > 0;
4109 
4110   /* Group together equal REGs to do more simplification.  */
4111   if (REG_P (x) && REG_P (y))
4112     return REGNO (x) > REGNO (y);
4113   else
4114     return false;
4115 }
4116 
4117 static rtx
4118 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4119 		     rtx op1)
4120 {
4121   struct simplify_plus_minus_op_data ops[16];
4122   rtx result, tem;
4123   int n_ops = 2;
4124   int changed, n_constants, canonicalized = 0;
4125   int i, j;
4126 
4127   memset (ops, 0, sizeof ops);
4128 
4129   /* Set up the two operands and then expand them until nothing has been
4130      changed.  If we run out of room in our array, give up; this should
4131      almost never happen.  */
4132 
4133   ops[0].op = op0;
4134   ops[0].neg = 0;
4135   ops[1].op = op1;
4136   ops[1].neg = (code == MINUS);
4137 
4138   do
4139     {
4140       changed = 0;
4141       n_constants = 0;
4142 
4143       for (i = 0; i < n_ops; i++)
4144 	{
4145 	  rtx this_op = ops[i].op;
4146 	  int this_neg = ops[i].neg;
4147 	  enum rtx_code this_code = GET_CODE (this_op);
4148 
4149 	  switch (this_code)
4150 	    {
4151 	    case PLUS:
4152 	    case MINUS:
4153 	      if (n_ops == ARRAY_SIZE (ops))
4154 		return NULL_RTX;
4155 
4156 	      ops[n_ops].op = XEXP (this_op, 1);
4157 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4158 	      n_ops++;
4159 
4160 	      ops[i].op = XEXP (this_op, 0);
4161 	      changed = 1;
4162 	      canonicalized |= this_neg || i != n_ops - 2;
4163 	      break;
4164 
4165 	    case NEG:
4166 	      ops[i].op = XEXP (this_op, 0);
4167 	      ops[i].neg = ! this_neg;
4168 	      changed = 1;
4169 	      canonicalized = 1;
4170 	      break;
4171 
4172 	    case CONST:
4173 	      if (n_ops != ARRAY_SIZE (ops)
4174 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
4175 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4176 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4177 		{
4178 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
4179 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4180 		  ops[n_ops].neg = this_neg;
4181 		  n_ops++;
4182 		  changed = 1;
4183 	          canonicalized = 1;
4184 		}
4185 	      break;
4186 
4187 	    case NOT:
4188 	      /* ~a -> (-a - 1) */
4189 	      if (n_ops != ARRAY_SIZE (ops))
4190 		{
4191 		  ops[n_ops].op = CONSTM1_RTX (mode);
4192 		  ops[n_ops++].neg = this_neg;
4193 		  ops[i].op = XEXP (this_op, 0);
4194 		  ops[i].neg = !this_neg;
4195 		  changed = 1;
4196 	          canonicalized = 1;
4197 		}
4198 	      break;
4199 
4200 	    case CONST_INT:
4201 	      n_constants++;
4202 	      if (this_neg)
4203 		{
4204 		  ops[i].op = neg_const_int (mode, this_op);
4205 		  ops[i].neg = 0;
4206 		  changed = 1;
4207 	          canonicalized = 1;
4208 		}
4209 	      break;
4210 
4211 	    default:
4212 	      break;
4213 	    }
4214 	}
4215     }
4216   while (changed);
4217 
4218   if (n_constants > 1)
4219     canonicalized = 1;
4220 
4221   gcc_assert (n_ops >= 2);
4222 
4223   /* If we only have two operands, we can avoid the loops.  */
4224   if (n_ops == 2)
4225     {
4226       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4227       rtx lhs, rhs;
4228 
4229       /* Get the two operands.  Be careful with the order, especially for
4230 	 the cases where code == MINUS.  */
4231       if (ops[0].neg && ops[1].neg)
4232 	{
4233 	  lhs = gen_rtx_NEG (mode, ops[0].op);
4234 	  rhs = ops[1].op;
4235 	}
4236       else if (ops[0].neg)
4237 	{
4238 	  lhs = ops[1].op;
4239 	  rhs = ops[0].op;
4240 	}
4241       else
4242 	{
4243 	  lhs = ops[0].op;
4244 	  rhs = ops[1].op;
4245 	}
4246 
4247       return simplify_const_binary_operation (code, mode, lhs, rhs);
4248     }
4249 
4250   /* Now simplify each pair of operands until nothing changes.  */
4251   do
4252     {
4253       /* Insertion sort is good enough for a small array.  */
4254       for (i = 1; i < n_ops; i++)
4255         {
4256           struct simplify_plus_minus_op_data save;
4257           j = i - 1;
4258           if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4259 	    continue;
4260 
4261           canonicalized = 1;
4262           save = ops[i];
4263           do
4264 	    ops[j + 1] = ops[j];
4265           while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4266           ops[j + 1] = save;
4267         }
4268 
4269       changed = 0;
4270       for (i = n_ops - 1; i > 0; i--)
4271 	for (j = i - 1; j >= 0; j--)
4272 	  {
4273 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4274 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4275 
4276 	    if (lhs != 0 && rhs != 0)
4277 	      {
4278 		enum rtx_code ncode = PLUS;
4279 
4280 		if (lneg != rneg)
4281 		  {
4282 		    ncode = MINUS;
4283 		    if (lneg)
4284 		      tem = lhs, lhs = rhs, rhs = tem;
4285 		  }
4286 		else if (swap_commutative_operands_p (lhs, rhs))
4287 		  tem = lhs, lhs = rhs, rhs = tem;
4288 
4289 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4290 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4291 		  {
4292 		    rtx tem_lhs, tem_rhs;
4293 
4294 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4295 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4296 		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4297 
4298 		    if (tem && !CONSTANT_P (tem))
4299 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4300 		  }
4301 		else
4302 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4303 
4304 		if (tem)
4305 		  {
4306 		    /* Reject "simplifications" that just wrap the two
4307 		       arguments in a CONST.  Failure to do so can result
4308 		       in infinite recursion with simplify_binary_operation
4309 		       when it calls us to simplify CONST operations.
4310 		       Also, if we find such a simplification, don't try
4311 		       any more combinations with this rhs:  We must have
4312 		       something like symbol+offset, i.e. one of the
4313 		       trivial CONST expressions we handle later.  */
4314 		    if (GET_CODE (tem) == CONST
4315 			&& GET_CODE (XEXP (tem, 0)) == ncode
4316 			&& XEXP (XEXP (tem, 0), 0) == lhs
4317 			&& XEXP (XEXP (tem, 0), 1) == rhs)
4318 		      break;
4319 		    lneg &= rneg;
4320 		    if (GET_CODE (tem) == NEG)
4321 		      tem = XEXP (tem, 0), lneg = !lneg;
4322 		    if (CONST_INT_P (tem) && lneg)
4323 		      tem = neg_const_int (mode, tem), lneg = 0;
4324 
4325 		    ops[i].op = tem;
4326 		    ops[i].neg = lneg;
4327 		    ops[j].op = NULL_RTX;
4328 		    changed = 1;
4329 		    canonicalized = 1;
4330 		  }
4331 	      }
4332 	  }
4333 
4334       /* If nothing changed, fail.  */
4335       if (!canonicalized)
4336         return NULL_RTX;
4337 
4338       /* Pack all the operands to the lower-numbered entries.  */
4339       for (i = 0, j = 0; j < n_ops; j++)
4340         if (ops[j].op)
4341           {
4342 	    ops[i] = ops[j];
4343 	    i++;
4344           }
4345       n_ops = i;
4346     }
4347   while (changed);
4348 
4349   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4350   if (n_ops == 2
4351       && CONST_INT_P (ops[1].op)
4352       && CONSTANT_P (ops[0].op)
4353       && ops[0].neg)
4354     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4355 
4356   /* We suppressed creation of trivial CONST expressions in the
4357      combination loop to avoid recursion.  Create one manually now.
4358      The combination loop should have ensured that there is exactly
4359      one CONST_INT, and the sort will have ensured that it is last
4360      in the array and that any other constant will be next-to-last.  */
4361 
4362   if (n_ops > 1
4363       && CONST_INT_P (ops[n_ops - 1].op)
4364       && CONSTANT_P (ops[n_ops - 2].op))
4365     {
4366       rtx value = ops[n_ops - 1].op;
4367       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4368 	value = neg_const_int (mode, value);
4369       ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4370 					 INTVAL (value));
4371       n_ops--;
4372     }
4373 
4374   /* Put a non-negated operand first, if possible.  */
4375 
4376   for (i = 0; i < n_ops && ops[i].neg; i++)
4377     continue;
4378   if (i == n_ops)
4379     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4380   else if (i != 0)
4381     {
4382       tem = ops[0].op;
4383       ops[0] = ops[i];
4384       ops[i].op = tem;
4385       ops[i].neg = 1;
4386     }
4387 
4388   /* Now make the result by performing the requested operations.  */
4389   result = ops[0].op;
4390   for (i = 1; i < n_ops; i++)
4391     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4392 			     mode, result, ops[i].op);
4393 
4394   return result;
4395 }
4396 
4397 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4398 static bool
4399 plus_minus_operand_p (const_rtx x)
4400 {
4401   return GET_CODE (x) == PLUS
4402          || GET_CODE (x) == MINUS
4403 	 || (GET_CODE (x) == CONST
4404 	     && GET_CODE (XEXP (x, 0)) == PLUS
4405 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4406 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4407 }
4408 
4409 /* Like simplify_binary_operation except used for relational operators.
4410    MODE is the mode of the result.  If MODE is VOIDmode, the operands
4411    must not both be VOIDmode as well.
4412 
4413    CMP_MODE specifies the mode in which the comparison is done, so it is
4414    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4415    the operands or, if both are VOIDmode, the operands are compared in
4416    "infinite precision".  */
4417 rtx
4418 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4419 			       machine_mode cmp_mode, rtx op0, rtx op1)
4420 {
4421   rtx tem, trueop0, trueop1;
4422 
4423   if (cmp_mode == VOIDmode)
4424     cmp_mode = GET_MODE (op0);
4425   if (cmp_mode == VOIDmode)
4426     cmp_mode = GET_MODE (op1);
4427 
4428   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4429   if (tem)
4430     {
4431       if (SCALAR_FLOAT_MODE_P (mode))
4432 	{
4433           if (tem == const0_rtx)
4434             return CONST0_RTX (mode);
4435 #ifdef FLOAT_STORE_FLAG_VALUE
4436 	  {
4437 	    REAL_VALUE_TYPE val;
4438 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4439 	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4440 	  }
4441 #else
4442 	  return NULL_RTX;
4443 #endif
4444 	}
4445       if (VECTOR_MODE_P (mode))
4446 	{
4447 	  if (tem == const0_rtx)
4448 	    return CONST0_RTX (mode);
4449 #ifdef VECTOR_STORE_FLAG_VALUE
4450 	  {
4451 	    int i, units;
4452 	    rtvec v;
4453 
4454 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4455 	    if (val == NULL_RTX)
4456 	      return NULL_RTX;
4457 	    if (val == const1_rtx)
4458 	      return CONST1_RTX (mode);
4459 
4460 	    units = GET_MODE_NUNITS (mode);
4461 	    v = rtvec_alloc (units);
4462 	    for (i = 0; i < units; i++)
4463 	      RTVEC_ELT (v, i) = val;
4464 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
4465 	  }
4466 #else
4467 	  return NULL_RTX;
4468 #endif
4469 	}
4470 
4471       return tem;
4472     }
4473 
4474   /* For the following tests, ensure const0_rtx is op1.  */
4475   if (swap_commutative_operands_p (op0, op1)
4476       || (op0 == const0_rtx && op1 != const0_rtx))
4477     tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4478 
4479   /* If op0 is a compare, extract the comparison arguments from it.  */
4480   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4481     return simplify_gen_relational (code, mode, VOIDmode,
4482 				    XEXP (op0, 0), XEXP (op0, 1));
4483 
4484   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4485       || CC0_P (op0))
4486     return NULL_RTX;
4487 
4488   trueop0 = avoid_constant_pool_reference (op0);
4489   trueop1 = avoid_constant_pool_reference (op1);
4490   return simplify_relational_operation_1 (code, mode, cmp_mode,
4491 		  			  trueop0, trueop1);
4492 }
4493 
4494 /* This part of simplify_relational_operation is only used when CMP_MODE
4495    is not in class MODE_CC (i.e. it is a real comparison).
4496 
4497    MODE is the mode of the result, while CMP_MODE specifies the mode
4498    in which the comparison is done, so it is the mode of the operands.  */
4499 
4500 static rtx
4501 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4502 				 machine_mode cmp_mode, rtx op0, rtx op1)
4503 {
4504   enum rtx_code op0code = GET_CODE (op0);
4505 
4506   if (op1 == const0_rtx && COMPARISON_P (op0))
4507     {
4508       /* If op0 is a comparison, extract the comparison arguments
4509          from it.  */
4510       if (code == NE)
4511 	{
4512 	  if (GET_MODE (op0) == mode)
4513 	    return simplify_rtx (op0);
4514 	  else
4515 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4516 					    XEXP (op0, 0), XEXP (op0, 1));
4517 	}
4518       else if (code == EQ)
4519 	{
4520 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4521 	  if (new_code != UNKNOWN)
4522 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4523 					    XEXP (op0, 0), XEXP (op0, 1));
4524 	}
4525     }
4526 
4527   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4528      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
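  /* For example, in a 32-bit mode (ltu (plus a (const_int 1)) (const_int 1))
     is true exactly when the addition wrapped around, i.e. when
     a == 0xffffffff, which is what (geu a (const_int -1)) tests.  */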
4529   if ((code == LTU || code == GEU)
4530       && GET_CODE (op0) == PLUS
4531       && CONST_INT_P (XEXP (op0, 1))
4532       && (rtx_equal_p (op1, XEXP (op0, 0))
4533 	  || rtx_equal_p (op1, XEXP (op0, 1)))
4534       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4535       && XEXP (op0, 1) != const0_rtx)
4536     {
4537       rtx new_cmp
4538 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4539       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4540 				      cmp_mode, XEXP (op0, 0), new_cmp);
4541     }
4542 
4543   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4544   if ((code == LTU || code == GEU)
4545       && GET_CODE (op0) == PLUS
4546       && rtx_equal_p (op1, XEXP (op0, 1))
4547       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4548       && !rtx_equal_p (op1, XEXP (op0, 0)))
4549     return simplify_gen_relational (code, mode, cmp_mode, op0,
4550 				    copy_rtx (XEXP (op0, 0)));
4551 
4552   if (op1 == const0_rtx)
4553     {
4554       /* Canonicalize (GTU x 0) as (NE x 0).  */
4555       if (code == GTU)
4556         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4557       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4558       if (code == LEU)
4559         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4560     }
4561   else if (op1 == const1_rtx)
4562     {
4563       switch (code)
4564         {
4565         case GE:
4566 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4567 	  return simplify_gen_relational (GT, mode, cmp_mode,
4568 					  op0, const0_rtx);
4569 	case GEU:
4570 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4571 	  return simplify_gen_relational (NE, mode, cmp_mode,
4572 					  op0, const0_rtx);
4573 	case LT:
4574 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4575 	  return simplify_gen_relational (LE, mode, cmp_mode,
4576 					  op0, const0_rtx);
4577 	case LTU:
4578 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4579 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4580 					  op0, const0_rtx);
4581 	default:
4582 	  break;
4583 	}
4584     }
4585   else if (op1 == constm1_rtx)
4586     {
4587       /* Canonicalize (LE x -1) as (LT x 0).  */
4588       if (code == LE)
4589         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4590       /* Canonicalize (GT x -1) as (GE x 0).  */
4591       if (code == GT)
4592         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4593     }
4594 
4595   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
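  /* For example, (eq (plus x (const_int 3)) (const_int 10)) becomes
     (eq x (const_int 7)).  */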
4596   if ((code == EQ || code == NE)
4597       && (op0code == PLUS || op0code == MINUS)
4598       && CONSTANT_P (op1)
4599       && CONSTANT_P (XEXP (op0, 1))
4600       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4601     {
4602       rtx x = XEXP (op0, 0);
4603       rtx c = XEXP (op0, 1);
4604       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4605       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4606 
4607       /* Detect an infinitely recursive condition, where we oscillate at this
4608 	 simplification case between:
4609 	    A + B == C  <--->  C - B == A,
4610 	 where A, B and C are all non-simplifiable constant expressions,
4611 	 usually SYMBOL_REFs.  */
4612       if (GET_CODE (tem) == invcode
4613 	  && CONSTANT_P (x)
4614 	  && rtx_equal_p (c, XEXP (tem, 1)))
4615 	return NULL_RTX;
4616 
4617       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4618     }
4619 
4620   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4621      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4622   if (code == NE
4623       && op1 == const0_rtx
4624       && GET_MODE_CLASS (mode) == MODE_INT
4625       && cmp_mode != VOIDmode
4626       /* ??? Work-around BImode bugs in the ia64 backend.  */
4627       && mode != BImode
4628       && cmp_mode != BImode
4629       && nonzero_bits (op0, cmp_mode) == 1
4630       && STORE_FLAG_VALUE == 1)
4631     return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4632 	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4633 	   : lowpart_subreg (mode, op0, cmp_mode);
4634 
4635   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4636   if ((code == EQ || code == NE)
4637       && op1 == const0_rtx
4638       && op0code == XOR)
4639     return simplify_gen_relational (code, mode, cmp_mode,
4640 				    XEXP (op0, 0), XEXP (op0, 1));
4641 
4642   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4643   if ((code == EQ || code == NE)
4644       && op0code == XOR
4645       && rtx_equal_p (XEXP (op0, 0), op1)
4646       && !side_effects_p (XEXP (op0, 0)))
4647     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4648 				    CONST0_RTX (mode));
4649 
4650   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4651   if ((code == EQ || code == NE)
4652       && op0code == XOR
4653       && rtx_equal_p (XEXP (op0, 1), op1)
4654       && !side_effects_p (XEXP (op0, 1)))
4655     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4656 				    CONST0_RTX (mode));
4657 
4658   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
4659   if ((code == EQ || code == NE)
4660       && op0code == XOR
4661       && CONST_SCALAR_INT_P (op1)
4662       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4663     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4664 				    simplify_gen_binary (XOR, cmp_mode,
4665 							 XEXP (op0, 1), op1));
4666 
4667   /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4668      can be implemented with a BICS instruction on some targets, or
4669      constant-folded if y is a constant.  */
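  /* For example, (eq (and x (const_int 0xff)) x) becomes a test of
     (and (not (const_int 0xff)) x) against zero, i.e. a check that x has
     no bits set outside the low byte.  */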
4670   if ((code == EQ || code == NE)
4671       && op0code == AND
4672       && rtx_equal_p (XEXP (op0, 0), op1)
4673       && !side_effects_p (op1)
4674       && op1 != CONST0_RTX (cmp_mode))
4675     {
4676       rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4677       rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4678 
4679       return simplify_gen_relational (code, mode, cmp_mode, lhs,
4680 				      CONST0_RTX (cmp_mode));
4681     }
4682 
4683   /* Likewise for (eq/ne (and x y) y).  */
4684   if ((code == EQ || code == NE)
4685       && op0code == AND
4686       && rtx_equal_p (XEXP (op0, 1), op1)
4687       && !side_effects_p (op1)
4688       && op1 != CONST0_RTX (cmp_mode))
4689     {
4690       rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4691       rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4692 
4693       return simplify_gen_relational (code, mode, cmp_mode, lhs,
4694 				      CONST0_RTX (cmp_mode));
4695     }
4696 
4697   /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
4698   if ((code == EQ || code == NE)
4699       && GET_CODE (op0) == BSWAP
4700       && CONST_SCALAR_INT_P (op1))
4701     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4702 				    simplify_gen_unary (BSWAP, cmp_mode,
4703 							op1, cmp_mode));
4704 
4705   /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
4706   if ((code == EQ || code == NE)
4707       && GET_CODE (op0) == BSWAP
4708       && GET_CODE (op1) == BSWAP)
4709     return simplify_gen_relational (code, mode, cmp_mode,
4710 				    XEXP (op0, 0), XEXP (op1, 0));
4711 
4712   if (op0code == POPCOUNT && op1 == const0_rtx)
4713     switch (code)
4714       {
4715       case EQ:
4716       case LE:
4717       case LEU:
4718 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4719 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4720 					XEXP (op0, 0), const0_rtx);
4721 
4722       case NE:
4723       case GT:
4724       case GTU:
4725 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4726 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4727 					XEXP (op0, 0), const0_rtx);
4728 
4729       default:
4730 	break;
4731       }
4732 
4733   return NULL_RTX;
4734 }
4735 
4736 enum
4737 {
4738   CMP_EQ = 1,
4739   CMP_LT = 2,
4740   CMP_GT = 4,
4741   CMP_LTU = 8,
4742   CMP_GTU = 16
4743 };
4744 
4745 
4746 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4747    KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4748    For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4749    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4750    For floating-point comparisons, assume that the operands were ordered.  */
4751 
4752 static rtx
4753 comparison_result (enum rtx_code code, int known_results)
4754 {
4755   switch (code)
4756     {
4757     case EQ:
4758     case UNEQ:
4759       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4760     case NE:
4761     case LTGT:
4762       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4763 
4764     case LT:
4765     case UNLT:
4766       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4767     case GE:
4768     case UNGE:
4769       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4770 
4771     case GT:
4772     case UNGT:
4773       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4774     case LE:
4775     case UNLE:
4776       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4777 
4778     case LTU:
4779       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4780     case GEU:
4781       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4782 
4783     case GTU:
4784       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4785     case LEU:
4786       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4787 
4788     case ORDERED:
4789       return const_true_rtx;
4790     case UNORDERED:
4791       return const0_rtx;
4792     default:
4793       gcc_unreachable ();
4794     }
4795 }
4796 
4797 /* Check if the given comparison (done in the given MODE) is actually
4798    a tautology or a contradiction.  If the mode is VOIDmode, the
4799    comparison is done in "infinite precision".  If no simplification
4800    is possible, this function returns zero.  Otherwise, it returns
4801    either const_true_rtx or const0_rtx.  */
4802 
4803 rtx
4804 simplify_const_relational_operation (enum rtx_code code,
4805 				     machine_mode mode,
4806 				     rtx op0, rtx op1)
4807 {
4808   rtx tem;
4809   rtx trueop0;
4810   rtx trueop1;
4811 
4812   gcc_assert (mode != VOIDmode
4813 	      || (GET_MODE (op0) == VOIDmode
4814 		  && GET_MODE (op1) == VOIDmode));
4815 
4816   /* If op0 is a compare, extract the comparison arguments from it.  */
4817   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4818     {
4819       op1 = XEXP (op0, 1);
4820       op0 = XEXP (op0, 0);
4821 
4822       if (GET_MODE (op0) != VOIDmode)
4823 	mode = GET_MODE (op0);
4824       else if (GET_MODE (op1) != VOIDmode)
4825 	mode = GET_MODE (op1);
4826       else
4827 	return 0;
4828     }
4829 
4830   /* We can't simplify MODE_CC values since we don't know what the
4831      actual comparison is.  */
4832   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4833     return 0;
4834 
4835   /* Make sure the constant is second.  */
4836   if (swap_commutative_operands_p (op0, op1))
4837     {
4838       tem = op0, op0 = op1, op1 = tem;
4839       code = swap_condition (code);
4840     }
4841 
4842   trueop0 = avoid_constant_pool_reference (op0);
4843   trueop1 = avoid_constant_pool_reference (op1);
4844 
4845   /* For integer comparisons of A and B, maybe we can simplify A - B and
4846      then simplify a comparison of that with zero.  If A and B are both either
4847      a register or a CONST_INT, this can't help; testing for these cases will
4848      prevent infinite recursion here and speed things up.
4849 
4850      We can only do this for EQ and NE comparisons as otherwise we may
4851      We can only do this for EQ and NE comparisons, as otherwise we may
4852      lose or introduce overflow that we cannot disregard as undefined,
4853      since we do not know the signedness of the operation on either the
4854      left or the right hand side of the comparison.  */
4855   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4856       && (code == EQ || code == NE)
4857       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4858 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
4859       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4860       /* We cannot do this if tem is a nonzero address.  */
4861       && ! nonzero_address_p (tem))
4862     return simplify_const_relational_operation (signed_condition (code),
4863 						mode, tem, const0_rtx);
4864 
4865   if (! HONOR_NANS (mode) && code == ORDERED)
4866     return const_true_rtx;
4867 
4868   if (! HONOR_NANS (mode) && code == UNORDERED)
4869     return const0_rtx;
4870 
4871   /* For modes without NaNs, if the two operands are equal, we know the
4872      result except if they have side-effects.  Even with NaNs we know
4873      the result of unordered comparisons and, if signaling NaNs are
4874      irrelevant, also the result of LT/GT/LTGT.  */
4875   if ((! HONOR_NANS (trueop0)
4876        || code == UNEQ || code == UNLE || code == UNGE
4877        || ((code == LT || code == GT || code == LTGT)
4878 	   && ! HONOR_SNANS (trueop0)))
4879       && rtx_equal_p (trueop0, trueop1)
4880       && ! side_effects_p (trueop0))
4881     return comparison_result (code, CMP_EQ);
4882 
4883   /* If the operands are floating-point constants, see if we can fold
4884      the result.  */
4885   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4886       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4887       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4888     {
4889       REAL_VALUE_TYPE d0, d1;
4890 
4891       REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4892       REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4893 
4894       /* Comparisons are unordered iff at least one of the values is NaN.  */
4895       if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4896 	switch (code)
4897 	  {
4898 	  case UNEQ:
4899 	  case UNLT:
4900 	  case UNGT:
4901 	  case UNLE:
4902 	  case UNGE:
4903 	  case NE:
4904 	  case UNORDERED:
4905 	    return const_true_rtx;
4906 	  case EQ:
4907 	  case LT:
4908 	  case GT:
4909 	  case LE:
4910 	  case GE:
4911 	  case LTGT:
4912 	  case ORDERED:
4913 	    return const0_rtx;
4914 	  default:
4915 	    return 0;
4916 	  }
4917 
4918       return comparison_result (code,
4919 				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4920 				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4921     }
4922 
4923   /* Otherwise, see if the operands are both integers.  */
4924   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4925       && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4926     {
4927       /* It would be nice if we really had a mode here.  However, the
4928 	 largest int representable on the target is as good as
4929 	 infinite.  */
4930       machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4931       rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4932       rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4933 
4934       if (wi::eq_p (ptrueop0, ptrueop1))
4935 	return comparison_result (code, CMP_EQ);
4936       else
4937 	{
4938 	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4939 	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4940 	  return comparison_result (code, cr);
4941 	}
4942     }
4943 
4944   /* Optimize comparisons with upper and lower bounds.  */
4945   if (HWI_COMPUTABLE_MODE_P (mode)
4946       && CONST_INT_P (trueop1)
4947       && !side_effects_p (trueop0))
4948     {
4949       int sign;
4950       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4951       HOST_WIDE_INT val = INTVAL (trueop1);
4952       HOST_WIDE_INT mmin, mmax;
4953 
4954       if (code == GEU
4955 	  || code == LEU
4956 	  || code == GTU
4957 	  || code == LTU)
4958 	sign = 0;
4959       else
4960 	sign = 1;
4961 
4962       /* Get a reduced range if the sign bit is zero.  */
4963       if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4964 	{
4965 	  mmin = 0;
4966 	  mmax = nonzero;
4967 	}
4968       else
4969 	{
4970 	  rtx mmin_rtx, mmax_rtx;
4971 	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4972 
4973 	  mmin = INTVAL (mmin_rtx);
4974 	  mmax = INTVAL (mmax_rtx);
4975 	  if (sign)
4976 	    {
4977 	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4978 
4979 	      mmin >>= (sign_copies - 1);
4980 	      mmax >>= (sign_copies - 1);
4981 	    }
4982 	}
4983 
4984       switch (code)
4985 	{
4986 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
4987 	case GEU:
4988 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4989 	    return const_true_rtx;
4990 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4991 	    return const0_rtx;
4992 	  break;
4993 	case GE:
4994 	  if (val <= mmin)
4995 	    return const_true_rtx;
4996 	  if (val > mmax)
4997 	    return const0_rtx;
4998 	  break;
4999 
5000 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
5001 	case LEU:
5002 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5003 	    return const_true_rtx;
5004 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5005 	    return const0_rtx;
5006 	  break;
5007 	case LE:
5008 	  if (val >= mmax)
5009 	    return const_true_rtx;
5010 	  if (val < mmin)
5011 	    return const0_rtx;
5012 	  break;
5013 
5014 	case EQ:
5015 	  /* x == y is always false for y out of range.  */
5016 	  if (val < mmin || val > mmax)
5017 	    return const0_rtx;
5018 	  break;
5019 
5020 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
5021 	case GTU:
5022 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5023 	    return const0_rtx;
5024 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5025 	    return const_true_rtx;
5026 	  break;
5027 	case GT:
5028 	  if (val >= mmax)
5029 	    return const0_rtx;
5030 	  if (val < mmin)
5031 	    return const_true_rtx;
5032 	  break;
5033 
5034 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
5035 	case LTU:
5036 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5037 	    return const0_rtx;
5038 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5039 	    return const_true_rtx;
5040 	  break;
5041 	case LT:
5042 	  if (val <= mmin)
5043 	    return const0_rtx;
5044 	  if (val > mmax)
5045 	    return const_true_rtx;
5046 	  break;
5047 
5048 	case NE:
5049 	  /* x != y is always true for y out of range.  */
5050 	  if (val < mmin || val > mmax)
5051 	    return const_true_rtx;
5052 	  break;
5053 
5054 	default:
5055 	  break;
5056 	}
5057     }
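
  /* Illustrative example of the bounds logic above (added commentary, not
     from the original source): if nonzero_bits reports that only the low
     eight bits of TRUEOP0 can be set, then mmin = 0 and mmax = 255, so for
     a side-effect-free TRUEOP0 in a HOST_WIDE_INT-sized mode,
     (gtu x (const_int 255)) folds to const0_rtx and
     (leu x (const_int 255)) folds to const_true_rtx.  */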
5058 
5059   /* Optimize integer comparisons with zero.  */
5060   if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5061     {
5062       /* Some addresses are known to be nonzero.  We don't know
5063 	 their sign, but equality comparisons are known.  */
5064       if (nonzero_address_p (trueop0))
5065 	{
5066 	  if (code == EQ || code == LEU)
5067 	    return const0_rtx;
5068 	  if (code == NE || code == GTU)
5069 	    return const_true_rtx;
5070 	}
5071 
5072       /* See if the first operand is an IOR with a constant.  If so, we
5073 	 may be able to determine the result of this comparison.  */
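      /* For instance (an added illustrative note, not in the original
	 source): for (ior x (const_int 4)) compared against zero, the IOR
	 is known to be nonzero, so EQ folds to const0_rtx and NE folds to
	 const_true_rtx; if the inner constant also has the sign bit of the
	 mode set, LT and LE against zero fold to const_true_rtx.  */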
5074       if (GET_CODE (op0) == IOR)
5075 	{
5076 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5077 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5078 	    {
5079 	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5080 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5081 			      && (UINTVAL (inner_const)
5082 				  & ((unsigned HOST_WIDE_INT) 1
5083 				     << sign_bitnum)));
5084 
5085 	      switch (code)
5086 		{
5087 		case EQ:
5088 		case LEU:
5089 		  return const0_rtx;
5090 		case NE:
5091 		case GTU:
5092 		  return const_true_rtx;
5093 		case LT:
5094 		case LE:
5095 		  if (has_sign)
5096 		    return const_true_rtx;
5097 		  break;
5098 		case GT:
5099 		case GE:
5100 		  if (has_sign)
5101 		    return const0_rtx;
5102 		  break;
5103 		default:
5104 		  break;
5105 		}
5106 	    }
5107 	}
5108     }
5109 
5110   /* Optimize comparison of ABS with zero.  */
5111   if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5112       && (GET_CODE (trueop0) == ABS
5113 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
5114 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5115     {
5116       switch (code)
5117 	{
5118 	case LT:
5119 	  /* Optimize abs(x) < 0.0.  */
5120 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5121 	    return const0_rtx;
5122 	  break;
5123 
5124 	case GE:
5125 	  /* Optimize abs(x) >= 0.0.  */
5126 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5127 	    return const_true_rtx;
5128 	  break;
5129 
5130 	case UNGE:
5131 	  /* Optimize ! (abs(x) < 0.0).  */
5132 	  return const_true_rtx;
5133 
5134 	default:
5135 	  break;
5136 	}
5137     }
5138 
5139   return 0;
5140 }
5141 
5142 /* Simplify CODE, an operation with result mode MODE and three operands,
5143    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
5144    a constant.  Return 0 if no simplification is possible.  */
5145 
5146 rtx
5147 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5148 			    machine_mode op0_mode, rtx op0, rtx op1,
5149 			    rtx op2)
5150 {
5151   unsigned int width = GET_MODE_PRECISION (mode);
5152   bool any_change = false;
5153   rtx tem, trueop2;
5154 
5155   /* VOIDmode means "infinite" precision.  */
5156   if (width == 0)
5157     width = HOST_BITS_PER_WIDE_INT;
5158 
5159   switch (code)
5160     {
5161     case FMA:
5162       /* Simplify negations around the multiplication.  */
5163       /* -a * -b + c  =>  a * b + c.  */
5164       if (GET_CODE (op0) == NEG)
5165 	{
5166 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
5167 	  if (tem)
5168 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5169 	}
5170       else if (GET_CODE (op1) == NEG)
5171 	{
5172 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
5173 	  if (tem)
5174 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5175 	}
5176 
5177       /* Canonicalize the two multiplication operands.  */
5178       /* a * -b + c  =>  -b * a + c.  */
5179       if (swap_commutative_operands_p (op0, op1))
5180 	tem = op0, op0 = op1, op1 = tem, any_change = true;
5181 
5182       if (any_change)
5183 	return gen_rtx_FMA (mode, op0, op1, op2);
5184       return NULL_RTX;
5185 
5186     case SIGN_EXTRACT:
5187     case ZERO_EXTRACT:
5188       if (CONST_INT_P (op0)
5189 	  && CONST_INT_P (op1)
5190 	  && CONST_INT_P (op2)
5191 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5192 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5193 	{
5194 	  /* Extracting a bit-field from a constant.  */
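	  /* As an illustrative sketch (added commentary, not upstream):
	     with !BITS_BIG_ENDIAN, (zero_extract (const_int 0x123)
	     (const_int 4) (const_int 4)) extracts bits 4..7 of 0x123 and
	     yields (const_int 2), while the SIGN_EXTRACT variant would
	     additionally sign-extend from bit 3 of the extracted field.  */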
5195 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
5196 	  HOST_WIDE_INT op1val = INTVAL (op1);
5197 	  HOST_WIDE_INT op2val = INTVAL (op2);
5198 	  if (BITS_BIG_ENDIAN)
5199 	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5200 	  else
5201 	    val >>= op2val;
5202 
5203 	  if (HOST_BITS_PER_WIDE_INT != op1val)
5204 	    {
5205 	      /* First zero-extend.  */
5206 	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5207 	      /* If desired, propagate sign bit.  */
5208 	      if (code == SIGN_EXTRACT
5209 		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5210 		     != 0)
5211 		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5212 	    }
5213 
5214 	  return gen_int_mode (val, mode);
5215 	}
5216       break;
5217 
5218     case IF_THEN_ELSE:
5219       if (CONST_INT_P (op0))
5220 	return op0 != const0_rtx ? op1 : op2;
5221 
5222       /* Convert c ? a : a into "a".  */
5223       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5224 	return op1;
5225 
5226       /* Convert a != b ? a : b into "a".  */
5227       if (GET_CODE (op0) == NE
5228 	  && ! side_effects_p (op0)
5229 	  && ! HONOR_NANS (mode)
5230 	  && ! HONOR_SIGNED_ZEROS (mode)
5231 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5232 	       && rtx_equal_p (XEXP (op0, 1), op2))
5233 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5234 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5235 	return op1;
5236 
5237       /* Convert a == b ? a : b into "b".  */
5238       if (GET_CODE (op0) == EQ
5239 	  && ! side_effects_p (op0)
5240 	  && ! HONOR_NANS (mode)
5241 	  && ! HONOR_SIGNED_ZEROS (mode)
5242 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5243 	       && rtx_equal_p (XEXP (op0, 1), op2))
5244 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5245 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5246 	return op2;
5247 
5248       if (COMPARISON_P (op0) && ! side_effects_p (op0))
5249 	{
5250 	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5251 					? GET_MODE (XEXP (op0, 1))
5252 					: GET_MODE (XEXP (op0, 0)));
5253 	  rtx temp;
5254 
5255 	  /* Look for constant pairs in op1 and op2 that reduce the
	     conditional to a bare comparison.  */
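	  /* Sketch of the intent (editorial example, not from the original
	     source): on a target where STORE_FLAG_VALUE is 1,
	     (if_then_else (lt x y) (const_int 1) (const_int 0)) simplifies
	     to (lt x y), and with the two constants swapped it simplifies
	     to the reversed comparison (ge x y) when that is known.  */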
5256 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
5257 	    {
5258 	      HOST_WIDE_INT t = INTVAL (op1);
5259 	      HOST_WIDE_INT f = INTVAL (op2);
5260 
5261 	      if (t == STORE_FLAG_VALUE && f == 0)
5262 	        code = GET_CODE (op0);
5263 	      else if (t == 0 && f == STORE_FLAG_VALUE)
5264 		{
5265 		  enum rtx_code tmp;
5266 		  tmp = reversed_comparison_code (op0, NULL_RTX);
5267 		  if (tmp == UNKNOWN)
5268 		    break;
5269 		  code = tmp;
5270 		}
5271 	      else
5272 		break;
5273 
5274 	      return simplify_gen_relational (code, mode, cmp_mode,
5275 					      XEXP (op0, 0), XEXP (op0, 1));
5276 	    }
5277 
5278 	  if (cmp_mode == VOIDmode)
5279 	    cmp_mode = op0_mode;
5280 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5281 			  			cmp_mode, XEXP (op0, 0),
5282 						XEXP (op0, 1));
5283 
5284 	  /* See if any simplifications were possible.  */
5285 	  if (temp)
5286 	    {
5287 	      if (CONST_INT_P (temp))
5288 		return temp == const0_rtx ? op2 : op1;
5289 	      else if (temp)
5290 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5291 	    }
5292 	}
5293       break;
5294 
5295     case VEC_MERGE:
5296       gcc_assert (GET_MODE (op0) == mode);
5297       gcc_assert (GET_MODE (op1) == mode);
5298       gcc_assert (VECTOR_MODE_P (mode));
5299       trueop2 = avoid_constant_pool_reference (op2);
5300       if (CONST_INT_P (trueop2))
5301 	{
5302 	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5303 	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5304 	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5305 	  unsigned HOST_WIDE_INT mask;
5306 	  if (n_elts == HOST_BITS_PER_WIDE_INT)
5307 	    mask = -1;
5308 	  else
5309 	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5310 
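	  /* Illustrative note (added, not in the original source): bit I of
	     SEL being set selects element I from OP0, a clear bit selects it
	     from OP1.  For a four-element vector, SEL = 5 (0b0101) takes
	     elements 0 and 2 from OP0 and elements 1 and 3 from OP1; SEL = 0
	     yields OP1 and SEL = 15 yields OP0, as the checks below show.  */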
5311 	  if (!(sel & mask) && !side_effects_p (op0))
5312 	    return op1;
5313 	  if ((sel & mask) == mask && !side_effects_p (op1))
5314 	    return op0;
5315 
5316 	  rtx trueop0 = avoid_constant_pool_reference (op0);
5317 	  rtx trueop1 = avoid_constant_pool_reference (op1);
5318 	  if (GET_CODE (trueop0) == CONST_VECTOR
5319 	      && GET_CODE (trueop1) == CONST_VECTOR)
5320 	    {
5321 	      rtvec v = rtvec_alloc (n_elts);
5322 	      unsigned int i;
5323 
5324 	      for (i = 0; i < n_elts; i++)
5325 		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5326 				    ? CONST_VECTOR_ELT (trueop0, i)
5327 				    : CONST_VECTOR_ELT (trueop1, i));
5328 	      return gen_rtx_CONST_VECTOR (mode, v);
5329 	    }
5330 
5331 	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5332 	     if no element from a appears in the result.  */
5333 	  if (GET_CODE (op0) == VEC_MERGE)
5334 	    {
5335 	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
5336 	      if (CONST_INT_P (tem))
5337 		{
5338 		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5339 		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5340 		    return simplify_gen_ternary (code, mode, mode,
5341 						 XEXP (op0, 1), op1, op2);
5342 		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5343 		    return simplify_gen_ternary (code, mode, mode,
5344 						 XEXP (op0, 0), op1, op2);
5345 		}
5346 	    }
5347 	  if (GET_CODE (op1) == VEC_MERGE)
5348 	    {
5349 	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
5350 	      if (CONST_INT_P (tem))
5351 		{
5352 		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5353 		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5354 		    return simplify_gen_ternary (code, mode, mode,
5355 						 op0, XEXP (op1, 1), op2);
5356 		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5357 		    return simplify_gen_ternary (code, mode, mode,
5358 						 op0, XEXP (op1, 0), op2);
5359 		}
5360 	    }
5361 
5362 	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5363 	     with a.  */
5364 	  if (GET_CODE (op0) == VEC_DUPLICATE
5365 	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5366 	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5367 	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5368 	    {
5369 	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5370 	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
5371 		{
5372 		  if (XEXP (XEXP (op0, 0), 0) == op1
5373 		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5374 		    return op1;
5375 		}
5376 	    }
5377 	}
5378 
5379       if (rtx_equal_p (op0, op1)
5380 	  && !side_effects_p (op2) && !side_effects_p (op1))
5381 	return op0;
5382 
5383       break;
5384 
5385     default:
5386       gcc_unreachable ();
5387     }
5388 
5389   return 0;
5390 }
5391 
5392 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5393    or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5394    CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5395 
5396    Works by unpacking OP into a collection of 8-bit values
5397    represented as a little-endian array of 'unsigned char', selecting by BYTE,
5398    and then repacking them again for OUTERMODE.  */
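
/* As a rough illustration (added note, not part of the original comment):
   on a little-endian target, simplifying (subreg:QI (const_int 0x1234) 0)
   with INNERMODE == HImode unpacks the value into the bytes {0x34, 0x12},
   selects byte 0, and repacks it as (const_int 0x34); byte 1 would give
   (const_int 0x12).  */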
5399 
5400 static rtx
5401 simplify_immed_subreg (machine_mode outermode, rtx op,
5402 		       machine_mode innermode, unsigned int byte)
5403 {
5404   enum {
5405     value_bit = 8,
5406     value_mask = (1 << value_bit) - 1
5407   };
5408   unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5409   int value_start;
5410   int i;
5411   int elem;
5412 
5413   int num_elem;
5414   rtx * elems;
5415   int elem_bitsize;
5416   rtx result_s;
5417   rtvec result_v = NULL;
5418   enum mode_class outer_class;
5419   machine_mode outer_submode;
5420   int max_bitsize;
5421 
5422   /* Some ports misuse CCmode.  */
5423   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5424     return op;
5425 
5426   /* We have no way to represent a complex constant at the rtl level.  */
5427   if (COMPLEX_MODE_P (outermode))
5428     return NULL_RTX;
5429 
5430   /* We support any size mode.  */
5431   max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5432 		     GET_MODE_BITSIZE (innermode));
5433 
5434   /* Unpack the value.  */
5435 
5436   if (GET_CODE (op) == CONST_VECTOR)
5437     {
5438       num_elem = CONST_VECTOR_NUNITS (op);
5439       elems = &CONST_VECTOR_ELT (op, 0);
5440       elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5441     }
5442   else
5443     {
5444       num_elem = 1;
5445       elems = &op;
5446       elem_bitsize = max_bitsize;
5447     }
5448   /* If this asserts, it is too complicated; reducing value_bit may help.  */
5449   gcc_assert (BITS_PER_UNIT % value_bit == 0);
5450   /* I don't know how to handle endianness of sub-units.  */
5451   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5452 
5453   for (elem = 0; elem < num_elem; elem++)
5454     {
5455       unsigned char * vp;
5456       rtx el = elems[elem];
5457 
5458       /* Vectors are kept in target memory order.  (This is probably
5459 	 a mistake.)  */
5460       {
5461 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5462 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5463 			  / BITS_PER_UNIT);
5464 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5465 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5466 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5467 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5468 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5469       }
5470 
5471       switch (GET_CODE (el))
5472 	{
5473 	case CONST_INT:
5474 	  for (i = 0;
5475 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5476 	       i += value_bit)
5477 	    *vp++ = INTVAL (el) >> i;
5478 	  /* CONST_INTs are always logically sign-extended.  */
5479 	  for (; i < elem_bitsize; i += value_bit)
5480 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
5481 	  break;
5482 
5483 	case CONST_WIDE_INT:
5484 	  {
5485 	    rtx_mode_t val = std::make_pair (el, innermode);
5486 	    unsigned char extend = wi::sign_mask (val);
5487 
5488 	    for (i = 0; i < elem_bitsize; i += value_bit)
5489 	      *vp++ = wi::extract_uhwi (val, i, value_bit);
5490 	    for (; i < elem_bitsize; i += value_bit)
5491 	      *vp++ = extend;
5492 	  }
5493 	  break;
5494 
5495 	case CONST_DOUBLE:
5496 	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5497 	    {
5498 	      unsigned char extend = 0;
5499 	      /* If this triggers, someone should have generated a
5500 		 CONST_INT instead.  */
5501 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5502 
5503 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5504 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
5505 	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5506 		{
5507 		  *vp++
5508 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5509 		  i += value_bit;
5510 		}
5511 
5512 	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5513 		extend = -1;
5514 	      for (; i < elem_bitsize; i += value_bit)
5515 		*vp++ = extend;
5516 	    }
5517 	  else
5518 	    {
5519 	      /* This is big enough for anything on the platform.  */
5520 	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5521 	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5522 
5523 	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5524 	      gcc_assert (bitsize <= elem_bitsize);
5525 	      gcc_assert (bitsize % value_bit == 0);
5526 
5527 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5528 			      GET_MODE (el));
5529 
5530 	      /* real_to_target produces its result in words affected by
5531 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5532 		 and use WORDS_BIG_ENDIAN instead; see the documentation
5533 	         of SUBREG in rtl.texi.  */
5534 	      for (i = 0; i < bitsize; i += value_bit)
5535 		{
5536 		  int ibase;
5537 		  if (WORDS_BIG_ENDIAN)
5538 		    ibase = bitsize - 1 - i;
5539 		  else
5540 		    ibase = i;
5541 		  *vp++ = tmp[ibase / 32] >> i % 32;
5542 		}
5543 
5544 	      /* It shouldn't matter what's done here, so fill it with
5545 		 zero.  */
5546 	      for (; i < elem_bitsize; i += value_bit)
5547 		*vp++ = 0;
5548 	    }
5549 	  break;
5550 
5551         case CONST_FIXED:
5552 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5553 	    {
5554 	      for (i = 0; i < elem_bitsize; i += value_bit)
5555 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5556 	    }
5557 	  else
5558 	    {
5559 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5560 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5561               for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5562 		   i += value_bit)
5563 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
5564 			>> (i - HOST_BITS_PER_WIDE_INT);
5565 	      for (; i < elem_bitsize; i += value_bit)
5566 		*vp++ = 0;
5567 	    }
5568           break;
5569 
5570 	default:
5571 	  gcc_unreachable ();
5572 	}
5573     }
5574 
5575   /* Now, pick the right byte to start with.  */
5576   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
5577      case is paradoxical SUBREGs, which shouldn't be adjusted since they
5578      will already have offset 0.  */
5579   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5580     {
5581       unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5582 			- byte);
5583       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5584       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5585       byte = (subword_byte % UNITS_PER_WORD
5586 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5587     }
5588 
5589   /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
5590      so if it's become negative it will instead be very large.)  */
5591   gcc_assert (byte < GET_MODE_SIZE (innermode));
5592 
5593   /* Convert from bytes to chunks of size value_bit.  */
5594   value_start = byte * (BITS_PER_UNIT / value_bit);
5595 
5596   /* Re-pack the value.  */
5597 
5598   if (VECTOR_MODE_P (outermode))
5599     {
5600       num_elem = GET_MODE_NUNITS (outermode);
5601       result_v = rtvec_alloc (num_elem);
5602       elems = &RTVEC_ELT (result_v, 0);
5603       outer_submode = GET_MODE_INNER (outermode);
5604     }
5605   else
5606     {
5607       num_elem = 1;
5608       elems = &result_s;
5609       outer_submode = outermode;
5610     }
5611 
5612   outer_class = GET_MODE_CLASS (outer_submode);
5613   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5614 
5615   gcc_assert (elem_bitsize % value_bit == 0);
5616   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5617 
5618   for (elem = 0; elem < num_elem; elem++)
5619     {
5620       unsigned char *vp;
5621 
5622       /* Vectors are stored in target memory order.  (This is probably
5623 	 a mistake.)  */
5624       {
5625 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5626 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5627 			  / BITS_PER_UNIT);
5628 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5629 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5630 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5631 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5632 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5633       }
5634 
5635       switch (outer_class)
5636 	{
5637 	case MODE_INT:
5638 	case MODE_PARTIAL_INT:
5639 	  {
5640 	    int u;
5641 	    int base = 0;
5642 	    int units
5643 	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5644 	      / HOST_BITS_PER_WIDE_INT;
5645 	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5646 	    wide_int r;
5647 
5648 	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5649 	      return NULL_RTX;
5650 	    for (u = 0; u < units; u++)
5651 	      {
5652 		unsigned HOST_WIDE_INT buf = 0;
5653 		for (i = 0;
5654 		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5655 		     i += value_bit)
5656 		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5657 
5658 		tmp[u] = buf;
5659 		base += HOST_BITS_PER_WIDE_INT;
5660 	      }
5661 	    r = wide_int::from_array (tmp, units,
5662 				      GET_MODE_PRECISION (outer_submode));
5663 #if TARGET_SUPPORTS_WIDE_INT == 0
5664 	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
5665 	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5666 	      return NULL_RTX;
5667 #endif
5668 	    elems[elem] = immed_wide_int_const (r, outer_submode);
5669 	  }
5670 	  break;
5671 
5672 	case MODE_FLOAT:
5673 	case MODE_DECIMAL_FLOAT:
5674 	  {
5675 	    REAL_VALUE_TYPE r;
5676 	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5677 
5678 	    /* real_from_target wants its input in words affected by
5679 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5680 	       and use WORDS_BIG_ENDIAN instead; see the documentation
5681 	       of SUBREG in rtl.texi.  */
5682 	    for (i = 0; i < max_bitsize / 32; i++)
5683 	      tmp[i] = 0;
5684 	    for (i = 0; i < elem_bitsize; i += value_bit)
5685 	      {
5686 		int ibase;
5687 		if (WORDS_BIG_ENDIAN)
5688 		  ibase = elem_bitsize - 1 - i;
5689 		else
5690 		  ibase = i;
5691 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5692 	      }
5693 
5694 	    real_from_target (&r, tmp, outer_submode);
5695 	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5696 	  }
5697 	  break;
5698 
5699 	case MODE_FRACT:
5700 	case MODE_UFRACT:
5701 	case MODE_ACCUM:
5702 	case MODE_UACCUM:
5703 	  {
5704 	    FIXED_VALUE_TYPE f;
5705 	    f.data.low = 0;
5706 	    f.data.high = 0;
5707 	    f.mode = outer_submode;
5708 
5709 	    for (i = 0;
5710 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5711 		 i += value_bit)
5712 	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5713 	    for (; i < elem_bitsize; i += value_bit)
5714 	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5715 			     << (i - HOST_BITS_PER_WIDE_INT));
5716 
5717 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5718           }
5719           break;
5720 
5721 	default:
5722 	  gcc_unreachable ();
5723 	}
5724     }
5725   if (VECTOR_MODE_P (outermode))
5726     return gen_rtx_CONST_VECTOR (outermode, result_v);
5727   else
5728     return result_s;
5729 }
5730 
5731 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5732    Return 0 if no simplifications are possible.  */
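/* For instance (an illustrative addition, not part of the original
   comment), simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) hands
   the constant to simplify_immed_subreg and yields the selected byte as a
   QImode constant (the low byte, (const_int 0x34), on a little-endian
   target), while a SUBREG of a SUBREG is collapsed into a single SUBREG
   of the innermost register when the offsets combine cleanly.  */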
5733 rtx
5734 simplify_subreg (machine_mode outermode, rtx op,
5735 		 machine_mode innermode, unsigned int byte)
5736 {
5737   /* Little bit of sanity checking.  */
5738   gcc_assert (innermode != VOIDmode);
5739   gcc_assert (outermode != VOIDmode);
5740   gcc_assert (innermode != BLKmode);
5741   gcc_assert (outermode != BLKmode);
5742 
5743   gcc_assert (GET_MODE (op) == innermode
5744 	      || GET_MODE (op) == VOIDmode);
5745 
5746   if ((byte % GET_MODE_SIZE (outermode)) != 0)
5747     return NULL_RTX;
5748 
5749   if (byte >= GET_MODE_SIZE (innermode))
5750     return NULL_RTX;
5751 
5752   if (outermode == innermode && !byte)
5753     return op;
5754 
5755   if (CONST_SCALAR_INT_P (op)
5756       || CONST_DOUBLE_AS_FLOAT_P (op)
5757       || GET_CODE (op) == CONST_FIXED
5758       || GET_CODE (op) == CONST_VECTOR)
5759     return simplify_immed_subreg (outermode, op, innermode, byte);
5760 
5761   /* Changing mode twice with SUBREG => just change it once,
5762      or not at all if changing back to the op's starting mode.  */
5763   if (GET_CODE (op) == SUBREG)
5764     {
5765       machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5766       int final_offset = byte + SUBREG_BYTE (op);
5767       rtx newx;
5768 
5769       if (outermode == innermostmode
5770 	  && byte == 0 && SUBREG_BYTE (op) == 0)
5771 	return SUBREG_REG (op);
5772 
5773       /* The SUBREG_BYTE represents the offset, as if the value were stored
5774 	 in memory.  An irritating exception is the paradoxical subreg,
5775 	 where we define SUBREG_BYTE to be 0; on big-endian machines, this
5776 	 value should be negative.  For a moment, undo this exception.  */
5777       if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5778 	{
5779 	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5780 	  if (WORDS_BIG_ENDIAN)
5781 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5782 	  if (BYTES_BIG_ENDIAN)
5783 	    final_offset += difference % UNITS_PER_WORD;
5784 	}
5785       if (SUBREG_BYTE (op) == 0
5786 	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5787 	{
5788 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5789 	  if (WORDS_BIG_ENDIAN)
5790 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5791 	  if (BYTES_BIG_ENDIAN)
5792 	    final_offset += difference % UNITS_PER_WORD;
5793 	}
5794 
5795       /* See whether resulting subreg will be paradoxical.  */
5796       if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5797 	{
5798 	  /* In nonparadoxical subregs we can't handle negative offsets.  */
5799 	  if (final_offset < 0)
5800 	    return NULL_RTX;
5801 	  /* Bail out in case resulting subreg would be incorrect.  */
5802 	  if (final_offset % GET_MODE_SIZE (outermode)
5803 	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5804 	    return NULL_RTX;
5805 	}
5806       else
5807 	{
5808 	  int offset = 0;
5809 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5810 
5811 	  /* In a paradoxical subreg, see if we are still looking at the lower
5812 	     part.  If so, our SUBREG_BYTE will be 0.  */
5813 	  if (WORDS_BIG_ENDIAN)
5814 	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5815 	  if (BYTES_BIG_ENDIAN)
5816 	    offset += difference % UNITS_PER_WORD;
5817 	  if (offset == final_offset)
5818 	    final_offset = 0;
5819 	  else
5820 	    return NULL_RTX;
5821 	}
5822 
5823       /* Recurse for further possible simplifications.  */
5824       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5825 			      final_offset);
5826       if (newx)
5827 	return newx;
5828       if (validate_subreg (outermode, innermostmode,
5829 			   SUBREG_REG (op), final_offset))
5830 	{
5831 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5832 	  if (SUBREG_PROMOTED_VAR_P (op)
5833 	      && SUBREG_PROMOTED_SIGN (op) >= 0
5834 	      && GET_MODE_CLASS (outermode) == MODE_INT
5835 	      && IN_RANGE (GET_MODE_SIZE (outermode),
5836 			   GET_MODE_SIZE (innermode),
5837 			   GET_MODE_SIZE (innermostmode))
5838 	      && subreg_lowpart_p (newx))
5839 	    {
5840 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
5841 	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5842 	    }
5843 	  return newx;
5844 	}
5845       return NULL_RTX;
5846     }
5847 
5848   /* SUBREG of a hard register => just change the register number
5849      and/or mode.  If the hard register is not valid in that mode,
5850      suppress this simplification.  If the hard register is the stack,
5851      frame, or argument pointer, leave this as a SUBREG.  */
5852 
5853   if (REG_P (op) && HARD_REGISTER_P (op))
5854     {
5855       unsigned int regno, final_regno;
5856 
5857       regno = REGNO (op);
5858       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5859       if (HARD_REGISTER_NUM_P (final_regno))
5860 	{
5861 	  rtx x;
5862 	  int final_offset = byte;
5863 
5864 	  /* Adjust offset for paradoxical subregs.  */
5865 	  if (byte == 0
5866 	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5867 	    {
5868 	      int difference = (GET_MODE_SIZE (innermode)
5869 				- GET_MODE_SIZE (outermode));
5870 	      if (WORDS_BIG_ENDIAN)
5871 		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5872 	      if (BYTES_BIG_ENDIAN)
5873 		final_offset += difference % UNITS_PER_WORD;
5874 	    }
5875 
5876 	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5877 
5878 	  /* Propagate the original regno.  We don't have any way to specify
5879 	     an offset inside the original regno, so do so only for the lowpart.
5880 	     The information is used only by alias analysis, which cannot
5881 	     grok partial registers anyway.  */
5882 
5883 	  if (subreg_lowpart_offset (outermode, innermode) == byte)
5884 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5885 	  return x;
5886 	}
5887     }
5888 
5889   /* If we have a SUBREG of a register that we are replacing and we are
5890      replacing it with a MEM, make a new MEM and try replacing the
5891      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
5892      or if we would be widening it.  */
5893 
5894   if (MEM_P (op)
5895       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5896       /* Allow splitting of volatile memory references in case we don't
5897          have an instruction to move the whole thing.  */
5898       && (! MEM_VOLATILE_P (op)
5899 	  || ! have_insn_for (SET, innermode))
5900       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5901     return adjust_address_nv (op, outermode, byte);
5902 
5903   /* Handle complex values represented as CONCAT
5904      of real and imaginary part.  */
5905   if (GET_CODE (op) == CONCAT)
5906     {
5907       unsigned int part_size, final_offset;
5908       rtx part, res;
5909 
5910       part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5911       if (byte < part_size)
5912 	{
5913 	  part = XEXP (op, 0);
5914 	  final_offset = byte;
5915 	}
5916       else
5917 	{
5918 	  part = XEXP (op, 1);
5919 	  final_offset = byte - part_size;
5920 	}
5921 
5922       if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5923 	return NULL_RTX;
5924 
5925       res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5926       if (res)
5927 	return res;
5928       if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5929 	return gen_rtx_SUBREG (outermode, part, final_offset);
5930       return NULL_RTX;
5931     }
5932 
5933   /* A SUBREG resulting from a zero extension may fold to zero if
5934      it extracts higher bits than the ZERO_EXTEND's source provides.  */
5935   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5936     {
5937       unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5938       if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5939 	return CONST0_RTX (outermode);
5940     }
5941 
5942   if (SCALAR_INT_MODE_P (outermode)
5943       && SCALAR_INT_MODE_P (innermode)
5944       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5945       && byte == subreg_lowpart_offset (outermode, innermode))
5946     {
5947       rtx tem = simplify_truncation (outermode, op, innermode);
5948       if (tem)
5949 	return tem;
5950     }
5951 
5952   return NULL_RTX;
5953 }
5954 
5955 /* Make a SUBREG operation or equivalent if it folds.  */
5956 
5957 rtx
5958 simplify_gen_subreg (machine_mode outermode, rtx op,
5959 		     machine_mode innermode, unsigned int byte)
5960 {
5961   rtx newx;
5962 
5963   newx = simplify_subreg (outermode, op, innermode, byte);
5964   if (newx)
5965     return newx;
5966 
5967   if (GET_CODE (op) == SUBREG
5968       || GET_CODE (op) == CONCAT
5969       || GET_MODE (op) == VOIDmode)
5970     return NULL_RTX;
5971 
5972   if (validate_subreg (outermode, innermode, op, byte))
5973     return gen_rtx_SUBREG (outermode, op, byte);
5974 
5975   return NULL_RTX;
5976 }
5977 
5978 /* Simplify X, an rtx expression.
5979 
5980    Return the simplified expression or NULL if no simplifications
5981    were possible.
5982 
5983    This is the preferred entry point into the simplification routines;
5984    however, we still allow passes to call the more specific routines.
5985 
5986    Right now GCC has three (yes, three) major bodies of RTL simplification
5987    code that need to be unified.
5988 
5989 	1. fold_rtx in cse.c.  This code uses various CSE specific
5990 	   information to aid in RTL simplification.
5991 
5992 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
5993 	   it uses combine specific information to aid in RTL
5994 	   simplification.
5995 
5996 	3. The routines in this file.
5997 
5998 
5999    Long term we want to only have one body of simplification code; to
6000    get to that state I recommend the following steps:
6001 
6002 	1. Pore over fold_rtx & simplify_rtx and move any simplifications
6003 	   that do not depend on pass-specific state into these routines.
6004 
6005 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
6006 	   use this routine whenever possible.
6007 
6008 	3. Allow for pass dependent state to be provided to these
6009 	   routines and add simplifications based on the pass dependent
6010 	   state.  Remove code from cse.c & combine.c that becomes
6011 	   redundant/dead.
6012 
6013     It will take time, but ultimately the compiler will be easier to
6014     maintain and improve.  It's totally silly that when we add a
6015     simplification it needs to be added to 4 places (3 for RTL
6016     simplification and 1 for tree simplification).  */
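
/* A small usage sketch (editorial addition, not from the original text):
   simplify_rtx dispatches on the rtx class, so passing it
   (plus:SI (const_int 2) (const_int 3)) ends up in
   simplify_binary_operation and yields (const_int 5), while passing a
   plain REG or MEM returns NULL because no simplification applies.  */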
6017 
6018 rtx
6019 simplify_rtx (const_rtx x)
6020 {
6021   const enum rtx_code code = GET_CODE (x);
6022   const machine_mode mode = GET_MODE (x);
6023 
6024   switch (GET_RTX_CLASS (code))
6025     {
6026     case RTX_UNARY:
6027       return simplify_unary_operation (code, mode,
6028 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6029     case RTX_COMM_ARITH:
6030       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6031 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6032 
6033       /* Fall through....  */
6034 
6035     case RTX_BIN_ARITH:
6036       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6037 
6038     case RTX_TERNARY:
6039     case RTX_BITFIELD_OPS:
6040       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6041 					 XEXP (x, 0), XEXP (x, 1),
6042 					 XEXP (x, 2));
6043 
6044     case RTX_COMPARE:
6045     case RTX_COMM_COMPARE:
6046       return simplify_relational_operation (code, mode,
6047                                             ((GET_MODE (XEXP (x, 0))
6048                                              != VOIDmode)
6049                                             ? GET_MODE (XEXP (x, 0))
6050                                             : GET_MODE (XEXP (x, 1))),
6051                                             XEXP (x, 0),
6052                                             XEXP (x, 1));
6053 
6054     case RTX_EXTRA:
6055       if (code == SUBREG)
6056 	return simplify_subreg (mode, SUBREG_REG (x),
6057 				GET_MODE (SUBREG_REG (x)),
6058 				SUBREG_BYTE (x));
6059       break;
6060 
6061     case RTX_OBJ:
6062       if (code == LO_SUM)
6063 	{
6064 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
6065 	  if (GET_CODE (XEXP (x, 0)) == HIGH
6066 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6067 	    return XEXP (x, 1);
6068 	}
6069       break;
6070 
6071     default:
6072       break;
6073     }
6074   return NULL;
6075 }
6076