/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))

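/* For example, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is -1 and
   HWI_SIGN_EXTEND (5) is 0, i.e. it yields the high half that a
   CONST_INT's value would have if widened to a (low, high) pair.  */
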
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
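
/* E.g. in QImode, negating (const_int -128) overflows to 128, which
   gen_int_mode truncates back to (const_int -128).  */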

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
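
/* For example, for SImode this accepts only a CONST_INT whose low 32
   bits are 0x80000000 (the value 1 << 31); every other constant fails
   the final equality test.  */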

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
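
/* For example, given a MEM whose address is a constant-pool SYMBOL_REF
   whose pool entry is (const_double:DF 1.0), and a matching mode and
   zero offset, this returns that CONST_DOUBLE directly so later
   constant folding can use it.  */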

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp = 0, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += INTVAL (MEM_OFFSET (x));

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
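
/* For example, with FN == NULL, replacing (reg 1) by (const_int 0) in
   (plus:SI (reg 1) (reg 2)) first produces
   (plus:SI (const_int 0) (reg 2)), which simplify_gen_binary then
   folds to just (reg 2).  */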

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1): in two's complement
	 arithmetic -X == ~X + 1, so ~(-X) == X - 1.  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}
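
      /* E.g. in QImode with X == 2, (not (ashift 1 2)) is ~4 == 0xfb,
	 and rotating ~1 == 0xfe left by two positions also yields 0xfb:
	 the rotation moves the single zero bit into position X.  */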

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);


      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_INT_P (XEXP (op, 1))
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
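
      /* These shift forms work because (ashiftrt X (isize - 1)) is -1
	 exactly when X < 0 and 0 otherwise, while
	 (lshiftrt X (isize - 1)) is 1 when X < 0 and 0 otherwise.  */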
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				 GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
							    0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /*  (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /*  (float_extend (float_extend x)) is (float_extend x)

	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
      {
	if (!VECTOR_MODE_P (GET_MODE (op)))
	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	else
	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
      }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
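
  /* E.g. (vec_duplicate:V4SI (const_int 5)) folds here to
     (const_vector:V4SI [5 5 5 5]).  */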

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;
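
	  /* E.g. for a 32-bit mode, BSWAP of 0x11223344 is computed
	     here as 0x44332211: each byte at offset S moves to the
	     mirrored offset WIDTH - S - 8.  */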
	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    hv = 0;
	    lv = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (l1 >> s) & 0xff;
		else
		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  lv |= byte << d;
		else
		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
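
/* For example, (fix:SI (const_double:DF 1e30)) saturates here to
   (const_int 2147483647) via the signed-upper-bound test, and a NaN
   operand folds to (const_int 0).  */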

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
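
/* For example, (plus (plus x (const_int 1)) y) is canonicalized here
   as (plus (plus x y) (const_int 1)), moving the constant outermost
   so later folding can combine it with other constants.  */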
1676 
1677 
1678 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1679    and OP1.  Return 0 if no simplification is possible.
1680 
1681    Don't use this for relational operations such as EQ or LT.
1682    Use simplify_relational_operation instead.  */
1683 rtx
1684 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1685 			   rtx op0, rtx op1)
1686 {
1687   rtx trueop0, trueop1;
1688   rtx tem;
1689 
1690   /* Relational operations don't work here.  We must know the mode
1691      of the operands in order to do the comparison correctly.
1692      Assuming a full word can give incorrect results.
1693      Consider comparing 128 with -128 in QImode.  */
1694   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1695   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1696 
1697   /* Make sure the constant is second.  */
1698   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1699       && swap_commutative_operands_p (op0, op1))
1700     {
1701       tem = op0, op0 = op1, op1 = tem;
1702     }
1703 
1704   trueop0 = avoid_constant_pool_reference (op0);
1705   trueop1 = avoid_constant_pool_reference (op1);
1706 
1707   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1708   if (tem)
1709     return tem;
1710   return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1711 }
1712 
1713 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
1714    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
1715    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1716    actual constants.  */
1717 
1718 static rtx
1719 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1720 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1721 {
1722   rtx tem, reversed, opleft, opright;
1723   HOST_WIDE_INT val;
1724   unsigned int width = GET_MODE_BITSIZE (mode);
1725 
1726   /* Even if we can't compute a constant result,
1727      there are some cases worth simplifying.  */
1728 
1729   switch (code)
1730     {
1731     case PLUS:
1732       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
1733 	 when x is NaN, infinite, or finite and nonzero.  They aren't
1734 	 when x is -0 and the rounding mode is not towards -infinity,
1735 	 since (-0) + 0 is then 0.  */
1736       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1737 	return op0;
1738 
1739       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
1740 	 transformations are safe even for IEEE.  */
1741       if (GET_CODE (op0) == NEG)
1742 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1743       else if (GET_CODE (op1) == NEG)
1744 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1745 
1746       /* (~a) + 1 -> -a */
1747       if (INTEGRAL_MODE_P (mode)
1748 	  && GET_CODE (op0) == NOT
1749 	  && trueop1 == const1_rtx)
1750 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1751 
1752       /* Handle both-operands-constant cases.  We can only add
1753 	 CONST_INTs to constants since the sum of relocatable symbols
1754 	 can't be handled by most assemblers.  Don't add CONST_INT
1755 	 to CONST_INT since overflow won't be computed properly if wider
1756 	 than HOST_BITS_PER_WIDE_INT.  */
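      /* For example, (plus (symbol_ref "foo") (const_int 4)) is folded
	 by plus_constant into (const (plus (symbol_ref "foo")
	 (const_int 4))), which assemblers can emit as a relocation
	 with an addend.  */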
1757 
1758       if ((GET_CODE (op0) == CONST
1759 	   || GET_CODE (op0) == SYMBOL_REF
1760 	   || GET_CODE (op0) == LABEL_REF)
1761 	  && CONST_INT_P (op1))
1762 	return plus_constant (op0, INTVAL (op1));
1763       else if ((GET_CODE (op1) == CONST
1764 		|| GET_CODE (op1) == SYMBOL_REF
1765 		|| GET_CODE (op1) == LABEL_REF)
1766 	       && CONST_INT_P (op0))
1767 	return plus_constant (op1, INTVAL (op0));
1768 
1769       /* See if this is something like X * C - X or vice versa or
1770 	 if the multiplication is written as a shift.  If so, we can
1771 	 distribute and make a new multiply, shift, or maybe just
1772 	 have X (if C is 2 in the example above).  But don't make
1773 	 something more expensive than we had before.  */
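      /* For example, (plus (mult X 3) X) can become (mult X 4), and
	 (plus (ashift X 2) X) can become (mult X 5); the rtx_cost
	 check below keeps the result only if it is no more expensive
	 than the original.  */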
1774 
1775       if (SCALAR_INT_MODE_P (mode))
1776 	{
1777 	  HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1778 	  unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1779 	  rtx lhs = op0, rhs = op1;
1780 
1781 	  if (GET_CODE (lhs) == NEG)
1782 	    {
1783 	      coeff0l = -1;
1784 	      coeff0h = -1;
1785 	      lhs = XEXP (lhs, 0);
1786 	    }
1787 	  else if (GET_CODE (lhs) == MULT
1788 		   && CONST_INT_P (XEXP (lhs, 1)))
1789 	    {
1790 	      coeff0l = INTVAL (XEXP (lhs, 1));
1791 	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1792 	      lhs = XEXP (lhs, 0);
1793 	    }
1794 	  else if (GET_CODE (lhs) == ASHIFT
1795 		   && CONST_INT_P (XEXP (lhs, 1))
1796 		   && INTVAL (XEXP (lhs, 1)) >= 0
1797 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1798 	    {
1799 	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1800 	      coeff0h = 0;
1801 	      lhs = XEXP (lhs, 0);
1802 	    }
1803 
1804 	  if (GET_CODE (rhs) == NEG)
1805 	    {
1806 	      coeff1l = -1;
1807 	      coeff1h = -1;
1808 	      rhs = XEXP (rhs, 0);
1809 	    }
1810 	  else if (GET_CODE (rhs) == MULT
1811 		   && CONST_INT_P (XEXP (rhs, 1)))
1812 	    {
1813 	      coeff1l = INTVAL (XEXP (rhs, 1));
1814 	      coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1815 	      rhs = XEXP (rhs, 0);
1816 	    }
1817 	  else if (GET_CODE (rhs) == ASHIFT
1818 		   && CONST_INT_P (XEXP (rhs, 1))
1819 		   && INTVAL (XEXP (rhs, 1)) >= 0
1820 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1821 	    {
1822 	      coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1823 	      coeff1h = 0;
1824 	      rhs = XEXP (rhs, 0);
1825 	    }
1826 
1827 	  if (rtx_equal_p (lhs, rhs))
1828 	    {
1829 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
1830 	      rtx coeff;
1831 	      unsigned HOST_WIDE_INT l;
1832 	      HOST_WIDE_INT h;
1833 	      bool speed = optimize_function_for_speed_p (cfun);
1834 
1835 	      add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1836 	      coeff = immed_double_const (l, h, mode);
1837 
1838 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1839 	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1840 		? tem : 0;
1841 	    }
1842 	}
1843 
1844       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
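      /* For example, in QImode (plus (xor X 0x90) 0x80) becomes
	 (xor X 0x10): any carry out of the sign bit is discarded, so
	 adding 0x80 just flips the top bit, exactly as XOR does.  */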
1845       if ((CONST_INT_P (op1)
1846 	   || GET_CODE (op1) == CONST_DOUBLE)
1847 	  && GET_CODE (op0) == XOR
1848 	  && (CONST_INT_P (XEXP (op0, 1))
1849 	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1850 	  && mode_signbit_p (mode, op1))
1851 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1852 				    simplify_gen_binary (XOR, mode, op1,
1853 							 XEXP (op0, 1)));
1854 
1855       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
1856       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1857 	  && GET_CODE (op0) == MULT
1858 	  && GET_CODE (XEXP (op0, 0)) == NEG)
1859 	{
1860 	  rtx in1, in2;
1861 
1862 	  in1 = XEXP (XEXP (op0, 0), 0);
1863 	  in2 = XEXP (op0, 1);
1864 	  return simplify_gen_binary (MINUS, mode, op1,
1865 				      simplify_gen_binary (MULT, mode,
1866 							   in1, in2));
1867 	}
1868 
1869       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1870 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1871 	 is 1.  */
1872       if (COMPARISON_P (op0)
1873 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1874 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1875 	  && (reversed = reversed_comparison (op0, mode)))
1876 	return
1877 	  simplify_gen_unary (NEG, mode, reversed, mode);
1878 
1879       /* If one of the operands is a PLUS or a MINUS, see if we can
1880 	 simplify this by the associative law.
1881 	 Don't use the associative law for floating point.
1882 	 The inaccuracy makes it nonassociative,
1883 	 and subtle programs can break if operations are associated.  */
1884 
1885       if (INTEGRAL_MODE_P (mode)
1886 	  && (plus_minus_operand_p (op0)
1887 	      || plus_minus_operand_p (op1))
1888 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1889 	return tem;
1890 
1891       /* Reassociate floating point addition only when the user
1892 	 specifies associative math operations.  */
1893       if (FLOAT_MODE_P (mode)
1894 	  && flag_associative_math)
1895 	{
1896 	  tem = simplify_associative_operation (code, mode, op0, op1);
1897 	  if (tem)
1898 	    return tem;
1899 	}
1900       break;
1901 
1902     case COMPARE:
1903       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
1904       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1905 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1906 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1907 	{
1908 	  rtx xop00 = XEXP (op0, 0);
1909 	  rtx xop10 = XEXP (op1, 0);
1910 
1911 #ifdef HAVE_cc0
1912 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1913 #else
1914 	    if (REG_P (xop00) && REG_P (xop10)
1915 		&& GET_MODE (xop00) == GET_MODE (xop10)
1916 		&& REGNO (xop00) == REGNO (xop10)
1917 		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1918 		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1919 #endif
1920 	      return xop00;
1921 	}
1922       break;
1923 
1924     case MINUS:
1925       /* We can't assume x-x is 0 even with non-IEEE floating point,
1926 	 but since it is zero except in very strange circumstances, we
1927 	 will treat it as zero with -ffinite-math-only.  */
1928       if (rtx_equal_p (trueop0, trueop1)
1929 	  && ! side_effects_p (op0)
1930 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1931 	return CONST0_RTX (mode);
1932 
1933       /* Change subtraction from zero into negation.  (0 - x) is the
1934 	 same as -x when x is NaN, infinite, or finite and nonzero.
1935 	 But if the mode has signed zeros, and does not round towards
1936 	 -infinity, then 0 - 0 is 0, not -0.  */
1937       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1938 	return simplify_gen_unary (NEG, mode, op1, mode);
1939 
1940       /* (-1 - a) is ~a.  */
1941       if (trueop0 == constm1_rtx)
1942 	return simplify_gen_unary (NOT, mode, op1, mode);
1943 
1944       /* Subtracting 0 has no effect unless the mode has signed zeros
1945 	 and supports rounding towards -infinity.  In such a case,
1946 	 0 - 0 is -0.  */
1947       if (!(HONOR_SIGNED_ZEROS (mode)
1948 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1949 	  && trueop1 == CONST0_RTX (mode))
1950 	return op0;
1951 
1952       /* See if this is something like X * C - X or vice versa or
1953 	 if the multiplication is written as a shift.  If so, we can
1954 	 distribute and make a new multiply, shift, or maybe just
1955 	 have X (if C is 2 in the example above).  But don't make
1956 	 something more expensive than we had before.  */
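      /* For example, (minus (mult X 3) X) can become (mult X 2).  */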
1957 
1958       if (SCALAR_INT_MODE_P (mode))
1959 	{
1960 	  HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1961 	  unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1962 	  rtx lhs = op0, rhs = op1;
1963 
1964 	  if (GET_CODE (lhs) == NEG)
1965 	    {
1966 	      coeff0l = -1;
1967 	      coeff0h = -1;
1968 	      lhs = XEXP (lhs, 0);
1969 	    }
1970 	  else if (GET_CODE (lhs) == MULT
1971 		   && CONST_INT_P (XEXP (lhs, 1)))
1972 	    {
1973 	      coeff0l = INTVAL (XEXP (lhs, 1));
1974 	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1975 	      lhs = XEXP (lhs, 0);
1976 	    }
1977 	  else if (GET_CODE (lhs) == ASHIFT
1978 		   && CONST_INT_P (XEXP (lhs, 1))
1979 		   && INTVAL (XEXP (lhs, 1)) >= 0
1980 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1981 	    {
1982 	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1983 	      coeff0h = 0;
1984 	      lhs = XEXP (lhs, 0);
1985 	    }
1986 
1987 	  if (GET_CODE (rhs) == NEG)
1988 	    {
1989 	      negcoeff1l = 1;
1990 	      negcoeff1h = 0;
1991 	      rhs = XEXP (rhs, 0);
1992 	    }
1993 	  else if (GET_CODE (rhs) == MULT
1994 		   && CONST_INT_P (XEXP (rhs, 1)))
1995 	    {
1996 	      negcoeff1l = -INTVAL (XEXP (rhs, 1));
1997 	      negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1998 	      rhs = XEXP (rhs, 0);
1999 	    }
2000 	  else if (GET_CODE (rhs) == ASHIFT
2001 		   && CONST_INT_P (XEXP (rhs, 1))
2002 		   && INTVAL (XEXP (rhs, 1)) >= 0
2003 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2004 	    {
2005 	      negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
2006 	      negcoeff1h = -1;
2007 	      rhs = XEXP (rhs, 0);
2008 	    }
2009 
2010 	  if (rtx_equal_p (lhs, rhs))
2011 	    {
2012 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2013 	      rtx coeff;
2014 	      unsigned HOST_WIDE_INT l;
2015 	      HOST_WIDE_INT h;
2016 	      bool speed = optimize_function_for_speed_p (cfun);
2017 
2018 	      add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
2019 	      coeff = immed_double_const (l, h, mode);
2020 
2021 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2022 	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2023 		? tem : 0;
2024 	    }
2025 	}
2026 
2027       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2028       if (GET_CODE (op1) == NEG)
2029 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2030 
2031       /* (-x - c) may be simplified as (-c - x).  */
2032       if (GET_CODE (op0) == NEG
2033 	  && (CONST_INT_P (op1)
2034 	      || GET_CODE (op1) == CONST_DOUBLE))
2035 	{
2036 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2037 	  if (tem)
2038 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2039 	}
2040 
2041       /* Don't let a relocatable value get a negative coeff.  */
2042       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2043 	return simplify_gen_binary (PLUS, mode,
2044 				    op0,
2045 				    neg_const_int (mode, op1));
2046 
2047       /* (x - (x & y)) -> (x & ~y) */
2048       if (GET_CODE (op1) == AND)
2049 	{
2050 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2051 	    {
2052 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2053 					GET_MODE (XEXP (op1, 1)));
2054 	      return simplify_gen_binary (AND, mode, op0, tem);
2055 	    }
2056 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2057 	    {
2058 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2059 					GET_MODE (XEXP (op1, 0)));
2060 	      return simplify_gen_binary (AND, mode, op0, tem);
2061 	    }
2062 	}
2063 
2064       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2065 	 by reversing the comparison code if valid.  */
2066       if (STORE_FLAG_VALUE == 1
2067 	  && trueop0 == const1_rtx
2068 	  && COMPARISON_P (op1)
2069 	  && (reversed = reversed_comparison (op1, mode)))
2070 	return reversed;
2071 
2072       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2073       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2074 	  && GET_CODE (op1) == MULT
2075 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2076 	{
2077 	  rtx in1, in2;
2078 
2079 	  in1 = XEXP (XEXP (op1, 0), 0);
2080 	  in2 = XEXP (op1, 1);
2081 	  return simplify_gen_binary (PLUS, mode,
2082 				      simplify_gen_binary (MULT, mode,
2083 							   in1, in2),
2084 				      op0);
2085 	}
2086 
2087       /* Canonicalize (minus (neg A) (mult B C)) to
2088 	 (minus (mult (neg B) C) A).  */
2089       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2090 	  && GET_CODE (op1) == MULT
2091 	  && GET_CODE (op0) == NEG)
2092 	{
2093 	  rtx in1, in2;
2094 
2095 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2096 	  in2 = XEXP (op1, 1);
2097 	  return simplify_gen_binary (MINUS, mode,
2098 				      simplify_gen_binary (MULT, mode,
2099 							   in1, in2),
2100 				      XEXP (op0, 0));
2101 	}
2102 
2103       /* If one of the operands is a PLUS or a MINUS, see if we can
2104 	 simplify this by the associative law.  This will, for example,
2105          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2106 	 Don't use the associative law for floating point.
2107 	 The inaccuracy makes it nonassociative,
2108 	 and subtle programs can break if operations are associated.  */
2109 
2110       if (INTEGRAL_MODE_P (mode)
2111 	  && (plus_minus_operand_p (op0)
2112 	      || plus_minus_operand_p (op1))
2113 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2114 	return tem;
2115       break;
2116 
2117     case MULT:
2118       if (trueop1 == constm1_rtx)
2119 	return simplify_gen_unary (NEG, mode, op0, mode);
2120 
2121       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2122 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2123 	 when the mode has signed zeros, since multiplying a negative
2124 	 number by 0 will give -0, not 0.  */
2125       if (!HONOR_NANS (mode)
2126 	  && !HONOR_SIGNED_ZEROS (mode)
2127 	  && trueop1 == CONST0_RTX (mode)
2128 	  && ! side_effects_p (op0))
2129 	return op1;
2130 
2131       /* In IEEE floating point, x*1 is not equivalent to x for
2132 	 signalling NaNs.  */
2133       if (!HONOR_SNANS (mode)
2134 	  && trueop1 == CONST1_RTX (mode))
2135 	return op0;
2136 
2137       /* Convert multiply by a constant power of two into a shift.  */
2139       if (CONST_INT_P (trueop1)
2140 	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
2141 	  /* If the mode is larger than the host word size, and the
2142 	     uppermost bit is set, then this isn't a power of two due
2143 	     to implicit sign extension.  */
2144 	  && (width <= HOST_BITS_PER_WIDE_INT
2145 	      || val != HOST_BITS_PER_WIDE_INT - 1))
2146 	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2147 
2148       /* Likewise for multipliers wider than a word.  */
2149       if (GET_CODE (trueop1) == CONST_DOUBLE
2150 	  && (GET_MODE (trueop1) == VOIDmode
2151 	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2152 	  && GET_MODE (op0) == mode
2153 	  && CONST_DOUBLE_LOW (trueop1) == 0
2154 	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2155 	return simplify_gen_binary (ASHIFT, mode, op0,
2156 				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2157 
2158       /* x*2 is x+x and x*(-1) is -x */
2159       if (GET_CODE (trueop1) == CONST_DOUBLE
2160 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2161 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2162 	  && GET_MODE (op0) == mode)
2163 	{
2164 	  REAL_VALUE_TYPE d;
2165 	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2166 
2167 	  if (REAL_VALUES_EQUAL (d, dconst2))
2168 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2169 
2170 	  if (!HONOR_SNANS (mode)
2171 	      && REAL_VALUES_EQUAL (d, dconstm1))
2172 	    return simplify_gen_unary (NEG, mode, op0, mode);
2173 	}
2174 
2175       /* Optimize -x * -x as x * x.  */
2176       if (FLOAT_MODE_P (mode)
2177 	  && GET_CODE (op0) == NEG
2178 	  && GET_CODE (op1) == NEG
2179 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2180 	  && !side_effects_p (XEXP (op0, 0)))
2181 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2182 
2183       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2184       if (SCALAR_FLOAT_MODE_P (mode)
2185 	  && GET_CODE (op0) == ABS
2186 	  && GET_CODE (op1) == ABS
2187 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2188 	  && !side_effects_p (XEXP (op0, 0)))
2189 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2190 
2191       /* Reassociate multiplication, but for floating point MULTs
2192 	 only when the user specifies unsafe math optimizations.  */
2193       if (! FLOAT_MODE_P (mode)
2194 	  || flag_unsafe_math_optimizations)
2195 	{
2196 	  tem = simplify_associative_operation (code, mode, op0, op1);
2197 	  if (tem)
2198 	    return tem;
2199 	}
2200       break;
2201 
2202     case IOR:
2203       if (trueop1 == const0_rtx)
2204 	return op0;
2205       if (CONST_INT_P (trueop1)
2206 	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2207 	      == GET_MODE_MASK (mode)))
2208 	return op1;
2209       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2210 	return op0;
2211       /* A | (~A) -> -1 */
2212       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2213 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2214 	  && ! side_effects_p (op0)
2215 	  && SCALAR_INT_MODE_P (mode))
2216 	return constm1_rtx;
2217 
2218       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2219       if (CONST_INT_P (op1)
2220 	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2221 	  && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2222 	return op1;
2223 
2224       /* Canonicalize (X & C1) | C2.  */
2225       if (GET_CODE (op0) == AND
2226 	  && CONST_INT_P (trueop1)
2227 	  && CONST_INT_P (XEXP (op0, 1)))
2228 	{
2229 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2230 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2231 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2232 
2233 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
2234 	  if ((c1 & c2) == c1
2235 	      && !side_effects_p (XEXP (op0, 0)))
2236 	    return trueop1;
2237 
2238 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2239 	  if (((c1|c2) & mask) == mask)
2240 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2241 
2242 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
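	  /* For example, with C1 == 0x0f and C2 == 0x05, this rewrites
	     (ior (and X 0x0f) 0x05) as (ior (and X 0x0a) 0x05); the
	     bits set in C2 are forced to 1 regardless of C1.  */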
2243 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2244 	    {
2245 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2246 					 gen_int_mode (c1 & ~c2, mode));
2247 	      return simplify_gen_binary (IOR, mode, tem, op1);
2248 	    }
2249 	}
2250 
2251       /* Convert (A & B) | A to A.  */
2252       if (GET_CODE (op0) == AND
2253 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2254 	      || rtx_equal_p (XEXP (op0, 1), op1))
2255 	  && ! side_effects_p (XEXP (op0, 0))
2256 	  && ! side_effects_p (XEXP (op0, 1)))
2257 	return op1;
2258 
2259       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2260          mode size to (rotate A CX).  */
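      /* For example, in SImode (ior (ashift A (const_int 8))
	 (lshiftrt A (const_int 24))) becomes (rotate A (const_int 8)).  */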
2261 
2262       if (GET_CODE (op1) == ASHIFT
2263           || GET_CODE (op1) == SUBREG)
2264         {
2265 	  opleft = op1;
2266 	  opright = op0;
2267 	}
2268       else
2269         {
2270 	  opright = op1;
2271 	  opleft = op0;
2272 	}
2273 
2274       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2275           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2276           && CONST_INT_P (XEXP (opleft, 1))
2277           && CONST_INT_P (XEXP (opright, 1))
2278           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2279               == GET_MODE_BITSIZE (mode)))
2280         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2281 
2282       /* Same, but for ashift that has been "simplified" to a wider mode
2283         by simplify_shift_const.  */
2284 
2285       if (GET_CODE (opleft) == SUBREG
2286           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2287           && GET_CODE (opright) == LSHIFTRT
2288           && GET_CODE (XEXP (opright, 0)) == SUBREG
2289           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2290           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2291           && (GET_MODE_SIZE (GET_MODE (opleft))
2292               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2293           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2294                           SUBREG_REG (XEXP (opright, 0)))
2295           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2296           && CONST_INT_P (XEXP (opright, 1))
2297           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2298               == GET_MODE_BITSIZE (mode)))
2299         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2300                                XEXP (SUBREG_REG (opleft), 1));
2301 
2302       /* If we have (ior (and X C1) C2), simplify this by making
2303 	 C1 as small as possible if C1 actually changes.  */
2304       if (CONST_INT_P (op1)
2305 	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2306 	      || INTVAL (op1) > 0)
2307 	  && GET_CODE (op0) == AND
2308 	  && CONST_INT_P (XEXP (op0, 1))
2310 	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2311 	return simplify_gen_binary (IOR, mode,
2312 				    simplify_gen_binary
2313 					  (AND, mode, XEXP (op0, 0),
2314 					   GEN_INT (INTVAL (XEXP (op0, 1))
2315 						    & ~INTVAL (op1))),
2316 				    op1);
2317 
2318       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2319          a (sign_extend (plus ...)).  If OP1 is a CONST_INT and the
2320 	 PLUS does not affect any of the bits in OP1, we can do the
2321 	 IOR as a PLUS and we can associate.  This is valid if OP1
2322          can be safely shifted left C bits.  */
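      /* For example, with C == 8 and OP1 == 0x0f, (ior (ashiftrt P 8)
	 0x0f) becomes (ashiftrt (plus P 0xf00) 8) when bits 0xf00 of
	 the inner PLUS P are known to be zero: adding 0xf00 then
	 cannot carry, so it is the same as IORing it in.  */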
2323       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2324           && GET_CODE (XEXP (op0, 0)) == PLUS
2325           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2326           && CONST_INT_P (XEXP (op0, 1))
2327           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2328         {
2329           int count = INTVAL (XEXP (op0, 1));
2330           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2331 
2332           if (mask >> count == INTVAL (trueop1)
2333               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2334 	    return simplify_gen_binary (ASHIFTRT, mode,
2335 					plus_constant (XEXP (op0, 0), mask),
2336 					XEXP (op0, 1));
2337         }
2338 
2339       tem = simplify_associative_operation (code, mode, op0, op1);
2340       if (tem)
2341 	return tem;
2342       break;
2343 
2344     case XOR:
2345       if (trueop1 == const0_rtx)
2346 	return op0;
2347       if (CONST_INT_P (trueop1)
2348 	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2349 	      == GET_MODE_MASK (mode)))
2350 	return simplify_gen_unary (NOT, mode, op0, mode);
2351       if (rtx_equal_p (trueop0, trueop1)
2352 	  && ! side_effects_p (op0)
2353 	  && GET_MODE_CLASS (mode) != MODE_CC)
2354 	 return CONST0_RTX (mode);
2355 
2356       /* Canonicalize XOR of the most significant bit to PLUS.  */
2357       if ((CONST_INT_P (op1)
2358 	   || GET_CODE (op1) == CONST_DOUBLE)
2359 	  && mode_signbit_p (mode, op1))
2360 	return simplify_gen_binary (PLUS, mode, op0, op1);
2361       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2362       if ((CONST_INT_P (op1)
2363 	   || GET_CODE (op1) == CONST_DOUBLE)
2364 	  && GET_CODE (op0) == PLUS
2365 	  && (CONST_INT_P (XEXP (op0, 1))
2366 	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2367 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2368 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2369 				    simplify_gen_binary (XOR, mode, op1,
2370 							 XEXP (op0, 1)));
2371 
2372       /* If we are XORing two things that have no bits in common,
2373 	 convert them into an IOR.  This helps to detect rotation encoded
2374 	 using those methods and possibly other simplifications.  */
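      /* For example, (xor (and X 0xf0) (and Y 0x0f)) becomes
	 (ior (and X 0xf0) (and Y 0x0f)); with no overlapping bits
	 XOR and IOR compute the same value.  */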
2375 
2376       if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2377 	  && (nonzero_bits (op0, mode)
2378 	      & nonzero_bits (op1, mode)) == 0)
2379 	return (simplify_gen_binary (IOR, mode, op0, op1));
2380 
2381       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2382 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2383 	 (NOT y).  */
2384       {
2385 	int num_negated = 0;
2386 
2387 	if (GET_CODE (op0) == NOT)
2388 	  num_negated++, op0 = XEXP (op0, 0);
2389 	if (GET_CODE (op1) == NOT)
2390 	  num_negated++, op1 = XEXP (op1, 0);
2391 
2392 	if (num_negated == 2)
2393 	  return simplify_gen_binary (XOR, mode, op0, op1);
2394 	else if (num_negated == 1)
2395 	  return simplify_gen_unary (NOT, mode,
2396 				     simplify_gen_binary (XOR, mode, op0, op1),
2397 				     mode);
2398       }
2399 
2400       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2401 	 correspond to a machine insn or result in further simplifications
2402 	 if B is a constant.  */
2403 
2404       if (GET_CODE (op0) == AND
2405 	  && rtx_equal_p (XEXP (op0, 1), op1)
2406 	  && ! side_effects_p (op1))
2407 	return simplify_gen_binary (AND, mode,
2408 				    simplify_gen_unary (NOT, mode,
2409 							XEXP (op0, 0), mode),
2410 				    op1);
2411 
2412       else if (GET_CODE (op0) == AND
2413 	       && rtx_equal_p (XEXP (op0, 0), op1)
2414 	       && ! side_effects_p (op1))
2415 	return simplify_gen_binary (AND, mode,
2416 				    simplify_gen_unary (NOT, mode,
2417 							XEXP (op0, 1), mode),
2418 				    op1);
2419 
2420       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2421 	 comparison if STORE_FLAG_VALUE is 1.  */
2422       if (STORE_FLAG_VALUE == 1
2423 	  && trueop1 == const1_rtx
2424 	  && COMPARISON_P (op0)
2425 	  && (reversed = reversed_comparison (op0, mode)))
2426 	return reversed;
2427 
2428       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2429 	 is (lt foo (const_int 0)), so we can perform the above
2430 	 simplification if STORE_FLAG_VALUE is 1.  */
2431 
2432       if (STORE_FLAG_VALUE == 1
2433 	  && trueop1 == const1_rtx
2434 	  && GET_CODE (op0) == LSHIFTRT
2435 	  && CONST_INT_P (XEXP (op0, 1))
2436 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2437 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2438 
2439       /* (xor (comparison foo bar) (const_int sign-bit))
2440 	 when STORE_FLAG_VALUE is the sign bit.  */
2441       if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2442 	  && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2443 	      == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2444 	  && trueop1 == const_true_rtx
2445 	  && COMPARISON_P (op0)
2446 	  && (reversed = reversed_comparison (op0, mode)))
2447 	return reversed;
2448 
2449       tem = simplify_associative_operation (code, mode, op0, op1);
2450       if (tem)
2451 	return tem;
2452       break;
2453 
2454     case AND:
2455       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2456 	return trueop1;
2457       if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2458 	{
2459 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2460 	  HOST_WIDE_INT nzop1;
2461 	  if (CONST_INT_P (trueop1))
2462 	    {
2463 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
2464 	      /* If we are turning off bits already known off in OP0, we need
2465 		 not do an AND.  */
2466 	      if ((nzop0 & ~val1) == 0)
2467 		return op0;
2468 	    }
2469 	  nzop1 = nonzero_bits (trueop1, mode);
2470 	  /* If we are clearing all the nonzero bits, the result is zero.  */
2471 	  if ((nzop1 & nzop0) == 0
2472 	      && !side_effects_p (op0) && !side_effects_p (op1))
2473 	    return CONST0_RTX (mode);
2474 	}
2475       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2476 	  && GET_MODE_CLASS (mode) != MODE_CC)
2477 	return op0;
2478       /* A & (~A) -> 0 */
2479       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2480 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2481 	  && ! side_effects_p (op0)
2482 	  && GET_MODE_CLASS (mode) != MODE_CC)
2483 	return CONST0_RTX (mode);
2484 
2485       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2486 	 there are no nonzero bits of C outside of X's mode.  */
2487       if ((GET_CODE (op0) == SIGN_EXTEND
2488 	   || GET_CODE (op0) == ZERO_EXTEND)
2489 	  && CONST_INT_P (trueop1)
2490 	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2491 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2492 	      & INTVAL (trueop1)) == 0)
2493 	{
2494 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2495 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2496 				     gen_int_mode (INTVAL (trueop1),
2497 						   imode));
2498 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2499 	}
2500 
2501       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2502 	 we might be able to further simplify the AND with X and potentially
2503 	 remove the truncation altogether.  */
2504       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2505 	{
2506 	  rtx x = XEXP (op0, 0);
2507 	  enum machine_mode xmode = GET_MODE (x);
2508 	  tem = simplify_gen_binary (AND, xmode, x,
2509 				     gen_int_mode (INTVAL (trueop1), xmode));
2510 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2511 	}
2512 
2513       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
2514       if (GET_CODE (op0) == IOR
2515 	  && CONST_INT_P (trueop1)
2516 	  && CONST_INT_P (XEXP (op0, 1)))
2517 	{
2518 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2519 	  return simplify_gen_binary (IOR, mode,
2520 				      simplify_gen_binary (AND, mode,
2521 							   XEXP (op0, 0), op1),
2522 				      gen_int_mode (tmp, mode));
2523 	}
2524 
2525       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2526 	 insn (and may simplify more).  */
2527       if (GET_CODE (op0) == XOR
2528 	  && rtx_equal_p (XEXP (op0, 0), op1)
2529 	  && ! side_effects_p (op1))
2530 	return simplify_gen_binary (AND, mode,
2531 				    simplify_gen_unary (NOT, mode,
2532 							XEXP (op0, 1), mode),
2533 				    op1);
2534 
2535       if (GET_CODE (op0) == XOR
2536 	  && rtx_equal_p (XEXP (op0, 1), op1)
2537 	  && ! side_effects_p (op1))
2538 	return simplify_gen_binary (AND, mode,
2539 				    simplify_gen_unary (NOT, mode,
2540 							XEXP (op0, 0), mode),
2541 				    op1);
2542 
2543       /* Similarly for (~(A ^ B)) & A.  */
2544       if (GET_CODE (op0) == NOT
2545 	  && GET_CODE (XEXP (op0, 0)) == XOR
2546 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2547 	  && ! side_effects_p (op1))
2548 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2549 
2550       if (GET_CODE (op0) == NOT
2551 	  && GET_CODE (XEXP (op0, 0)) == XOR
2552 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2553 	  && ! side_effects_p (op1))
2554 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2555 
2556       /* Convert (A | B) & A to A.  */
2557       if (GET_CODE (op0) == IOR
2558 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2559 	      || rtx_equal_p (XEXP (op0, 1), op1))
2560 	  && ! side_effects_p (XEXP (op0, 0))
2561 	  && ! side_effects_p (XEXP (op0, 1)))
2562 	return op1;
2563 
2564       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2565 	 ((A & N) + B) & M -> (A + B) & M
2566 	 Similarly if (N & M) == 0,
2567 	 ((A | N) + B) & M -> (A + B) & M
2568 	 and for - instead of + and/or ^ instead of |.
2569          Also, if (N & M) == 0, then
2570 	 (A +- N) & M -> A & M.  */
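      /* For example, with M == 0xff, ((A & 0x1ff) + B) & 0xff becomes
	 (A + B) & 0xff, and (A + 0x100) & 0xff becomes A & 0xff.  */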
2571       if (CONST_INT_P (trueop1)
2572 	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2573 	  && ~INTVAL (trueop1)
2574 	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2575 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2576 	{
2577 	  rtx pmop[2];
2578 	  int which;
2579 
2580 	  pmop[0] = XEXP (op0, 0);
2581 	  pmop[1] = XEXP (op0, 1);
2582 
2583 	  if (CONST_INT_P (pmop[1])
2584 	      && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2585 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
2586 
2587 	  for (which = 0; which < 2; which++)
2588 	    {
2589 	      tem = pmop[which];
2590 	      switch (GET_CODE (tem))
2591 		{
2592 		case AND:
2593 		  if (CONST_INT_P (XEXP (tem, 1))
2594 		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2595 		      == INTVAL (trueop1))
2596 		    pmop[which] = XEXP (tem, 0);
2597 		  break;
2598 		case IOR:
2599 		case XOR:
2600 		  if (CONST_INT_P (XEXP (tem, 1))
2601 		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2602 		    pmop[which] = XEXP (tem, 0);
2603 		  break;
2604 		default:
2605 		  break;
2606 		}
2607 	    }
2608 
2609 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2610 	    {
2611 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
2612 					 pmop[0], pmop[1]);
2613 	      return simplify_gen_binary (code, mode, tem, op1);
2614 	    }
2615 	}
2616 
2617       /* (and X (ior (not X) Y)) -> (and X Y) */
2618       if (GET_CODE (op1) == IOR
2619 	  && GET_CODE (XEXP (op1, 0)) == NOT
2620 	  && op0 == XEXP (XEXP (op1, 0), 0))
2621        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2622 
2623       /* (and (ior (not X) Y) X) -> (and X Y) */
2624       if (GET_CODE (op0) == IOR
2625 	  && GET_CODE (XEXP (op0, 0)) == NOT
2626 	  && op1 == XEXP (XEXP (op0, 0), 0))
2627 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2628 
2629       tem = simplify_associative_operation (code, mode, op0, op1);
2630       if (tem)
2631 	return tem;
2632       break;
2633 
2634     case UDIV:
2635       /* 0/x is 0 (or x&0 if x has side-effects).  */
2636       if (trueop0 == CONST0_RTX (mode))
2637 	{
2638 	  if (side_effects_p (op1))
2639 	    return simplify_gen_binary (AND, mode, op1, trueop0);
2640 	  return trueop0;
2641 	}
2642       /* x/1 is x.  */
2643       if (trueop1 == CONST1_RTX (mode))
2644 	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2645       /* Convert divide by power of two into shift.  */
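      /* For example, (udiv X (const_int 8)) becomes
	 (lshiftrt X (const_int 3)).  */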
2646       if (CONST_INT_P (trueop1)
2647 	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
2648 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2649       break;
2650 
2651     case DIV:
2652       /* Handle floating point and integers separately.  */
2653       if (SCALAR_FLOAT_MODE_P (mode))
2654 	{
2655 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
2656 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
2657 	     NaN rather than 0.0.  Nor is it safe for modes with signed
2658 	     zeros, since dividing 0 by a negative number gives -0.0.  */
2659 	  if (trueop0 == CONST0_RTX (mode)
2660 	      && !HONOR_NANS (mode)
2661 	      && !HONOR_SIGNED_ZEROS (mode)
2662 	      && ! side_effects_p (op1))
2663 	    return op0;
2664 	  /* x/1.0 is x.  */
2665 	  if (trueop1 == CONST1_RTX (mode)
2666 	      && !HONOR_SNANS (mode))
2667 	    return op0;
2668 
2669 	  if (GET_CODE (trueop1) == CONST_DOUBLE
2670 	      && trueop1 != CONST0_RTX (mode))
2671 	    {
2672 	      REAL_VALUE_TYPE d;
2673 	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2674 
2675 	      /* x/-1.0 is -x.  */
2676 	      if (REAL_VALUES_EQUAL (d, dconstm1)
2677 		  && !HONOR_SNANS (mode))
2678 		return simplify_gen_unary (NEG, mode, op0, mode);
2679 
2680 	      /* Change FP division by a constant into multiplication.
2681 		 Only do this with -freciprocal-math.  */
2682 	      if (flag_reciprocal_math
2683 		  && !REAL_VALUES_EQUAL (d, dconst0))
2684 		{
2685 		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2686 		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2687 		  return simplify_gen_binary (MULT, mode, op0, tem);
2688 		}
2689 	    }
2690 	}
2691       else
2692 	{
2693 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
2694 	  if (trueop0 == CONST0_RTX (mode))
2695 	    {
2696 	      if (side_effects_p (op1))
2697 		return simplify_gen_binary (AND, mode, op1, trueop0);
2698 	      return trueop0;
2699 	    }
2700 	  /* x/1 is x.  */
2701 	  if (trueop1 == CONST1_RTX (mode))
2702 	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2703 	  /* x/-1 is -x.  */
2704 	  if (trueop1 == constm1_rtx)
2705 	    {
2706 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2707 	      return simplify_gen_unary (NEG, mode, x, mode);
2708 	    }
2709 	}
2710       break;
2711 
2712     case UMOD:
2713       /* 0%x is 0 (or x&0 if x has side-effects).  */
2714       if (trueop0 == CONST0_RTX (mode))
2715 	{
2716 	  if (side_effects_p (op1))
2717 	    return simplify_gen_binary (AND, mode, op1, trueop0);
2718 	  return trueop0;
2719 	}
2720       /* x%1 is 0 (or x&0 if x has side-effects).  */
2721       if (trueop1 == CONST1_RTX (mode))
2722 	{
2723 	  if (side_effects_p (op0))
2724 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2725 	  return CONST0_RTX (mode);
2726 	}
2727       /* Implement modulus by power of two as AND.  */
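      /* For example, (umod X (const_int 8)) becomes
	 (and X (const_int 7)).  */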
2728       if (CONST_INT_P (trueop1)
2729 	  && exact_log2 (INTVAL (trueop1)) > 0)
2730 	return simplify_gen_binary (AND, mode, op0,
2731 				    GEN_INT (INTVAL (op1) - 1));
2732       break;
2733 
2734     case MOD:
2735       /* 0%x is 0 (or x&0 if x has side-effects).  */
2736       if (trueop0 == CONST0_RTX (mode))
2737 	{
2738 	  if (side_effects_p (op1))
2739 	    return simplify_gen_binary (AND, mode, op1, trueop0);
2740 	  return trueop0;
2741 	}
2742       /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
2743       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2744 	{
2745 	  if (side_effects_p (op0))
2746 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2747 	  return CONST0_RTX (mode);
2748 	}
2749       break;
2750 
2751     case ROTATERT:
2752     case ROTATE:
2753     case ASHIFTRT:
2754       if (trueop1 == CONST0_RTX (mode))
2755 	return op0;
2756       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2757 	return op0;
2758       /* Rotating ~0 always results in ~0.  */
2759       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2760 	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2761 	  && ! side_effects_p (op1))
2762 	return op0;
2763     canonicalize_shift:
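      /* On targets where SHIFT_COUNT_TRUNCATED holds, the hardware uses
	 only the low bits of the count, so e.g. an SImode shift by 33
	 acts as a shift by 1; canonicalize the constant count
	 accordingly.  */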
2764       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2765 	{
2766 	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2767 	  if (val != INTVAL (op1))
2768 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2769 	}
2770       break;
2771 
2772     case ASHIFT:
2773     case SS_ASHIFT:
2774     case US_ASHIFT:
2775       if (trueop1 == CONST0_RTX (mode))
2776 	return op0;
2777       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2778 	return op0;
2779       goto canonicalize_shift;
2780 
2781     case LSHIFTRT:
2782       if (trueop1 == CONST0_RTX (mode))
2783 	return op0;
2784       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2785 	return op0;
2786       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
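      /* E.g. in SImode, where CLZ of zero is defined to be 32, (clz X)
	 is 32 only when X == 0, so (lshiftrt (clz X) 5) is 1 exactly
	 when X is zero.  */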
2787       if (GET_CODE (op0) == CLZ
2788 	  && CONST_INT_P (trueop1)
2789 	  && STORE_FLAG_VALUE == 1
2790 	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2791 	{
2792 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2793 	  unsigned HOST_WIDE_INT zero_val = 0;
2794 
2795 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2796 	      && zero_val == GET_MODE_BITSIZE (imode)
2797 	      && INTVAL (trueop1) == exact_log2 (zero_val))
2798 	    return simplify_gen_relational (EQ, mode, imode,
2799 					    XEXP (op0, 0), const0_rtx);
2800 	}
2801       goto canonicalize_shift;
2802 
2803     case SMIN:
2804       if (width <= HOST_BITS_PER_WIDE_INT
2805 	  && CONST_INT_P (trueop1)
2806 	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2807 	  && ! side_effects_p (op0))
2808 	return op1;
2809       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2810 	return op0;
2811       tem = simplify_associative_operation (code, mode, op0, op1);
2812       if (tem)
2813 	return tem;
2814       break;
2815 
2816     case SMAX:
2817       if (width <= HOST_BITS_PER_WIDE_INT
2818 	  && CONST_INT_P (trueop1)
2819 	  && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2820 	      == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2821 	  && ! side_effects_p (op0))
2822 	return op1;
2823       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2824 	return op0;
2825       tem = simplify_associative_operation (code, mode, op0, op1);
2826       if (tem)
2827 	return tem;
2828       break;
2829 
2830     case UMIN:
2831       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2832 	return op1;
2833       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2834 	return op0;
2835       tem = simplify_associative_operation (code, mode, op0, op1);
2836       if (tem)
2837 	return tem;
2838       break;
2839 
2840     case UMAX:
2841       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2842 	return op1;
2843       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2844 	return op0;
2845       tem = simplify_associative_operation (code, mode, op0, op1);
2846       if (tem)
2847 	return tem;
2848       break;
2849 
2850     case SS_PLUS:
2851     case US_PLUS:
2852     case SS_MINUS:
2853     case US_MINUS:
2854     case SS_MULT:
2855     case US_MULT:
2856     case SS_DIV:
2857     case US_DIV:
2858       /* ??? There are simplifications that can be done.  */
2859       return 0;
2860 
2861     case VEC_SELECT:
2862       if (!VECTOR_MODE_P (mode))
2863 	{
2864 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2865 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2866 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
2867 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
2868 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2869 
2870 	  if (GET_CODE (trueop0) == CONST_VECTOR)
2871 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2872 						      (trueop1, 0, 0)));
2873 
2874 	  /* Extract a scalar element from a nested VEC_SELECT expression
2875 	     (with an optional nested VEC_CONCAT expression).  Some targets
2876 	     (i386) extract a scalar element from a vector using a chain
2877 	     of nested VEC_SELECT expressions.  When the input operand is
2878 	     a memory operand, this operation can be simplified to a
2879 	     simple scalar load from an offset memory address.  */
2880 	  if (GET_CODE (trueop0) == VEC_SELECT)
2881 	    {
2882 	      rtx op0 = XEXP (trueop0, 0);
2883 	      rtx op1 = XEXP (trueop0, 1);
2884 
2885 	      enum machine_mode opmode = GET_MODE (op0);
2886 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2887 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2888 
2889 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
2890 	      int elem;
2891 
2892 	      rtvec vec;
2893 	      rtx tmp_op, tmp;
2894 
2895 	      gcc_assert (GET_CODE (op1) == PARALLEL);
2896 	      gcc_assert (i < n_elts);
2897 
2898 	      /* Select the element pointed to by the nested selector.  */
2899 	      elem = INTVAL (XVECEXP (op1, 0, i));
2900 
2901 	      /* Handle the case where the nested VEC_SELECT wraps a VEC_CONCAT.  */
2902 	      if (GET_CODE (op0) == VEC_CONCAT)
2903 		{
2904 		  rtx op00 = XEXP (op0, 0);
2905 		  rtx op01 = XEXP (op0, 1);
2906 
2907 		  enum machine_mode mode00, mode01;
2908 		  int n_elts00, n_elts01;
2909 
2910 		  mode00 = GET_MODE (op00);
2911 		  mode01 = GET_MODE (op01);
2912 
2913 		  /* Find out the number of elements of each operand.  */
2914 		  if (VECTOR_MODE_P (mode00))
2915 		    {
2916 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2917 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2918 		    }
2919 		  else
2920 		    n_elts00 = 1;
2921 
2922 		  if (VECTOR_MODE_P (mode01))
2923 		    {
2924 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2925 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2926 		    }
2927 		  else
2928 		    n_elts01 = 1;
2929 
2930 		  gcc_assert (n_elts == n_elts00 + n_elts01);
2931 
2932 		  /* Select the correct operand of the VEC_CONCAT
2933 		     and adjust the selector.  */
2934 		  if (elem < n_elts00)
2935 		    tmp_op = op00;
2936 		  else
2937 		    {
2938 		      tmp_op = op01;
2939 		      elem -= n_elts00;
2940 		    }
2941 		}
2942 	      else
2943 		tmp_op = op0;
2944 
2945 	      vec = rtvec_alloc (1);
2946 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
2947 
2948 	      tmp = gen_rtx_fmt_ee (code, mode,
2949 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2950 	      return tmp;
2951 	    }
2952 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
2953 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
2954 	    return XEXP (trueop0, 0);
2955 	}
2956       else
2957 	{
2958 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2959 	  gcc_assert (GET_MODE_INNER (mode)
2960 		      == GET_MODE_INNER (GET_MODE (trueop0)));
2961 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
2962 
2963 	  if (GET_CODE (trueop0) == CONST_VECTOR)
2964 	    {
2965 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2966 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2967 	      rtvec v = rtvec_alloc (n_elts);
2968 	      unsigned int i;
2969 
2970 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2971 	      for (i = 0; i < n_elts; i++)
2972 		{
2973 		  rtx x = XVECEXP (trueop1, 0, i);
2974 
2975 		  gcc_assert (CONST_INT_P (x));
2976 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2977 						       INTVAL (x));
2978 		}
2979 
2980 	      return gen_rtx_CONST_VECTOR (mode, v);
2981 	    }
2982 	}
2983 
2984       if (XVECLEN (trueop1, 0) == 1
2985 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2986 	  && GET_CODE (trueop0) == VEC_CONCAT)
2987 	{
2988 	  rtx vec = trueop0;
2989 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2990 
2991 	  /* Try to find the element in the VEC_CONCAT.  */
2992 	  while (GET_MODE (vec) != mode
2993 		 && GET_CODE (vec) == VEC_CONCAT)
2994 	    {
2995 	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2996 	      if (offset < vec_size)
2997 		vec = XEXP (vec, 0);
2998 	      else
2999 		{
3000 		  offset -= vec_size;
3001 		  vec = XEXP (vec, 1);
3002 		}
3003 	      vec = avoid_constant_pool_reference (vec);
3004 	    }
3005 
3006 	  if (GET_MODE (vec) == mode)
3007 	    return vec;
3008 	}
3009 
3010       return 0;
3011     case VEC_CONCAT:
3012       {
3013 	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3014 				      ? GET_MODE (trueop0)
3015 				      : GET_MODE_INNER (mode));
3016 	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3017 				      ? GET_MODE (trueop1)
3018 				      : GET_MODE_INNER (mode));
3019 
3020 	gcc_assert (VECTOR_MODE_P (mode));
3021 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3022 		    == GET_MODE_SIZE (mode));
3023 
3024 	if (VECTOR_MODE_P (op0_mode))
3025 	  gcc_assert (GET_MODE_INNER (mode)
3026 		      == GET_MODE_INNER (op0_mode));
3027 	else
3028 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3029 
3030 	if (VECTOR_MODE_P (op1_mode))
3031 	  gcc_assert (GET_MODE_INNER (mode)
3032 		      == GET_MODE_INNER (op1_mode));
3033 	else
3034 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3035 
3036 	if ((GET_CODE (trueop0) == CONST_VECTOR
3037 	     || CONST_INT_P (trueop0)
3038 	     || GET_CODE (trueop0) == CONST_DOUBLE)
3039 	    && (GET_CODE (trueop1) == CONST_VECTOR
3040 		|| CONST_INT_P (trueop1)
3041 		|| GET_CODE (trueop1) == CONST_DOUBLE))
3042 	  {
3043 	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3044 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3045 	    rtvec v = rtvec_alloc (n_elts);
3046 	    unsigned int i;
3047 	    unsigned in_n_elts = 1;
3048 
3049 	    if (VECTOR_MODE_P (op0_mode))
3050 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3051 	    for (i = 0; i < n_elts; i++)
3052 	      {
3053 		if (i < in_n_elts)
3054 		  {
3055 		    if (!VECTOR_MODE_P (op0_mode))
3056 		      RTVEC_ELT (v, i) = trueop0;
3057 		    else
3058 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3059 		  }
3060 		else
3061 		  {
3062 		    if (!VECTOR_MODE_P (op1_mode))
3063 		      RTVEC_ELT (v, i) = trueop1;
3064 		    else
3065 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3066 							   i - in_n_elts);
3067 		  }
3068 	      }
3069 
3070 	    return gen_rtx_CONST_VECTOR (mode, v);
3071 	  }
3072       }
3073       return 0;
3074 
3075     default:
3076       gcc_unreachable ();
3077     }
3078 
3079   return 0;
3080 }
3081 
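/* Fold binary operation CODE in MODE with constant operands OP0 and
   OP1.  Return the resulting constant rtx, or 0 if the operation
   cannot be evaluated at compile time.  */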
3082 rtx
3083 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3084 				 rtx op0, rtx op1)
3085 {
3086   HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3087   HOST_WIDE_INT val;
3088   unsigned int width = GET_MODE_BITSIZE (mode);
3089 
3090   if (VECTOR_MODE_P (mode)
3091       && code != VEC_CONCAT
3092       && GET_CODE (op0) == CONST_VECTOR
3093       && GET_CODE (op1) == CONST_VECTOR)
3094     {
3095       unsigned n_elts = GET_MODE_NUNITS (mode);
3096       enum machine_mode op0mode = GET_MODE (op0);
3097       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3098       enum machine_mode op1mode = GET_MODE (op1);
3099       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3100       rtvec v = rtvec_alloc (n_elts);
3101       unsigned int i;
3102 
3103       gcc_assert (op0_n_elts == n_elts);
3104       gcc_assert (op1_n_elts == n_elts);
3105       for (i = 0; i < n_elts; i++)
3106 	{
3107 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3108 					     CONST_VECTOR_ELT (op0, i),
3109 					     CONST_VECTOR_ELT (op1, i));
3110 	  if (!x)
3111 	    return 0;
3112 	  RTVEC_ELT (v, i) = x;
3113 	}
3114 
3115       return gen_rtx_CONST_VECTOR (mode, v);
3116     }
3117 
3118   if (VECTOR_MODE_P (mode)
3119       && code == VEC_CONCAT
3120       && (CONST_INT_P (op0)
3121 	  || GET_CODE (op0) == CONST_DOUBLE
3122 	  || GET_CODE (op0) == CONST_FIXED)
3123       && (CONST_INT_P (op1)
3124 	  || GET_CODE (op1) == CONST_DOUBLE
3125 	  || GET_CODE (op1) == CONST_FIXED))
3126     {
3127       unsigned n_elts = GET_MODE_NUNITS (mode);
3128       rtvec v = rtvec_alloc (n_elts);
3129 
3130       gcc_assert (n_elts >= 2);
3131       if (n_elts == 2)
3132 	{
3133 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3134 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3135 
3136 	  RTVEC_ELT (v, 0) = op0;
3137 	  RTVEC_ELT (v, 1) = op1;
3138 	}
3139       else
3140 	{
3141 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3142 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3143 	  unsigned i;
3144 
3145 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3146 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3147 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3148 
3149 	  for (i = 0; i < op0_n_elts; ++i)
3150 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3151 	  for (i = 0; i < op1_n_elts; ++i)
3152 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3153 	}
3154 
3155       return gen_rtx_CONST_VECTOR (mode, v);
3156     }
3157 
3158   if (SCALAR_FLOAT_MODE_P (mode)
3159       && GET_CODE (op0) == CONST_DOUBLE
3160       && GET_CODE (op1) == CONST_DOUBLE
3161       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3162     {
3163       if (code == AND
3164 	  || code == IOR
3165 	  || code == XOR)
3166 	{
3167 	  long tmp0[4];
3168 	  long tmp1[4];
3169 	  REAL_VALUE_TYPE r;
3170 	  int i;
3171 
3172 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3173 			  GET_MODE (op0));
3174 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3175 			  GET_MODE (op1));
3176 	  for (i = 0; i < 4; i++)
3177 	    {
3178 	      switch (code)
3179 	      {
3180 	      case AND:
3181 		tmp0[i] &= tmp1[i];
3182 		break;
3183 	      case IOR:
3184 		tmp0[i] |= tmp1[i];
3185 		break;
3186 	      case XOR:
3187 		tmp0[i] ^= tmp1[i];
3188 		break;
3189 	      default:
3190 		gcc_unreachable ();
3191 	      }
3192 	    }
3193 	   real_from_target (&r, tmp0, mode);
3194 	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3195 	}
3196       else
3197 	{
3198 	  REAL_VALUE_TYPE f0, f1, value, result;
3199 	  bool inexact;
3200 
3201 	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3202 	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3203 	  real_convert (&f0, mode, &f0);
3204 	  real_convert (&f1, mode, &f1);
3205 
3206 	  if (HONOR_SNANS (mode)
3207 	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3208 	    return 0;
3209 
3210 	  if (code == DIV
3211 	      && REAL_VALUES_EQUAL (f1, dconst0)
3212 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3213 	    return 0;
3214 
3215 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3216 	      && flag_trapping_math
3217 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3218 	    {
3219 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3220 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3221 
3222 	      switch (code)
3223 		{
3224 		case PLUS:
3225 		  /* Inf + -Inf = NaN plus exception.  */
3226 		  if (s0 != s1)
3227 		    return 0;
3228 		  break;
3229 		case MINUS:
3230 		  /* Inf - Inf = NaN plus exception.  */
3231 		  if (s0 == s1)
3232 		    return 0;
3233 		  break;
3234 		case DIV:
3235 		  /* Inf / Inf = NaN plus exception.  */
3236 		  return 0;
3237 		default:
3238 		  break;
3239 		}
3240 	    }
3241 
3242 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3243 	      && flag_trapping_math
3244 	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3245 		  || (REAL_VALUE_ISINF (f1)
3246 		      && REAL_VALUES_EQUAL (f0, dconst0))))
3247 	    /* Inf * 0 = NaN plus exception.  */
3248 	    return 0;
3249 
3250 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3251 				     &f0, &f1);
3252 	  real_convert (&result, mode, &value);
3253 
3254 	  /* Don't constant fold this floating point operation if
3255 	     the result has overflowed and flag_trapping_math is set.  */
3256 
3257 	  if (flag_trapping_math
3258 	      && MODE_HAS_INFINITIES (mode)
3259 	      && REAL_VALUE_ISINF (result)
3260 	      && !REAL_VALUE_ISINF (f0)
3261 	      && !REAL_VALUE_ISINF (f1))
3262 	    /* Overflow plus exception.  */
3263 	    return 0;
3264 
3265 	  /* Don't constant fold this floating point operation if the
3266 	     result may depend upon the run-time rounding mode and
3267 	     flag_rounding_math is set, or if GCC's software emulation
3268 	     is unable to accurately represent the result.  */
3269 
3270 	  if ((flag_rounding_math
3271 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3272 	      && (inexact || !real_identical (&result, &value)))
3273 	    return NULL_RTX;
3274 
3275 	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3276 	}
3277     }
3278 
3279   /* We can fold some multi-word operations.  */
3280   if (GET_MODE_CLASS (mode) == MODE_INT
3281       && width == HOST_BITS_PER_WIDE_INT * 2
3282       && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3283       && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3284     {
3285       unsigned HOST_WIDE_INT l1, l2, lv, lt;
3286       HOST_WIDE_INT h1, h2, hv, ht;
3287 
3288       if (GET_CODE (op0) == CONST_DOUBLE)
3289 	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3290       else
3291 	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3292 
3293       if (GET_CODE (op1) == CONST_DOUBLE)
3294 	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3295       else
3296 	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3297 
3298       switch (code)
3299 	{
3300 	case MINUS:
3301 	  /* A - B == A + (-B).  */
3302 	  neg_double (l2, h2, &lv, &hv);
3303 	  l2 = lv, h2 = hv;
3304 
3305 	  /* Fall through....  */
3306 
3307 	case PLUS:
3308 	  add_double (l1, h1, l2, h2, &lv, &hv);
3309 	  break;
3310 
3311 	case MULT:
3312 	  mul_double (l1, h1, l2, h2, &lv, &hv);
3313 	  break;
3314 
3315 	case DIV:
3316 	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3317 				    &lv, &hv, &lt, &ht))
3318 	    return 0;
3319 	  break;
3320 
3321 	case MOD:
3322 	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3323 				    &lt, &ht, &lv, &hv))
3324 	    return 0;
3325 	  break;
3326 
3327 	case UDIV:
3328 	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3329 				    &lv, &hv, &lt, &ht))
3330 	    return 0;
3331 	  break;
3332 
3333 	case UMOD:
3334 	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3335 				    &lt, &ht, &lv, &hv))
3336 	    return 0;
3337 	  break;
3338 
3339 	case AND:
3340 	  lv = l1 & l2, hv = h1 & h2;
3341 	  break;
3342 
3343 	case IOR:
3344 	  lv = l1 | l2, hv = h1 | h2;
3345 	  break;
3346 
3347 	case XOR:
3348 	  lv = l1 ^ l2, hv = h1 ^ h2;
3349 	  break;
3350 
3351 	case SMIN:
3352 	  if (h1 < h2
3353 	      || (h1 == h2
3354 		  && ((unsigned HOST_WIDE_INT) l1
3355 		      < (unsigned HOST_WIDE_INT) l2)))
3356 	    lv = l1, hv = h1;
3357 	  else
3358 	    lv = l2, hv = h2;
3359 	  break;
3360 
3361 	case SMAX:
3362 	  if (h1 > h2
3363 	      || (h1 == h2
3364 		  && ((unsigned HOST_WIDE_INT) l1
3365 		      > (unsigned HOST_WIDE_INT) l2)))
3366 	    lv = l1, hv = h1;
3367 	  else
3368 	    lv = l2, hv = h2;
3369 	  break;
3370 
3371 	case UMIN:
3372 	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3373 	      || (h1 == h2
3374 		  && ((unsigned HOST_WIDE_INT) l1
3375 		      < (unsigned HOST_WIDE_INT) l2)))
3376 	    lv = l1, hv = h1;
3377 	  else
3378 	    lv = l2, hv = h2;
3379 	  break;
3380 
3381 	case UMAX:
3382 	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3383 	      || (h1 == h2
3384 		  && ((unsigned HOST_WIDE_INT) l1
3385 		      > (unsigned HOST_WIDE_INT) l2)))
3386 	    lv = l1, hv = h1;
3387 	  else
3388 	    lv = l2, hv = h2;
3389 	  break;
3390 
3391 	case LSHIFTRT:   case ASHIFTRT:
3392 	case ASHIFT:
3393 	case ROTATE:     case ROTATERT:
3394 	  if (SHIFT_COUNT_TRUNCATED)
3395 	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3396 
3397 	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3398 	    return 0;
3399 
3400 	  if (code == LSHIFTRT || code == ASHIFTRT)
3401 	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3402 			   code == ASHIFTRT);
3403 	  else if (code == ASHIFT)
3404 	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3405 	  else if (code == ROTATE)
3406 	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3407 	  else /* code == ROTATERT */
3408 	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3409 	  break;
3410 
3411 	default:
3412 	  return 0;
3413 	}
3414 
3415       return immed_double_const (lv, hv, mode);
3416     }
3417 
3418   if (CONST_INT_P (op0) && CONST_INT_P (op1)
3419       && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3420     {
3421       /* Get the integer argument values in two forms:
3422          zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
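      /* For example, with width 8 the operand value 0xff gives
         arg0 == 0xff (zero-extended) but arg0s == -1 (sign-extended);
         the signed cases below (DIV, MOD, SMIN, ...) use the latter,
         the unsigned ones (UDIV, UMOD, UMIN, ...) the former.  */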
3423 
3424       arg0 = INTVAL (op0);
3425       arg1 = INTVAL (op1);
3426 
3427       if (width < HOST_BITS_PER_WIDE_INT)
3428         {
3429           arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3430           arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3431 
3432           arg0s = arg0;
3433           if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3434 	    arg0s |= ((HOST_WIDE_INT) (-1) << width);
3435 
3436 	  arg1s = arg1;
3437 	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3438 	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
3439 	}
3440       else
3441 	{
3442 	  arg0s = arg0;
3443 	  arg1s = arg1;
3444 	}
3445 
3446       /* Compute the value of the arithmetic.  */
3447 
3448       switch (code)
3449 	{
3450 	case PLUS:
3451 	  val = arg0s + arg1s;
3452 	  break;
3453 
3454 	case MINUS:
3455 	  val = arg0s - arg1s;
3456 	  break;
3457 
3458 	case MULT:
3459 	  val = arg0s * arg1s;
3460 	  break;
3461 
3462 	case DIV:
3463 	  if (arg1s == 0
3464 	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3465 		  && arg1s == -1))
3466 	    return 0;
3467 	  val = arg0s / arg1s;
3468 	  break;
3469 
3470 	case MOD:
3471 	  if (arg1s == 0
3472 	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3473 		  && arg1s == -1))
3474 	    return 0;
3475 	  val = arg0s % arg1s;
3476 	  break;
3477 
3478 	case UDIV:
3479 	  if (arg1 == 0
3480 	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3481 		  && arg1s == -1))
3482 	    return 0;
3483 	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3484 	  break;
3485 
3486 	case UMOD:
3487 	  if (arg1 == 0
3488 	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3489 		  && arg1s == -1))
3490 	    return 0;
3491 	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3492 	  break;
3493 
3494 	case AND:
3495 	  val = arg0 & arg1;
3496 	  break;
3497 
3498 	case IOR:
3499 	  val = arg0 | arg1;
3500 	  break;
3501 
3502 	case XOR:
3503 	  val = arg0 ^ arg1;
3504 	  break;
3505 
3506 	case LSHIFTRT:
3507 	case ASHIFT:
3508 	case ASHIFTRT:
3509 	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3510 	     the value is in range.  We can't return any old value for
3511 	     out-of-range arguments because either the middle-end (via
3512 	     shift_truncation_mask) or the back-end might be relying on
3513 	     target-specific knowledge.  Nor can we rely on
3514 	     shift_truncation_mask, since the shift might not be part of an
3515 	     ashlM3, lshrM3 or ashrM3 instruction.  */
3516 	  if (SHIFT_COUNT_TRUNCATED)
3517 	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3518 	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3519 	    return 0;
3520 
3521 	  val = (code == ASHIFT
3522 		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3523 		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3524 
3525 	  /* Sign-extend the result for arithmetic right shifts.  */
3526 	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3527 	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3528 	  break;
3529 
3530 	case ROTATERT:
3531 	  if (arg1 < 0)
3532 	    return 0;
3533 
3534 	  arg1 %= width;
3535 	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3536 		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3537 	  break;
3538 
3539 	case ROTATE:
3540 	  if (arg1 < 0)
3541 	    return 0;
3542 
3543 	  arg1 %= width;
3544 	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3545 		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3546 	  break;
3547 
3548 	case COMPARE:
3549 	  /* Do nothing here.  */
3550 	  return 0;
3551 
3552 	case SMIN:
3553 	  val = arg0s <= arg1s ? arg0s : arg1s;
3554 	  break;
3555 
3556 	case UMIN:
3557 	  val = ((unsigned HOST_WIDE_INT) arg0
3558 		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3559 	  break;
3560 
3561 	case SMAX:
3562 	  val = arg0s > arg1s ? arg0s : arg1s;
3563 	  break;
3564 
3565 	case UMAX:
3566 	  val = ((unsigned HOST_WIDE_INT) arg0
3567 		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3568 	  break;
3569 
3570 	case SS_PLUS:
3571 	case US_PLUS:
3572 	case SS_MINUS:
3573 	case US_MINUS:
3574 	case SS_MULT:
3575 	case US_MULT:
3576 	case SS_DIV:
3577 	case US_DIV:
3578 	case SS_ASHIFT:
3579 	case US_ASHIFT:
3580 	  /* ??? There are simplifications that can be done.  */
3581 	  return 0;
3582 
3583 	default:
3584 	  gcc_unreachable ();
3585 	}
3586 
3587       return gen_int_mode (val, mode);
3588     }
3589 
3590   return NULL_RTX;
3591 }
3592 
3593 
3594 
3595 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3596    PLUS or MINUS.
3597 
3598    Rather than test for specific cases, we do this by a brute-force method
3599    and do all possible simplifications until no more changes occur.  Then
3600    we rebuild the operation.  */
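/* As an illustration, simplifying (minus (plus a b) (neg c)) first
   flattens the expression into the operand list a, b, c, none of them
   negated, and then rebuilds it as a left-associated chain of PLUS and
   MINUS operations, here (plus (plus a b) c) up to the canonical
   operand order.  */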
3601 
3602 struct simplify_plus_minus_op_data
3603 {
3604   rtx op;
3605   short neg;
3606 };
3607 
3608 static bool
3609 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3610 {
3611   int result;
3612 
3613   result = (commutative_operand_precedence (y)
3614 	    - commutative_operand_precedence (x));
3615   if (result)
3616     return result > 0;
3617 
3618   /* Group together equal REGs to do more simplification.  */
3619   if (REG_P (x) && REG_P (y))
3620     return REGNO (x) > REGNO (y);
3621   else
3622     return false;
3623 }
3624 
3625 static rtx
3626 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3627 		     rtx op1)
3628 {
3629   struct simplify_plus_minus_op_data ops[8];
3630   rtx result, tem;
3631   int n_ops = 2, input_ops = 2;
3632   int changed, n_constants = 0, canonicalized = 0;
3633   int i, j;
3634 
3635   memset (ops, 0, sizeof ops);
3636 
3637   /* Set up the two operands and then expand them until nothing has been
3638      changed.  If we run out of room in our array, give up; this should
3639      almost never happen.  */
3640 
3641   ops[0].op = op0;
3642   ops[0].neg = 0;
3643   ops[1].op = op1;
3644   ops[1].neg = (code == MINUS);
3645 
3646   do
3647     {
3648       changed = 0;
3649 
3650       for (i = 0; i < n_ops; i++)
3651 	{
3652 	  rtx this_op = ops[i].op;
3653 	  int this_neg = ops[i].neg;
3654 	  enum rtx_code this_code = GET_CODE (this_op);
3655 
3656 	  switch (this_code)
3657 	    {
3658 	    case PLUS:
3659 	    case MINUS:
3660 	      if (n_ops == 7)
3661 		return NULL_RTX;
3662 
3663 	      ops[n_ops].op = XEXP (this_op, 1);
3664 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3665 	      n_ops++;
3666 
3667 	      ops[i].op = XEXP (this_op, 0);
3668 	      input_ops++;
3669 	      changed = 1;
3670 	      canonicalized |= this_neg;
3671 	      break;
3672 
3673 	    case NEG:
3674 	      ops[i].op = XEXP (this_op, 0);
3675 	      ops[i].neg = ! this_neg;
3676 	      changed = 1;
3677 	      canonicalized = 1;
3678 	      break;
3679 
3680 	    case CONST:
3681 	      if (n_ops < 7
3682 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
3683 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3684 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3685 		{
3686 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
3687 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3688 		  ops[n_ops].neg = this_neg;
3689 		  n_ops++;
3690 		  changed = 1;
3691 	          canonicalized = 1;
3692 		}
3693 	      break;
3694 
3695 	    case NOT:
3696 	      /* ~a -> (-a - 1) */
3697 	      if (n_ops != 7)
3698 		{
3699 		  ops[n_ops].op = constm1_rtx;
3700 		  ops[n_ops++].neg = this_neg;
3701 		  ops[i].op = XEXP (this_op, 0);
3702 		  ops[i].neg = !this_neg;
3703 		  changed = 1;
3704 	          canonicalized = 1;
3705 		}
3706 	      break;
3707 
3708 	    case CONST_INT:
3709 	      n_constants++;
3710 	      if (this_neg)
3711 		{
3712 		  ops[i].op = neg_const_int (mode, this_op);
3713 		  ops[i].neg = 0;
3714 		  changed = 1;
3715 	          canonicalized = 1;
3716 		}
3717 	      break;
3718 
3719 	    default:
3720 	      break;
3721 	    }
3722 	}
3723     }
3724   while (changed);
3725 
3726   if (n_constants > 1)
3727     canonicalized = 1;
3728 
3729   gcc_assert (n_ops >= 2);
3730 
3731   /* If we only have two operands, we can avoid the loops.  */
3732   if (n_ops == 2)
3733     {
3734       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3735       rtx lhs, rhs;
3736 
3737       /* Get the two operands.  Be careful with the order, especially for
3738 	 the cases where code == MINUS.  */
3739       if (ops[0].neg && ops[1].neg)
3740 	{
3741 	  lhs = gen_rtx_NEG (mode, ops[0].op);
3742 	  rhs = ops[1].op;
3743 	}
3744       else if (ops[0].neg)
3745 	{
3746 	  lhs = ops[1].op;
3747 	  rhs = ops[0].op;
3748 	}
3749       else
3750 	{
3751 	  lhs = ops[0].op;
3752 	  rhs = ops[1].op;
3753 	}
3754 
3755       return simplify_const_binary_operation (code, mode, lhs, rhs);
3756     }
3757 
3758   /* Now simplify each pair of operands until nothing changes.  */
3759   do
3760     {
3761       /* Insertion sort is good enough for an eight-element array.  */
3762       for (i = 1; i < n_ops; i++)
3763         {
3764           struct simplify_plus_minus_op_data save;
3765           j = i - 1;
3766           if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3767 	    continue;
3768 
3769           canonicalized = 1;
3770           save = ops[i];
3771           do
3772 	    ops[j + 1] = ops[j];
3773           while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3774           ops[j + 1] = save;
3775         }
3776 
3777       changed = 0;
3778       for (i = n_ops - 1; i > 0; i--)
3779 	for (j = i - 1; j >= 0; j--)
3780 	  {
3781 	    rtx lhs = ops[j].op, rhs = ops[i].op;
3782 	    int lneg = ops[j].neg, rneg = ops[i].neg;
3783 
3784 	    if (lhs != 0 && rhs != 0)
3785 	      {
3786 		enum rtx_code ncode = PLUS;
3787 
3788 		if (lneg != rneg)
3789 		  {
3790 		    ncode = MINUS;
3791 		    if (lneg)
3792 		      tem = lhs, lhs = rhs, rhs = tem;
3793 		  }
3794 		else if (swap_commutative_operands_p (lhs, rhs))
3795 		  tem = lhs, lhs = rhs, rhs = tem;
3796 
3797 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3798 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3799 		  {
3800 		    rtx tem_lhs, tem_rhs;
3801 
3802 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3803 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3804 		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3805 
3806 		    if (tem && !CONSTANT_P (tem))
3807 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
3808 		  }
3809 		else
3810 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3811 
3812 		/* Reject "simplifications" that just wrap the two
3813 		   arguments in a CONST.  Failure to do so can result
3814 		   in infinite recursion with simplify_binary_operation
3815 		   when it calls us to simplify CONST operations.  */
3816 		if (tem
3817 		    && ! (GET_CODE (tem) == CONST
3818 			  && GET_CODE (XEXP (tem, 0)) == ncode
3819 			  && XEXP (XEXP (tem, 0), 0) == lhs
3820 			  && XEXP (XEXP (tem, 0), 1) == rhs))
3821 		  {
3822 		    lneg &= rneg;
3823 		    if (GET_CODE (tem) == NEG)
3824 		      tem = XEXP (tem, 0), lneg = !lneg;
3825 		    if (CONST_INT_P (tem) && lneg)
3826 		      tem = neg_const_int (mode, tem), lneg = 0;
3827 
3828 		    ops[i].op = tem;
3829 		    ops[i].neg = lneg;
3830 		    ops[j].op = NULL_RTX;
3831 		    changed = 1;
3832 		    canonicalized = 1;
3833 		  }
3834 	      }
3835 	  }
3836 
3837       /* If nothing changed, fail.  */
3838       if (!canonicalized)
3839         return NULL_RTX;
3840 
3841       /* Pack all the operands to the lower-numbered entries.  */
3842       for (i = 0, j = 0; j < n_ops; j++)
3843         if (ops[j].op)
3844           {
3845 	    ops[i] = ops[j];
3846 	    i++;
3847           }
3848       n_ops = i;
3849     }
3850   while (changed);
3851 
3852   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
3853   if (n_ops == 2
3854       && CONST_INT_P (ops[1].op)
3855       && CONSTANT_P (ops[0].op)
3856       && ops[0].neg)
3857     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3858 
3859   /* We suppressed creation of trivial CONST expressions in the
3860      combination loop to avoid recursion.  Create one manually now.
3861      The combination loop should have ensured that there is exactly
3862      one CONST_INT, and the sort will have ensured that it is last
3863      in the array and that any other constant will be next-to-last.  */
3864 
3865   if (n_ops > 1
3866       && CONST_INT_P (ops[n_ops - 1].op)
3867       && CONSTANT_P (ops[n_ops - 2].op))
3868     {
3869       rtx value = ops[n_ops - 1].op;
3870       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3871 	value = neg_const_int (mode, value);
3872       ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3873       n_ops--;
3874     }
3875 
3876   /* Put a non-negated operand first, if possible.  */
3877 
3878   for (i = 0; i < n_ops && ops[i].neg; i++)
3879     continue;
3880   if (i == n_ops)
3881     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3882   else if (i != 0)
3883     {
3884       tem = ops[0].op;
3885       ops[0] = ops[i];
3886       ops[i].op = tem;
3887       ops[i].neg = 1;
3888     }
3889 
3890   /* Now make the result by performing the requested operations.  */
3891   result = ops[0].op;
3892   for (i = 1; i < n_ops; i++)
3893     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3894 			     mode, result, ops[i].op);
3895 
3896   return result;
3897 }
3898 
3899 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
3900 static bool
3901 plus_minus_operand_p (const_rtx x)
3902 {
3903   return GET_CODE (x) == PLUS
3904          || GET_CODE (x) == MINUS
3905 	 || (GET_CODE (x) == CONST
3906 	     && GET_CODE (XEXP (x, 0)) == PLUS
3907 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3908 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3909 }
3910 
3911 /* Like simplify_binary_operation except used for relational operators.
3912    MODE is the mode of the result.  If MODE is VOIDmode, the operands
3913    must not both be VOIDmode as well.
3914 
3915    CMP_MODE specifies the mode in which the comparison is done, so it is
3916    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
3917    the operands or, if both are VOIDmode, the operands are compared in
3918    "infinite precision".  */
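/* For example, in (eq:SI (reg:DI x) (reg:DI y)) the result MODE is
   SImode, while CMP_MODE, the mode in which x and y are compared, is
   DImode.  */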
3919 rtx
3920 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3921 			       enum machine_mode cmp_mode, rtx op0, rtx op1)
3922 {
3923   rtx tem, trueop0, trueop1;
3924 
3925   if (cmp_mode == VOIDmode)
3926     cmp_mode = GET_MODE (op0);
3927   if (cmp_mode == VOIDmode)
3928     cmp_mode = GET_MODE (op1);
3929 
3930   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3931   if (tem)
3932     {
3933       if (SCALAR_FLOAT_MODE_P (mode))
3934 	{
3935           if (tem == const0_rtx)
3936             return CONST0_RTX (mode);
3937 #ifdef FLOAT_STORE_FLAG_VALUE
3938 	  {
3939 	    REAL_VALUE_TYPE val;
3940 	    val = FLOAT_STORE_FLAG_VALUE (mode);
3941 	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3942 	  }
3943 #else
3944 	  return NULL_RTX;
3945 #endif
3946 	}
3947       if (VECTOR_MODE_P (mode))
3948 	{
3949 	  if (tem == const0_rtx)
3950 	    return CONST0_RTX (mode);
3951 #ifdef VECTOR_STORE_FLAG_VALUE
3952 	  {
3953 	    int i, units;
3954 	    rtvec v;
3955 
3956 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3957 	    if (val == NULL_RTX)
3958 	      return NULL_RTX;
3959 	    if (val == const1_rtx)
3960 	      return CONST1_RTX (mode);
3961 
3962 	    units = GET_MODE_NUNITS (mode);
3963 	    v = rtvec_alloc (units);
3964 	    for (i = 0; i < units; i++)
3965 	      RTVEC_ELT (v, i) = val;
3966 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
3967 	  }
3968 #else
3969 	  return NULL_RTX;
3970 #endif
3971 	}
3972 
3973       return tem;
3974     }
3975 
3976   /* For the following tests, ensure const0_rtx is op1.  */
3977   if (swap_commutative_operands_p (op0, op1)
3978       || (op0 == const0_rtx && op1 != const0_rtx))
3979     tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3980 
3981   /* If op0 is a compare, extract the comparison arguments from it.  */
3982   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3983     return simplify_gen_relational (code, mode, VOIDmode,
3984 				    XEXP (op0, 0), XEXP (op0, 1));
3985 
3986   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3987       || CC0_P (op0))
3988     return NULL_RTX;
3989 
3990   trueop0 = avoid_constant_pool_reference (op0);
3991   trueop1 = avoid_constant_pool_reference (op1);
3992   return simplify_relational_operation_1 (code, mode, cmp_mode,
3993 		  			  trueop0, trueop1);
3994 }
3995 
3996 /* This part of simplify_relational_operation is only used when CMP_MODE
3997    is not in class MODE_CC (i.e. it is a real comparison).
3998 
3999    MODE is the mode of the result, while CMP_MODE specifies the mode
4000    in which the comparison is done, so it is the mode of the operands.  */
4001 
4002 static rtx
4003 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4004 				 enum machine_mode cmp_mode, rtx op0, rtx op1)
4005 {
4006   enum rtx_code op0code = GET_CODE (op0);
4007 
4008   if (op1 == const0_rtx && COMPARISON_P (op0))
4009     {
4010       /* If op0 is a comparison, extract the comparison arguments
4011          from it.  */
4012       if (code == NE)
4013 	{
4014 	  if (GET_MODE (op0) == mode)
4015 	    return simplify_rtx (op0);
4016 	  else
4017 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4018 					    XEXP (op0, 0), XEXP (op0, 1));
4019 	}
4020       else if (code == EQ)
4021 	{
4022 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4023 	  if (new_code != UNKNOWN)
4024 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4025 					    XEXP (op0, 0), XEXP (op0, 1));
4026 	}
4027     }
4028 
4029   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4030      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
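  /* For instance, (ltu (plus a 1) 1), the usual unsigned overflow test
     for a + 1, becomes (geu a -1).  */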
4031   if ((code == LTU || code == GEU)
4032       && GET_CODE (op0) == PLUS
4033       && CONST_INT_P (XEXP (op0, 1))
4034       && (rtx_equal_p (op1, XEXP (op0, 0))
4035 	  || rtx_equal_p (op1, XEXP (op0, 1))))
4036     {
4037       rtx new_cmp
4038 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4039       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4040 				      cmp_mode, XEXP (op0, 0), new_cmp);
4041     }
4042 
4043   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4044   if ((code == LTU || code == GEU)
4045       && GET_CODE (op0) == PLUS
4046       && rtx_equal_p (op1, XEXP (op0, 1))
4047       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4048       && !rtx_equal_p (op1, XEXP (op0, 0)))
4049     return simplify_gen_relational (code, mode, cmp_mode, op0,
4050 				    copy_rtx (XEXP (op0, 0)));
4051 
4052   if (op1 == const0_rtx)
4053     {
4054       /* Canonicalize (GTU x 0) as (NE x 0).  */
4055       if (code == GTU)
4056         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4057       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4058       if (code == LEU)
4059         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4060     }
4061   else if (op1 == const1_rtx)
4062     {
4063       switch (code)
4064         {
4065         case GE:
4066 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4067 	  return simplify_gen_relational (GT, mode, cmp_mode,
4068 					  op0, const0_rtx);
4069 	case GEU:
4070 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4071 	  return simplify_gen_relational (NE, mode, cmp_mode,
4072 					  op0, const0_rtx);
4073 	case LT:
4074 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4075 	  return simplify_gen_relational (LE, mode, cmp_mode,
4076 					  op0, const0_rtx);
4077 	case LTU:
4078 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4079 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4080 					  op0, const0_rtx);
4081 	default:
4082 	  break;
4083 	}
4084     }
4085   else if (op1 == constm1_rtx)
4086     {
4087       /* Canonicalize (LE x -1) as (LT x 0).  */
4088       if (code == LE)
4089         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4090       /* Canonicalize (GT x -1) as (GE x 0).  */
4091       if (code == GT)
4092         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4093     }
4094 
4095   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
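  /* For example, (eq (plus x 4) 10) becomes (eq x 6).  */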
4096   if ((code == EQ || code == NE)
4097       && (op0code == PLUS || op0code == MINUS)
4098       && CONSTANT_P (op1)
4099       && CONSTANT_P (XEXP (op0, 1))
4100       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4101     {
4102       rtx x = XEXP (op0, 0);
4103       rtx c = XEXP (op0, 1);
4104 
4105       c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4106 			       cmp_mode, op1, c);
4107       return simplify_gen_relational (code, mode, cmp_mode, x, c);
4108     }
4109 
4110   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4111      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
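  /* More generally, whenever op0 is known to be either 0 or 1 and
     STORE_FLAG_VALUE is 1, (ne op0 (const_int 0)) is op0 itself,
     zero-extended or truncated to the result mode as needed.  */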
4112   if (code == NE
4113       && op1 == const0_rtx
4114       && GET_MODE_CLASS (mode) == MODE_INT
4115       && cmp_mode != VOIDmode
4116       /* ??? Work-around BImode bugs in the ia64 backend.  */
4117       && mode != BImode
4118       && cmp_mode != BImode
4119       && nonzero_bits (op0, cmp_mode) == 1
4120       && STORE_FLAG_VALUE == 1)
4121     return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4122 	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4123 	   : lowpart_subreg (mode, op0, cmp_mode);
4124 
4125   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4126   if ((code == EQ || code == NE)
4127       && op1 == const0_rtx
4128       && op0code == XOR)
4129     return simplify_gen_relational (code, mode, cmp_mode,
4130 				    XEXP (op0, 0), XEXP (op0, 1));
4131 
4132   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4133   if ((code == EQ || code == NE)
4134       && op0code == XOR
4135       && rtx_equal_p (XEXP (op0, 0), op1)
4136       && !side_effects_p (XEXP (op0, 0)))
4137     return simplify_gen_relational (code, mode, cmp_mode,
4138 				    XEXP (op0, 1), const0_rtx);
4139 
4140   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4141   if ((code == EQ || code == NE)
4142       && op0code == XOR
4143       && rtx_equal_p (XEXP (op0, 1), op1)
4144       && !side_effects_p (XEXP (op0, 1)))
4145     return simplify_gen_relational (code, mode, cmp_mode,
4146 				    XEXP (op0, 0), const0_rtx);
4147 
4148   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
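  /* For example, (eq (xor x 3) 5) becomes (eq x 6), since 3 ^ 5 == 6.  */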
4149   if ((code == EQ || code == NE)
4150       && op0code == XOR
4151       && (CONST_INT_P (op1)
4152 	  || GET_CODE (op1) == CONST_DOUBLE)
4153       && (CONST_INT_P (XEXP (op0, 1))
4154 	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4155     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4156 				    simplify_gen_binary (XOR, cmp_mode,
4157 							 XEXP (op0, 1), op1));
4158 
4159   if (op0code == POPCOUNT && op1 == const0_rtx)
4160     switch (code)
4161       {
4162       case EQ:
4163       case LE:
4164       case LEU:
4165 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4166 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4167 					XEXP (op0, 0), const0_rtx);
4168 
4169       case NE:
4170       case GT:
4171       case GTU:
4172 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4173 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4174 					XEXP (op0, 0), const0_rtx);
4175 
4176       default:
4177 	break;
4178       }
4179 
4180   return NULL_RTX;
4181 }
4182 
4183 enum
4184 {
4185   CMP_EQ = 1,
4186   CMP_LT = 2,
4187   CMP_GT = 4,
4188   CMP_LTU = 8,
4189   CMP_GTU = 16
4190 };
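/* The CMP_* values above are combined into a bit mask.  For example, if
   op0 is less than op1 as a signed value but greater as an unsigned one
   (say -1 versus 1), the mask is CMP_LT | CMP_GTU.  */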
4191 
4192 
4193 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4194    KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4195    For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4196    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4197    For floating-point comparisons, assume that the operands were ordered.  */
4198 
4199 static rtx
4200 comparison_result (enum rtx_code code, int known_results)
4201 {
4202   switch (code)
4203     {
4204     case EQ:
4205     case UNEQ:
4206       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4207     case NE:
4208     case LTGT:
4209       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4210 
4211     case LT:
4212     case UNLT:
4213       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4214     case GE:
4215     case UNGE:
4216       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4217 
4218     case GT:
4219     case UNGT:
4220       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4221     case LE:
4222     case UNLE:
4223       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4224 
4225     case LTU:
4226       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4227     case GEU:
4228       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4229 
4230     case GTU:
4231       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4232     case LEU:
4233       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4234 
4235     case ORDERED:
4236       return const_true_rtx;
4237     case UNORDERED:
4238       return const0_rtx;
4239     default:
4240       gcc_unreachable ();
4241     }
4242 }
4243 
4244 /* Check if the given comparison (done in the given MODE) is actually a
4245    tautology or a contradiction.
4246    If no simplification is possible, this function returns zero.
4247    Otherwise, it returns either const_true_rtx or const0_rtx.  */
4248 
4249 rtx
4250 simplify_const_relational_operation (enum rtx_code code,
4251 				     enum machine_mode mode,
4252 				     rtx op0, rtx op1)
4253 {
4254   rtx tem;
4255   rtx trueop0;
4256   rtx trueop1;
4257 
4258   gcc_assert (mode != VOIDmode
4259 	      || (GET_MODE (op0) == VOIDmode
4260 		  && GET_MODE (op1) == VOIDmode));
4261 
4262   /* If op0 is a compare, extract the comparison arguments from it.  */
4263   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4264     {
4265       op1 = XEXP (op0, 1);
4266       op0 = XEXP (op0, 0);
4267 
4268       if (GET_MODE (op0) != VOIDmode)
4269 	mode = GET_MODE (op0);
4270       else if (GET_MODE (op1) != VOIDmode)
4271 	mode = GET_MODE (op1);
4272       else
4273 	return 0;
4274     }
4275 
4276   /* We can't simplify MODE_CC values since we don't know what the
4277      actual comparison is.  */
4278   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4279     return 0;
4280 
4281   /* Make sure the constant is second.  */
4282   if (swap_commutative_operands_p (op0, op1))
4283     {
4284       tem = op0, op0 = op1, op1 = tem;
4285       code = swap_condition (code);
4286     }
4287 
4288   trueop0 = avoid_constant_pool_reference (op0);
4289   trueop1 = avoid_constant_pool_reference (op1);
4290 
4291   /* For integer comparisons of A and B maybe we can simplify A - B and can
4292      then simplify a comparison of that with zero.  If A and B are both either
4293      a register or a CONST_INT, this can't help; testing for these cases will
4294      prevent infinite recursion here and speed things up.
4295 
4296      We can only do this for EQ and NE comparisons as otherwise we may
4297      lose or introduce overflow which we cannot disregard as undefined as
4298      we do not know the signedness of the operation on either the left or
4299      the right hand side of the comparison.  */
4300 
4301   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4302       && (code == EQ || code == NE)
4303       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4304 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
4305       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4306       /* We cannot do this if tem is a nonzero address.  */
4307       && ! nonzero_address_p (tem))
4308     return simplify_const_relational_operation (signed_condition (code),
4309 						mode, tem, const0_rtx);
4310 
4311   if (! HONOR_NANS (mode) && code == ORDERED)
4312     return const_true_rtx;
4313 
4314   if (! HONOR_NANS (mode) && code == UNORDERED)
4315     return const0_rtx;
4316 
4317   /* For modes without NaNs, if the two operands are equal, we know the
4318      result except if they have side-effects.  Even with NaNs we know
4319      the result of unordered comparisons and, if signaling NaNs are
4320      irrelevant, also the result of LT/GT/LTGT.  */
4321   if ((! HONOR_NANS (GET_MODE (trueop0))
4322        || code == UNEQ || code == UNLE || code == UNGE
4323        || ((code == LT || code == GT || code == LTGT)
4324 	   && ! HONOR_SNANS (GET_MODE (trueop0))))
4325       && rtx_equal_p (trueop0, trueop1)
4326       && ! side_effects_p (trueop0))
4327     return comparison_result (code, CMP_EQ);
4328 
4329   /* If the operands are floating-point constants, see if we can fold
4330      the result.  */
4331   if (GET_CODE (trueop0) == CONST_DOUBLE
4332       && GET_CODE (trueop1) == CONST_DOUBLE
4333       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4334     {
4335       REAL_VALUE_TYPE d0, d1;
4336 
4337       REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4338       REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4339 
4340       /* Comparisons are unordered iff at least one of the values is NaN.  */
4341       if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4342 	switch (code)
4343 	  {
4344 	  case UNEQ:
4345 	  case UNLT:
4346 	  case UNGT:
4347 	  case UNLE:
4348 	  case UNGE:
4349 	  case NE:
4350 	  case UNORDERED:
4351 	    return const_true_rtx;
4352 	  case EQ:
4353 	  case LT:
4354 	  case GT:
4355 	  case LE:
4356 	  case GE:
4357 	  case LTGT:
4358 	  case ORDERED:
4359 	    return const0_rtx;
4360 	  default:
4361 	    return 0;
4362 	  }
4363 
4364       return comparison_result (code,
4365 				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4366 				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4367     }
4368 
4369   /* Otherwise, see if the operands are both integers.  */
4370   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4371        && (GET_CODE (trueop0) == CONST_DOUBLE
4372 	   || CONST_INT_P (trueop0))
4373        && (GET_CODE (trueop1) == CONST_DOUBLE
4374 	   || CONST_INT_P (trueop1)))
4375     {
4376       int width = GET_MODE_BITSIZE (mode);
4377       HOST_WIDE_INT l0s, h0s, l1s, h1s;
4378       unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4379 
4380       /* Get the two words comprising each integer constant.  */
4381       if (GET_CODE (trueop0) == CONST_DOUBLE)
4382 	{
4383 	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4384 	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4385 	}
4386       else
4387 	{
4388 	  l0u = l0s = INTVAL (trueop0);
4389 	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
4390 	}
4391 
4392       if (GET_CODE (trueop1) == CONST_DOUBLE)
4393 	{
4394 	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4395 	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4396 	}
4397       else
4398 	{
4399 	  l1u = l1s = INTVAL (trueop1);
4400 	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
4401 	}
4402 
4403       /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4404 	 we have to sign or zero-extend the values.  */
4405       if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4406 	{
4407 	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4408 	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4409 
4410 	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4411 	    l0s |= ((HOST_WIDE_INT) (-1) << width);
4412 
4413 	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4414 	    l1s |= ((HOST_WIDE_INT) (-1) << width);
4415 	}
4416       if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4417 	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4418 
4419       if (h0u == h1u && l0u == l1u)
4420 	return comparison_result (code, CMP_EQ);
4421       else
4422 	{
4423 	  int cr;
4424 	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4425 	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4426 	  return comparison_result (code, cr);
4427 	}
4428     }
4429 
4430   /* Optimize comparisons with upper and lower bounds.  */
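  /* For example, if nonzero_bits shows that only the low four bits of
     trueop0 can be set, so that trueop0 <= 15, then (gtu trueop0 15)
     folds to const0_rtx below.  */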
4431   if (SCALAR_INT_MODE_P (mode)
4432       && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4433       && CONST_INT_P (trueop1))
4434     {
4435       int sign;
4436       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4437       HOST_WIDE_INT val = INTVAL (trueop1);
4438       HOST_WIDE_INT mmin, mmax;
4439 
4440       if (code == GEU
4441 	  || code == LEU
4442 	  || code == GTU
4443 	  || code == LTU)
4444 	sign = 0;
4445       else
4446 	sign = 1;
4447 
4448       /* Get a reduced range if the sign bit is zero.  */
4449       if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4450 	{
4451 	  mmin = 0;
4452 	  mmax = nonzero;
4453 	}
4454       else
4455 	{
4456 	  rtx mmin_rtx, mmax_rtx;
4457 	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4458 
4459 	  mmin = INTVAL (mmin_rtx);
4460 	  mmax = INTVAL (mmax_rtx);
4461 	  if (sign)
4462 	    {
4463 	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4464 
4465 	      mmin >>= (sign_copies - 1);
4466 	      mmax >>= (sign_copies - 1);
4467 	    }
4468 	}
4469 
4470       switch (code)
4471 	{
4472 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
4473 	case GEU:
4474 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4475 	    return const_true_rtx;
4476 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4477 	    return const0_rtx;
4478 	  break;
4479 	case GE:
4480 	  if (val <= mmin)
4481 	    return const_true_rtx;
4482 	  if (val > mmax)
4483 	    return const0_rtx;
4484 	  break;
4485 
4486 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
4487 	case LEU:
4488 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4489 	    return const_true_rtx;
4490 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4491 	    return const0_rtx;
4492 	  break;
4493 	case LE:
4494 	  if (val >= mmax)
4495 	    return const_true_rtx;
4496 	  if (val < mmin)
4497 	    return const0_rtx;
4498 	  break;
4499 
4500 	case EQ:
4501 	  /* x == y is always false for y out of range.  */
4502 	  if (val < mmin || val > mmax)
4503 	    return const0_rtx;
4504 	  break;
4505 
4506 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
4507 	case GTU:
4508 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4509 	    return const0_rtx;
4510 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4511 	    return const_true_rtx;
4512 	  break;
4513 	case GT:
4514 	  if (val >= mmax)
4515 	    return const0_rtx;
4516 	  if (val < mmin)
4517 	    return const_true_rtx;
4518 	  break;
4519 
4520 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
4521 	case LTU:
4522 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4523 	    return const0_rtx;
4524 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4525 	    return const_true_rtx;
4526 	  break;
4527 	case LT:
4528 	  if (val <= mmin)
4529 	    return const0_rtx;
4530 	  if (val > mmax)
4531 	    return const_true_rtx;
4532 	  break;
4533 
4534 	case NE:
4535 	  /* x != y is always true for y out of range.  */
4536 	  if (val < mmin || val > mmax)
4537 	    return const_true_rtx;
4538 	  break;
4539 
4540 	default:
4541 	  break;
4542 	}
4543     }
4544 
4545   /* Optimize integer comparisons with zero.  */
4546   if (trueop1 == const0_rtx)
4547     {
4548       /* Some addresses are known to be nonzero.  We don't know
4549 	 their sign, but equality comparisons are known.  */
4550       if (nonzero_address_p (trueop0))
4551 	{
4552 	  if (code == EQ || code == LEU)
4553 	    return const0_rtx;
4554 	  if (code == NE || code == GTU)
4555 	    return const_true_rtx;
4556 	}
4557 
4558       /* See if the first operand is an IOR with a constant.  If so, we
4559 	 may be able to determine the result of this comparison.  */
4560       if (GET_CODE (op0) == IOR)
4561 	{
4562 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4563 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4564 	    {
4565 	      int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4566 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4567 			      && (INTVAL (inner_const)
4568 				  & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4569 
4570 	      switch (code)
4571 		{
4572 		case EQ:
4573 		case LEU:
4574 		  return const0_rtx;
4575 		case NE:
4576 		case GTU:
4577 		  return const_true_rtx;
4578 		case LT:
4579 		case LE:
4580 		  if (has_sign)
4581 		    return const_true_rtx;
4582 		  break;
4583 		case GT:
4584 		case GE:
4585 		  if (has_sign)
4586 		    return const0_rtx;
4587 		  break;
4588 		default:
4589 		  break;
4590 		}
4591 	    }
4592 	}
4593     }
4594 
4595   /* Optimize comparison of ABS with zero.  */
4596   if (trueop1 == CONST0_RTX (mode)
4597       && (GET_CODE (trueop0) == ABS
4598 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
4599 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4600     {
4601       switch (code)
4602 	{
4603 	case LT:
4604 	  /* Optimize abs(x) < 0.0.  */
4605 	  if (!HONOR_SNANS (mode)
4606 	      && (!INTEGRAL_MODE_P (mode)
4607 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4608 	    {
4609 	      if (INTEGRAL_MODE_P (mode)
4610 		  && (issue_strict_overflow_warning
4611 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4612 		warning (OPT_Wstrict_overflow,
4613 			 ("assuming signed overflow does not occur when "
4614 			  "assuming abs (x) < 0 is false"));
4615 	      return const0_rtx;
4616 	    }
4617 	  break;
4618 
4619 	case GE:
4620 	  /* Optimize abs(x) >= 0.0.  */
4621 	  if (!HONOR_NANS (mode)
4622 	      && (!INTEGRAL_MODE_P (mode)
4623 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4624 	    {
4625 	      if (INTEGRAL_MODE_P (mode)
4626 		  && (issue_strict_overflow_warning
4627 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4628 		warning (OPT_Wstrict_overflow,
4629 			 ("assuming signed overflow does not occur when "
4630 			  "assuming abs (x) >= 0 is true"));
4631 	      return const_true_rtx;
4632 	    }
4633 	  break;
4634 
4635 	case UNGE:
4636 	  /* Optimize ! (abs(x) < 0.0).  */
4637 	  return const_true_rtx;
4638 
4639 	default:
4640 	  break;
4641 	}
4642     }
4643 
4644   return 0;
4645 }
4646 
4647 /* Simplify CODE, an operation with result mode MODE and three operands,
4648    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
4649    a constant.  Return 0 if no simplification is possible.  */
4650 
4651 rtx
4652 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4653 			    enum machine_mode op0_mode, rtx op0, rtx op1,
4654 			    rtx op2)
4655 {
4656   unsigned int width = GET_MODE_BITSIZE (mode);
4657 
4658   /* VOIDmode means "infinite" precision.  */
4659   if (width == 0)
4660     width = HOST_BITS_PER_WIDE_INT;
4661 
4662   switch (code)
4663     {
4664     case SIGN_EXTRACT:
4665     case ZERO_EXTRACT:
4666       if (CONST_INT_P (op0)
4667 	  && CONST_INT_P (op1)
4668 	  && CONST_INT_P (op2)
4669 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4670 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4671 	{
4672 	  /* Extracting a bit-field from a constant.  */
4673 	  HOST_WIDE_INT val = INTVAL (op0);
4674 
4675 	  if (BITS_BIG_ENDIAN)
4676 	    val >>= (GET_MODE_BITSIZE (op0_mode)
4677 		     - INTVAL (op2) - INTVAL (op1));
4678 	  else
4679 	    val >>= INTVAL (op2);
4680 
4681 	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4682 	    {
4683 	      /* First zero-extend.  */
4684 	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4685 	      /* If desired, propagate sign bit.  */
4686 	      if (code == SIGN_EXTRACT
4687 		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4688 		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4689 	    }
4690 
4691 	  /* Clear the bits that don't belong in our mode,
4692 	     unless they and our sign bit are all one.
4693 	     So we get either a reasonable negative value or a reasonable
4694 	     unsigned value for this mode.  */
4695 	  if (width < HOST_BITS_PER_WIDE_INT
4696 	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4697 		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
4698 	    val &= ((HOST_WIDE_INT) 1 << width) - 1;
4699 
4700 	  return gen_int_mode (val, mode);
4701 	}
4702       break;
4703 
4704     case IF_THEN_ELSE:
4705       if (CONST_INT_P (op0))
4706 	return op0 != const0_rtx ? op1 : op2;
4707 
4708       /* Convert c ? a : a into "a".  */
4709       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4710 	return op1;
4711 
4712       /* Convert a != b ? a : b into "a".  */
4713       if (GET_CODE (op0) == NE
4714 	  && ! side_effects_p (op0)
4715 	  && ! HONOR_NANS (mode)
4716 	  && ! HONOR_SIGNED_ZEROS (mode)
4717 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
4718 	       && rtx_equal_p (XEXP (op0, 1), op2))
4719 	      || (rtx_equal_p (XEXP (op0, 0), op2)
4720 		  && rtx_equal_p (XEXP (op0, 1), op1))))
4721 	return op1;
4722 
4723       /* Convert a == b ? a : b into "b".  */
4724       if (GET_CODE (op0) == EQ
4725 	  && ! side_effects_p (op0)
4726 	  && ! HONOR_NANS (mode)
4727 	  && ! HONOR_SIGNED_ZEROS (mode)
4728 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
4729 	       && rtx_equal_p (XEXP (op0, 1), op2))
4730 	      || (rtx_equal_p (XEXP (op0, 0), op2)
4731 		  && rtx_equal_p (XEXP (op0, 1), op1))))
4732 	return op2;
4733 
4734       if (COMPARISON_P (op0) && ! side_effects_p (op0))
4735 	{
4736 	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4737 					? GET_MODE (XEXP (op0, 1))
4738 					: GET_MODE (XEXP (op0, 0)));
4739 	  rtx temp;
4740 
4741 	  /* Look for happy constants in op1 and op2.  */
4742 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
4743 	    {
4744 	      HOST_WIDE_INT t = INTVAL (op1);
4745 	      HOST_WIDE_INT f = INTVAL (op2);
4746 
4747 	      if (t == STORE_FLAG_VALUE && f == 0)
4748 	        code = GET_CODE (op0);
4749 	      else if (t == 0 && f == STORE_FLAG_VALUE)
4750 		{
4751 		  enum rtx_code tmp;
4752 		  tmp = reversed_comparison_code (op0, NULL_RTX);
4753 		  if (tmp == UNKNOWN)
4754 		    break;
4755 		  code = tmp;
4756 		}
4757 	      else
4758 		break;
4759 
4760 	      return simplify_gen_relational (code, mode, cmp_mode,
4761 					      XEXP (op0, 0), XEXP (op0, 1));
4762 	    }
4763 
4764 	  if (cmp_mode == VOIDmode)
4765 	    cmp_mode = op0_mode;
4766 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4767 			  			cmp_mode, XEXP (op0, 0),
4768 						XEXP (op0, 1));
4769 
4770 	  /* See if any simplifications were possible.  */
4771 	  if (temp)
4772 	    {
4773 	      if (CONST_INT_P (temp))
4774 		return temp == const0_rtx ? op2 : op1;
4775 	      else if (temp)
4776 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4777 	    }
4778 	}
4779       break;
4780 
4781     case VEC_MERGE:
4782       gcc_assert (GET_MODE (op0) == mode);
4783       gcc_assert (GET_MODE (op1) == mode);
4784       gcc_assert (VECTOR_MODE_P (mode));
4785       op2 = avoid_constant_pool_reference (op2);
4786       if (CONST_INT_P (op2))
4787 	{
4788           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4789 	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4790 	  int mask = (1 << n_elts) - 1;
4791 
4792 	  if (!(INTVAL (op2) & mask))
4793 	    return op1;
4794 	  if ((INTVAL (op2) & mask) == mask)
4795 	    return op0;
4796 
4797 	  op0 = avoid_constant_pool_reference (op0);
4798 	  op1 = avoid_constant_pool_reference (op1);
4799 	  if (GET_CODE (op0) == CONST_VECTOR
4800 	      && GET_CODE (op1) == CONST_VECTOR)
4801 	    {
4802 	      rtvec v = rtvec_alloc (n_elts);
4803 	      unsigned int i;
4804 
4805 	      for (i = 0; i < n_elts; i++)
4806 		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4807 				    ? CONST_VECTOR_ELT (op0, i)
4808 				    : CONST_VECTOR_ELT (op1, i));
4809 	      return gen_rtx_CONST_VECTOR (mode, v);
4810 	    }
4811 	}
4812       break;
4813 
4814     default:
4815       gcc_unreachable ();
4816     }
4817 
4818   return 0;
4819 }
4820 
4821 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4822    or CONST_VECTOR,
4823    returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4824 
4825    Works by unpacking OP into a collection of 8-bit values
4826    represented as a little-endian array of 'unsigned char', selecting by BYTE,
4827    and then repacking them again for OUTERMODE.  */
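/* For example, on a little-endian target, an HImode subreg at byte 0 of
   the SImode constant 0x12345678 unpacks the value into the bytes
   0x78 0x56 0x34 0x12 and repacks the first two as (const_int 0x5678).  */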
4828 
4829 static rtx
4830 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4831 		       enum machine_mode innermode, unsigned int byte)
4832 {
4833   /* We support up to 512-bit values (for V8DFmode).  */
4834   enum {
4835     max_bitsize = 512,
4836     value_bit = 8,
4837     value_mask = (1 << value_bit) - 1
4838   };
4839   unsigned char value[max_bitsize / value_bit];
4840   int value_start;
4841   int i;
4842   int elem;
4843 
4844   int num_elem;
4845   rtx * elems;
4846   int elem_bitsize;
4847   rtx result_s;
4848   rtvec result_v = NULL;
4849   enum mode_class outer_class;
4850   enum machine_mode outer_submode;
4851 
4852   /* Some ports misuse CCmode.  */
4853   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4854     return op;
4855 
4856   /* We have no way to represent a complex constant at the rtl level.  */
4857   if (COMPLEX_MODE_P (outermode))
4858     return NULL_RTX;
4859 
4860   /* Unpack the value.  */
4861 
4862   if (GET_CODE (op) == CONST_VECTOR)
4863     {
4864       num_elem = CONST_VECTOR_NUNITS (op);
4865       elems = &CONST_VECTOR_ELT (op, 0);
4866       elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4867     }
4868   else
4869     {
4870       num_elem = 1;
4871       elems = &op;
4872       elem_bitsize = max_bitsize;
4873     }
4874   /* If this asserts, it is too complicated; reducing value_bit may help.  */
4875   gcc_assert (BITS_PER_UNIT % value_bit == 0);
4876   /* I don't know how to handle endianness of sub-units.  */
4877   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4878 
4879   for (elem = 0; elem < num_elem; elem++)
4880     {
4881       unsigned char * vp;
4882       rtx el = elems[elem];
4883 
4884       /* Vectors are kept in target memory order.  (This is probably
4885 	 a mistake.)  */
4886       {
4887 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4888 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4889 			  / BITS_PER_UNIT);
4890 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4891 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4892 	unsigned bytele = (subword_byte % UNITS_PER_WORD
4893 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4894 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4895       }
4896 
4897       switch (GET_CODE (el))
4898 	{
4899 	case CONST_INT:
4900 	  for (i = 0;
4901 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4902 	       i += value_bit)
4903 	    *vp++ = INTVAL (el) >> i;
4904 	  /* CONST_INTs are always logically sign-extended.  */
4905 	  for (; i < elem_bitsize; i += value_bit)
4906 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
4907 	  break;
4908 
4909 	case CONST_DOUBLE:
4910 	  if (GET_MODE (el) == VOIDmode)
4911 	    {
4912 	      /* If this triggers, someone should have generated a
4913 		 CONST_INT instead.  */
4914 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4915 
4916 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4917 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
4918 	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4919 		{
4920 		  *vp++
4921 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4922 		  i += value_bit;
4923 		}
4924 	      /* It shouldn't matter what's done here, so fill it with
4925 		 zero.  */
4926 	      for (; i < elem_bitsize; i += value_bit)
4927 		*vp++ = 0;
4928 	    }
4929 	  else
4930 	    {
4931 	      long tmp[max_bitsize / 32];
4932 	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4933 
4934 	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4935 	      gcc_assert (bitsize <= elem_bitsize);
4936 	      gcc_assert (bitsize % value_bit == 0);
4937 
4938 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4939 			      GET_MODE (el));
4940 
4941 	      /* real_to_target produces its result in words affected by
4942 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
4943 		 and use WORDS_BIG_ENDIAN instead; see the documentation
4944 	         of SUBREG in rtl.texi.  */
4945 	      for (i = 0; i < bitsize; i += value_bit)
4946 		{
4947 		  int ibase;
4948 		  if (WORDS_BIG_ENDIAN)
4949 		    ibase = bitsize - 1 - i;
4950 		  else
4951 		    ibase = i;
4952 		  *vp++ = tmp[ibase / 32] >> i % 32;
4953 		}
4954 
4955 	      /* It shouldn't matter what's done here, so fill it with
4956 		 zero.  */
4957 	      for (; i < elem_bitsize; i += value_bit)
4958 		*vp++ = 0;
4959 	    }
4960 	  break;
4961 
4962         case CONST_FIXED:
4963 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4964 	    {
4965 	      for (i = 0; i < elem_bitsize; i += value_bit)
4966 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4967 	    }
4968 	  else
4969 	    {
4970 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4971 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4972               for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4973 		   i += value_bit)
4974 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
4975 			>> (i - HOST_BITS_PER_WIDE_INT);
4976 	      for (; i < elem_bitsize; i += value_bit)
4977 		*vp++ = 0;
4978 	    }
4979           break;
4980 
4981 	default:
4982 	  gcc_unreachable ();
4983 	}
4984     }
4985 
4986   /* Now, pick the right byte to start with.  */
4987   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
4988      case is paradoxical SUBREGs, which shouldn't be adjusted since they
4989      will already have offset 0.  */
4990   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4991     {
4992       unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4993 			- byte);
4994       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4995       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4996       byte = (subword_byte % UNITS_PER_WORD
4997 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4998     }
4999 
5000   /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
5001      so if it's become negative it will instead be very large.)  */
5002   gcc_assert (byte < GET_MODE_SIZE (innermode));
5003 
5004   /* Convert from bytes to chunks of size value_bit.  */
5005   value_start = byte * (BITS_PER_UNIT / value_bit);
5006 
5007   /* Re-pack the value.  */
5008 
5009   if (VECTOR_MODE_P (outermode))
5010     {
5011       num_elem = GET_MODE_NUNITS (outermode);
5012       result_v = rtvec_alloc (num_elem);
5013       elems = &RTVEC_ELT (result_v, 0);
5014       outer_submode = GET_MODE_INNER (outermode);
5015     }
5016   else
5017     {
5018       num_elem = 1;
5019       elems = &result_s;
5020       outer_submode = outermode;
5021     }
5022 
5023   outer_class = GET_MODE_CLASS (outer_submode);
5024   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5025 
5026   gcc_assert (elem_bitsize % value_bit == 0);
5027   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5028 
5029   for (elem = 0; elem < num_elem; elem++)
5030     {
5031       unsigned char *vp;
5032 
5033       /* Vectors are stored in target memory order.  (This is probably
5034 	 a mistake.)  */
5035       {
5036 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5037 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5038 			  / BITS_PER_UNIT);
5039 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5040 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5041 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5042 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5043 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5044       }
5045 
5046       switch (outer_class)
5047 	{
5048 	case MODE_INT:
5049 	case MODE_PARTIAL_INT:
5050 	  {
5051 	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
5052 
5053 	    for (i = 0;
5054 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5055 		 i += value_bit)
5056 	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5057 	    for (; i < elem_bitsize; i += value_bit)
5058 	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5059 		     << (i - HOST_BITS_PER_WIDE_INT));
5060 
5061 	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
5062 	       know why.  */
5063 	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5064 	      elems[elem] = gen_int_mode (lo, outer_submode);
5065 	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5066 	      elems[elem] = immed_double_const (lo, hi, outer_submode);
5067 	    else
5068 	      return NULL_RTX;
5069 	  }
5070 	  break;
5071 
5072 	case MODE_FLOAT:
5073 	case MODE_DECIMAL_FLOAT:
5074 	  {
5075 	    REAL_VALUE_TYPE r;
5076 	    long tmp[max_bitsize / 32];
5077 
5078 	    /* real_from_target wants its input in words affected by
5079 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5080 	       and use WORDS_BIG_ENDIAN instead; see the documentation
5081 	       of SUBREG in rtl.texi.  */
5082 	    for (i = 0; i < max_bitsize / 32; i++)
5083 	      tmp[i] = 0;
5084 	    for (i = 0; i < elem_bitsize; i += value_bit)
5085 	      {
5086 		int ibase;
5087 		if (WORDS_BIG_ENDIAN)
5088 		  ibase = elem_bitsize - 1 - i;
5089 		else
5090 		  ibase = i;
5091 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5092 	      }
5093 
5094 	    real_from_target (&r, tmp, outer_submode);
5095 	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5096 	  }
5097 	  break;
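	  /* An added note: for an SFmode element, say, the loop above
	     gathers all 32 bits into tmp[0] (value_bit chunks at a time),
	     and real_from_target then reconstitutes the REAL_VALUE_TYPE
	     from that buffer.  */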
5098 
5099 	case MODE_FRACT:
5100 	case MODE_UFRACT:
5101 	case MODE_ACCUM:
5102 	case MODE_UACCUM:
5103 	  {
5104 	    FIXED_VALUE_TYPE f;
5105 	    f.data.low = 0;
5106 	    f.data.high = 0;
5107 	    f.mode = outer_submode;
5108 
5109 	    for (i = 0;
5110 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5111 		 i += value_bit)
5112 	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5113 	    for (; i < elem_bitsize; i += value_bit)
5114 	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5115 			     << (i - HOST_BITS_PER_WIDE_INT));
5116 
5117 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5118           }
5119           break;
5120 
5121 	default:
5122 	  gcc_unreachable ();
5123 	}
5124     }
5125   if (VECTOR_MODE_P (outermode))
5126     return gen_rtx_CONST_VECTOR (outermode, result_v);
5127   else
5128     return result_s;
5129 }
5130 
5131 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5132    Return 0 if no simplifications are possible.  */
5133 rtx
5134 simplify_subreg (enum machine_mode outermode, rtx op,
5135 		 enum machine_mode innermode, unsigned int byte)
5136 {
5137   /* Little bit of sanity checking.  */
5138   gcc_assert (innermode != VOIDmode);
5139   gcc_assert (outermode != VOIDmode);
5140   gcc_assert (innermode != BLKmode);
5141   gcc_assert (outermode != BLKmode);
5142 
5143   gcc_assert (GET_MODE (op) == innermode
5144 	      || GET_MODE (op) == VOIDmode);
5145 
5146   gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5147   gcc_assert (byte < GET_MODE_SIZE (innermode));
5148 
5149   if (outermode == innermode && !byte)
5150     return op;
5151 
5152   if (CONST_INT_P (op)
5153       || GET_CODE (op) == CONST_DOUBLE
5154       || GET_CODE (op) == CONST_FIXED
5155       || GET_CODE (op) == CONST_VECTOR)
5156     return simplify_immed_subreg (outermode, op, innermode, byte);
5157 
5158 	  /* Changing mode twice with SUBREG => just change it once,
5159 	     or not at all if changing back to the starting mode.  */
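	  /* An added illustration: (subreg:QI (subreg:HI (reg:SI R) 0) 0)
	     folds directly to (subreg:QI (reg:SI R) 0) -- assuming a
	     little-endian target, so that both offsets name the lowpart.  */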
5160   if (GET_CODE (op) == SUBREG)
5161     {
5162       enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5163       int final_offset = byte + SUBREG_BYTE (op);
5164       rtx newx;
5165 
5166       if (outermode == innermostmode
5167 	  && byte == 0 && SUBREG_BYTE (op) == 0)
5168 	return SUBREG_REG (op);
5169 
5170 	      /* The SUBREG_BYTE represents the offset, as if the value were
5171 		 stored in memory.  An irritating exception is the paradoxical
5172 		 subreg, where we define SUBREG_BYTE to be 0.  On big-endian
5173 		 machines this value would be negative.  For a moment, undo this exception.  */
5174       if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5175 	{
5176 	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5177 	  if (WORDS_BIG_ENDIAN)
5178 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5179 	  if (BYTES_BIG_ENDIAN)
5180 	    final_offset += difference % UNITS_PER_WORD;
5181 	}
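      /* An added worked example of the adjustment above (assuming a target
	 where both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN hold and
	 UNITS_PER_WORD == 4): for a paradoxical (subreg:SI (... :HI) 0),
	 difference == 2 - 4 == -2, so final_offset picks up
	 (-2 / 4) * 4 + (-2 % 4) == -2 -- the negative offset the comment
	 above alludes to.  */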
5182       if (SUBREG_BYTE (op) == 0
5183 	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5184 	{
5185 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5186 	  if (WORDS_BIG_ENDIAN)
5187 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5188 	  if (BYTES_BIG_ENDIAN)
5189 	    final_offset += difference % UNITS_PER_WORD;
5190 	}
5191 
5192       /* See whether the resulting subreg will be paradoxical.  */
5193       if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5194 	{
5195 	  /* In nonparadoxical subregs we can't handle negative offsets.  */
5196 	  if (final_offset < 0)
5197 	    return NULL_RTX;
5198 	  /* Bail out in case the resulting subreg would be incorrect.  */
5199 	  if (final_offset % GET_MODE_SIZE (outermode)
5200 	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5201 	    return NULL_RTX;
5202 	}
5203       else
5204 	{
5205 	  int offset = 0;
5206 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5207 
5208 	  /* In a paradoxical subreg, see if we are still looking at the lower
5209 	     part.  If so, our SUBREG_BYTE will be 0.  */
5210 	  if (WORDS_BIG_ENDIAN)
5211 	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5212 	  if (BYTES_BIG_ENDIAN)
5213 	    offset += difference % UNITS_PER_WORD;
5214 	  if (offset == final_offset)
5215 	    final_offset = 0;
5216 	  else
5217 	    return NULL_RTX;
5218 	}
5219 
5220       /* Recurse for further possible simplifications.  */
5221       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5222 			      final_offset);
5223       if (newx)
5224 	return newx;
5225       if (validate_subreg (outermode, innermostmode,
5226 			   SUBREG_REG (op), final_offset))
5227 	{
5228 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5229 	  if (SUBREG_PROMOTED_VAR_P (op)
5230 	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5231 	      && GET_MODE_CLASS (outermode) == MODE_INT
5232 	      && IN_RANGE (GET_MODE_SIZE (outermode),
5233 			   GET_MODE_SIZE (innermode),
5234 			   GET_MODE_SIZE (innermostmode))
5235 	      && subreg_lowpart_p (newx))
5236 	    {
5237 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
5238 	      SUBREG_PROMOTED_UNSIGNED_SET
5239 		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5240 	    }
5241 	  return newx;
5242 	}
5243       return NULL_RTX;
5244     }
5245 
5246   /* Merge implicit and explicit truncations.  */
5247 
5248   if (GET_CODE (op) == TRUNCATE
5249       && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5250       && subreg_lowpart_offset (outermode, innermode) == byte)
5251     return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5252 			       GET_MODE (XEXP (op, 0)));
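  /* An added illustration: (subreg:QI (truncate:HI (reg:SI R)) 0) becomes
     (truncate:QI (reg:SI R)) -- assuming byte 0 is the lowpart offset, as
     on a little-endian target.  */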
5253 
5254   /* SUBREG of a hard register => just change the register number
5255      and/or mode.  If the hard register is not valid in that mode,
5256      suppress this simplification.  If the hard register is the stack,
5257      frame, or argument pointer, leave this as a SUBREG.  */
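  /* An added illustration (assuming a hypothetical little-endian target
     with word-sized hard registers): (subreg:SI (reg:DI 0) 4) can simplify
     to plain (reg:SI 1) when simplify_subreg_regno accepts the
     register/mode combination.  */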
5258 
5259   if (REG_P (op) && HARD_REGISTER_P (op))
5260     {
5261       unsigned int regno, final_regno;
5262 
5263       regno = REGNO (op);
5264       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5265       if (HARD_REGISTER_NUM_P (final_regno))
5266 	{
5267 	  rtx x;
5268 	  int final_offset = byte;
5269 
5270 	  /* Adjust offset for paradoxical subregs.  */
5271 	  if (byte == 0
5272 	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5273 	    {
5274 	      int difference = (GET_MODE_SIZE (innermode)
5275 				- GET_MODE_SIZE (outermode));
5276 	      if (WORDS_BIG_ENDIAN)
5277 		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5278 	      if (BYTES_BIG_ENDIAN)
5279 		final_offset += difference % UNITS_PER_WORD;
5280 	    }
5281 
5282 	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5283 
5284 	  /* Propagate the original regno.  We don't have any way to specify
5285 	     the offset inside the original regno, so do so only for the
5286 	     lowpart.  The information is used only by alias analysis, which
5287 	     cannot grok partial registers anyway.  */
5288 
5289 	  if (subreg_lowpart_offset (outermode, innermode) == byte)
5290 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5291 	  return x;
5292 	}
5293     }
5294 
5295   /* If we have a SUBREG of a register that we are replacing and we are
5296      replacing it with a MEM, make a new MEM and try replacing the
5297      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
5298      or if we would be widening it.  */
5299 
5300   if (MEM_P (op)
5301       && ! mode_dependent_address_p (XEXP (op, 0))
5302       /* Allow splitting of volatile memory references in case we don't
5303          have an instruction to move the whole thing.  */
5304       && (! MEM_VOLATILE_P (op)
5305 	  || ! have_insn_for (SET, innermode))
5306       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5307     return adjust_address_nv (op, outermode, byte);
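  /* An added illustration: (subreg:QI (mem:SI ADDR) 3) can be narrowed to
     (mem:QI (plus ADDR 3)) -- a QImode reference 3 bytes into the original
     MEM -- since subreg byte offsets are already in memory order.  */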
5308 
5309   /* Handle complex values represented as CONCAT
5310      of real and imaginary part.  */
5311   if (GET_CODE (op) == CONCAT)
5312     {
5313       unsigned int part_size, final_offset;
5314       rtx part, res;
5315 
5316       part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5317       if (byte < part_size)
5318 	{
5319 	  part = XEXP (op, 0);
5320 	  final_offset = byte;
5321 	}
5322       else
5323 	{
5324 	  part = XEXP (op, 1);
5325 	  final_offset = byte - part_size;
5326 	}
5327 
5328       if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5329 	return NULL_RTX;
5330 
5331       res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5332       if (res)
5333 	return res;
5334       if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5335 	return gen_rtx_SUBREG (outermode, part, final_offset);
5336       return NULL_RTX;
5337     }
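  /* An added illustration: with a complex (concat:SC RE:SF IM:SF) and
     4-byte SFmode parts, byte 0 selects RE while byte 4 selects IM with
     final_offset == 0, so (subreg:SF (concat:SC RE IM) 4) simplifies to
     IM.  */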
5338 
5339   /* Optimize SUBREG truncations of zero- and sign-extended values.  */
5340   if ((GET_CODE (op) == ZERO_EXTEND
5341        || GET_CODE (op) == SIGN_EXTEND)
5342       && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5343     {
5344       unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5345 
5346       /* If we're requesting the lowpart of a zero or sign extension,
5347 	 there are three possibilities.  If the outermode is the same
5348 	 as the origmode, we can omit both the extension and the subreg.
5349 	 If the outermode is not larger than the origmode, we can apply
5350 	 the truncation without the extension.  Finally, if the outermode
5351 	 is larger than the origmode, but both are integer modes, we
5352 	 can just extend to the appropriate mode.  */
5353       if (bitpos == 0)
5354 	{
5355 	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5356 	  if (outermode == origmode)
5357 	    return XEXP (op, 0);
5358 	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5359 	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5360 					subreg_lowpart_offset (outermode,
5361 							       origmode));
5362 	  if (SCALAR_INT_MODE_P (outermode))
5363 	    return simplify_gen_unary (GET_CODE (op), outermode,
5364 				       XEXP (op, 0), origmode);
5365 	}
5366 
5367       /* A SUBREG resulting from a zero extension may fold to zero if
5368 	 it extracts bits higher than the ZERO_EXTEND's source bits.  */
5369       if (GET_CODE (op) == ZERO_EXTEND
5370 	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5371 	return CONST0_RTX (outermode);
5372     }
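  /* Added illustrations of the three lowpart cases above (assuming byte 0
     is the lowpart, as on little endian):
     (subreg:HI (zero_extend:SI (reg:HI R)) 0) is just (reg:HI R);
     (subreg:QI (zero_extend:SI (reg:HI R)) 0) becomes (subreg:QI (reg:HI R) 0);
     (subreg:SI (zero_extend:DI (reg:HI R)) 0) re-extends to
     (zero_extend:SI (reg:HI R)).  */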
5373 
5374   /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5375      into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5376      the outer subreg is effectively a truncation to the original mode.  */
5377   if ((GET_CODE (op) == LSHIFTRT
5378        || GET_CODE (op) == ASHIFTRT)
5379       && SCALAR_INT_MODE_P (outermode)
5380       /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5381 	 to avoid the possibility that an outer LSHIFTRT shifts by more
5382 	 than the sign extension's sign_bit_copies and introduces zeros
5383 	 into the high bits of the result.  */
5384       && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5385       && CONST_INT_P (XEXP (op, 1))
5386       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5387       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5388       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5389       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5390     return simplify_gen_binary (ASHIFTRT, outermode,
5391 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
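  /* An added example (little endian, QImode of 8 bits, SImode of 32):
     (subreg:QI (ashiftrt:SI (sign_extend:SI (reg:QI R)) (const_int 3)) 0)
     becomes (ashiftrt:QI (reg:QI R) (const_int 3)); the sign-bit copies
     the truncation throws away carried no information.  */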
5392 
5393   /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5394      into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5395      the outer subreg is effectively a truncation to the original mode.  */
5396   if ((GET_CODE (op) == LSHIFTRT
5397        || GET_CODE (op) == ASHIFTRT)
5398       && SCALAR_INT_MODE_P (outermode)
5399       && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5400       && CONST_INT_P (XEXP (op, 1))
5401       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5402       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5403       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5404       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5405     return simplify_gen_binary (LSHIFTRT, outermode,
5406 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5407 
5408   /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5409      into (ashift:QI (x:QI) C), where C is a suitable small constant and
5410      the outer subreg is effectively a truncation to the original mode.  */
5411   if (GET_CODE (op) == ASHIFT
5412       && SCALAR_INT_MODE_P (outermode)
5413       && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5414       && CONST_INT_P (XEXP (op, 1))
5415       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5416 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5417       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5418       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5419       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5420     return simplify_gen_binary (ASHIFT, outermode,
5421 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5422 
5423   /* Recognize a word extraction from a multi-word subreg.  */
5424   if ((GET_CODE (op) == LSHIFTRT
5425        || GET_CODE (op) == ASHIFTRT)
5426       && SCALAR_INT_MODE_P (outermode)
5427       && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5428       && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5429       && CONST_INT_P (XEXP (op, 1))
5430       && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5431       && INTVAL (XEXP (op, 1)) >= 0
5432       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5433       && byte == subreg_lowpart_offset (outermode, innermode))
5434     {
5435       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5436       return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5437 				  (WORDS_BIG_ENDIAN
5438 				   ? byte - shifted_bytes
5439 				   : byte + shifted_bytes));
5440     }
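  /* An added example (assuming little endian with 32-bit words):
     (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32)) 0) extracts the
     high word directly as (subreg:SI (reg:DI R) 4), since shifted_bytes
     == 32 / BITS_PER_UNIT == 4.  */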
5441 
5442   return NULL_RTX;
5443 }
5444 
5445 /* Make a SUBREG operation or equivalent if it folds.  */
5446 
5447 rtx
5448 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5449 		     enum machine_mode innermode, unsigned int byte)
5450 {
5451   rtx newx;
5452 
5453   newx = simplify_subreg (outermode, op, innermode, byte);
5454   if (newx)
5455     return newx;
5456 
5457   if (GET_CODE (op) == SUBREG
5458       || GET_CODE (op) == CONCAT
5459       || GET_MODE (op) == VOIDmode)
5460     return NULL_RTX;
5461 
5462   if (validate_subreg (outermode, innermode, op, byte))
5463     return gen_rtx_SUBREG (outermode, op, byte);
5464 
5465   return NULL_RTX;
5466 }
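/* An added usage sketch (illustrative only): a caller wanting the lowpart
   SImode view of a DImode value might write

     x = simplify_gen_subreg (SImode, op, DImode,
			      subreg_lowpart_offset (SImode, DImode));

   and must be prepared for NULL_RTX, since neither a simplification nor
   a valid SUBREG may exist.  */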
5467 
5468 /* Simplify X, an rtx expression.
5469 
5470    Return the simplified expression or NULL if no simplifications
5471    were possible.
5472 
5473    This is the preferred entry point into the simplification routines;
5474    however, we still allow passes to call the more specific routines.
5475 
5476    Right now GCC has three (yes, three) major bodies of RTL simplification
5477    code that need to be unified.
5478 
5479 	1. fold_rtx in cse.c.  This code uses various CSE specific
5480 	   information to aid in RTL simplification.
5481 
5482 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
5483 	   it uses combine specific information to aid in RTL
5484 	   simplification.
5485 
5486 	3. The routines in this file.
5487 
5488 
5489    Long term we want to only have one body of simplification code; to
5490    get to that state I recommend the following steps:
5491 
5492 	1. Pore over fold_rtx & simplify_rtx and move any simplifications
5493 	   that do not depend on pass-specific state into these routines.
5494 
5495 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
5496 	   use this routine whenever possible.
5497 
5498 	3. Allow for pass dependent state to be provided to these
5499 	   routines and add simplifications based on the pass dependent
5500 	   state.  Remove code from cse.c & combine.c that becomes
5501 	   redundant/dead.
5502 
5503     It will take time, but ultimately the compiler will be easier to
5504     maintain and improve.  It's totally silly that when we add a
5505     simplification it needs to be added in 4 places (3 for RTL
5506     simplification and 1 for tree simplification).  */
5507 
5508 rtx
5509 simplify_rtx (const_rtx x)
5510 {
5511   const enum rtx_code code = GET_CODE (x);
5512   const enum machine_mode mode = GET_MODE (x);
5513 
5514   switch (GET_RTX_CLASS (code))
5515     {
5516     case RTX_UNARY:
5517       return simplify_unary_operation (code, mode,
5518 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5519     case RTX_COMM_ARITH:
5520       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5521 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5522 
5523       /* Fall through....  */
5524 
5525     case RTX_BIN_ARITH:
5526       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5527 
5528     case RTX_TERNARY:
5529     case RTX_BITFIELD_OPS:
5530       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5531 					 XEXP (x, 0), XEXP (x, 1),
5532 					 XEXP (x, 2));
5533 
5534     case RTX_COMPARE:
5535     case RTX_COMM_COMPARE:
5536       return simplify_relational_operation (code, mode,
5537                                             ((GET_MODE (XEXP (x, 0))
5538                                              != VOIDmode)
5539                                             ? GET_MODE (XEXP (x, 0))
5540                                             : GET_MODE (XEXP (x, 1))),
5541                                             XEXP (x, 0),
5542                                             XEXP (x, 1));
5543 
5544     case RTX_EXTRA:
5545       if (code == SUBREG)
5546 	return simplify_subreg (mode, SUBREG_REG (x),
5547 				GET_MODE (SUBREG_REG (x)),
5548 				SUBREG_BYTE (x));
5549       break;
5550 
5551     case RTX_OBJ:
5552       if (code == LO_SUM)
5553 	{
5554 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
5555 	  if (GET_CODE (XEXP (x, 0)) == HIGH
5556 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5557 	    return XEXP (x, 1);
5558 	}
5559       break;
5560 
5561     default:
5562       break;
5563     }
5564   return NULL;
5565 }
5566