/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"
#include "hard-reg-set.h"
#include "function-abi.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
                                          const_rtx, machine_mode,
                                          unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];

/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}

/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;

/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
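
/* Usage sketch, assuming MEM is a memory operand obtained elsewhere and
   consider_hoisting is a hypothetical helper:

       rtx addr = XEXP (mem, 0);
       if (!rtx_unstable_p (addr))
         consider_hoisting (mem);   // the address denotes the same
                                    // location at other points in
                                    // this function
*/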

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
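
/* Unlike rtx_unstable_p above, this asks whether X can vary between two
   executions of the program, which is what alias analysis cares about.
   A sketch, assuming X is an address rtx obtained elsewhere:

       if (!rtx_varies_p (x, true))    // for_alias
         ...  // X compares reliably against constant addresses here
*/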

/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
     is completed, but we need to give at least an estimate for the stack
     pointer based on the frame size.  */
  if (!epilogue_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
      if (table[i].from == from)
        {
          if (table[i].to == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 - offset2;
                }
            }
        }
      else if (table[i].to == from)
        {
          if (table[i].from == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return - offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 - offset2;
                }
            }
        }

  /* If the requested register combination was not found,
     try a simpler combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}

/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);
  gcc_checking_assert (mode == BLKmode
                       || mode == VOIDmode
                       || known_size_p (size));
  poly_int64 const_x1;

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT
      && unaligned_mems
      && mode != BLKmode
      && mode != VOIDmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
             the real alignment of %sp.  However, when it does this, the
             alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          poly_int64 decl_size;

          if (maybe_lt (offset, 0))
            return 1;
          if (!known_size_p (size))
            return maybe_ne (offset, 0);

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            {
              if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
                decl_size = -1;
            }
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (!known_size_p (decl_size) || known_eq (decl_size, 0)
                  ? maybe_ne (offset, 0)
                  : !known_subrange_p (offset, size, 0, decl_size));
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
         || x == stack_pointer_rtx
         /* The arg pointer varies if it is not a fixed register.  */
         || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
          poly_int64 red_zone_size = 0;
#endif
          poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
          poly_int64 low_bound, high_bound;

          if (!known_size_p (size))
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = targetm.starting_frame_offset ();
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = targetm.starting_frame_offset ();
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              poly_int64 sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (known_ge (offset, low_bound)
              && known_le (offset, high_bound - size))
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && GET_CODE (XEXP (x, 1)) == CONST
          && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
          && known_eq (offset, 0))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}
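
/* For illustration: a plain non-weak (symbol_ref "x") cannot trap, while
   a pseudo-register address is conservatively assumed to trap; and since
   this wrapper passes an unknown access size, even a nonzero constant
   offset from a symbol is assumed to trap.  A sketch, assuming ADDR is
   an address rtx:

       if (!rtx_addr_can_trap_p (addr))
         ...  // dereferencing ADDR cannot fault
*/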

/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
               && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
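
/* For illustration: this predicate is what lets a null-pointer check be
   folded away.  A sketch, assuming COND is (eq ADDR (const_int 0)):

       if (nonzero_address_p (XEXP (cond, 0)))
         ...  // the comparison is known to be false
*/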

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}

/* Return the CALL in the pattern of INSN, if there is one.  */

rtx
get_call_rtx_from (const rtx_insn *insn)
{
  rtx x = PATTERN (insn);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
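
/* For illustration: extracting the callee's symbol from a call insn.
   A sketch, assuming INSN satisfies CALL_P:

       rtx call = get_call_rtx_from (insn);
       if (call)
         {
           rtx addr = XEXP (XEXP (call, 0), 0);  // address inside the MEM
           if (GET_CODE (addr) == SYMBOL_REF)
             ...  // XSTR (addr, 0) is the callee's name
         }
*/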

/* Get the declaration of the function called by INSN.  */

tree
get_call_fndecl (const rtx_insn *insn)
{
  rtx note, datum;

  note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
  if (note == NULL_RTX)
    return NULL_TREE;

  datum = XEXP (note, 0);
  if (datum != NULL_RTX)
    return SYMBOL_REF_DECL (datum);

  return NULL_TREE;
}

/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
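
/* For illustration: for X = (const (plus (symbol_ref "s") (const_int 12))),
   get_integer_term returns 12 and get_related_value returns
   (symbol_ref "s"); cse.c uses the pair to relate constants that differ
   only by an integer term.  */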

/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
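
/* For illustration:

       (const (plus (symbol_ref "s") (const_int -4)))
         -> *BASE_OUT = (symbol_ref "s"), *OFFSET_OUT = (const_int -4)
       (reg 100)
         -> *BASE_OUT = (reg 100),        *OFFSET_OUT = const0_rtx
*/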

/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}
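
/* For illustration, assuming X was built elsewhere:

       poly_int64 offset;
       rtx base = strip_offset (x, &offset);
       // (plus (reg 100) (const_int 8)) -> base = (reg 100), offset = 8
       // (const_int 5)                  -> base = const0_rtx, offset = 5
*/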

/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}

/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
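
/* For illustration: counting uses of a register while ignoring a plain
   SET_DEST.  A sketch, assuming INSN and REG exist:

       int n = count_occurrences (PATTERN (insn), reg, 0);
       // For (set (reg 1) (plus (reg 1) (reg 2))) and REG = (reg 1),
       // n is 1: the destination occurrence is not counted.
*/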


/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}


/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }
  return 0;
}
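
/* For illustration: registers are compared by number and everything else
   by structure, so this also answers "does this expression appear in IN"
   for non-register REGs.  A sketch, assuming INSN exists:

       if (reg_mentioned_p (frame_pointer_rtx, PATTERN (insn)))
         ...  // the insn mentions the frame pointer somewhere
*/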

/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
           || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}

/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && !read_modify_subreg_p (SET_DEST (body)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}

/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && (insn_callee_abi (as_a<const rtx_insn *> (insn))
                       .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
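
/* For illustration: reg_set_p answers for a single insn, while
   reg_set_between_p above scans a half-open range of insns.  A sketch,
   assuming INSN exists:

       if (reg_set_p (stack_pointer_rtx, insn))
         ...  // INSN writes, clobbers, auto-modifies or call-clobbers SP
*/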

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}

/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return false;
  poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
  /* The inner and outer modes of a subreg must be ordered, so that we
     can tell whether they're paradoxical or partial.  */
  gcc_checking_assert (ordered_p (isize, osize));
  return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
}

/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}

/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.

   IMPLICIT is true if we should include registers that are fully-clobbered
   by calls.  This should be used with caution, since it doesn't include
   partially-clobbered registers.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (insn, record_hard_reg_sets, pset);
  if (CALL_P (insn) && implicit)
    *pset |= insn_callee_abi (insn).full_reg_clobbers ();
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}

/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common case
                 only a single set is present in the pattern, so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach a set for the first time, we just expect it
                 to be the single set we are looking for; only when more
                 sets are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
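
/* For illustration: callers normally go through the single_set wrapper
   in rtl.h, which fast-paths a bare SET pattern before falling back to
   single_set_2.  A sketch, assuming INSN exists:

       rtx set = single_set (insn);
       if (set)
         ...  // exactly one live SET: use SET_SRC (set), SET_DEST (set)
*/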

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}

/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* The SET is a no-op if the destination registers coincide with the
     source vector elements selected by the VEC_SELECT.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      poly_int64 c0;
      if (!poly_int_rtx_p (XVECEXP (par, 0, 0), &c0))
        return 0;
      poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        {
          poly_int64 c0i;
          if (!poly_int_rtx_p (XVECEXP (par, 0, i), &c0i)
              || maybe_ne (c0i, c0 + i))
            return 0;
        }
      return
        REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
        && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                                  offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
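
/* For illustration:

       (set (reg 65) (reg 65))              -> 1 (no-op)
       (set (reg 65) (reg 66))              -> 0
       (set (mem (reg 65)) (mem (reg 65)))  -> 1 only if the MEM has no
                                               side effects (not volatile,
                                               no auto-modification)
*/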
1660 
1661 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1662    value to itself.  */
1663 
1664 int
1665 noop_move_p (const rtx_insn *insn)
1666 {
1667   rtx pat = PATTERN (insn);
1668 
1669   if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1670     return 1;
1671 
1672   /* Insns carrying these notes are useful later on.  */
1673   if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1674     return 0;
1675 
1676   /* Check the code to be executed for COND_EXEC.  */
1677   if (GET_CODE (pat) == COND_EXEC)
1678     pat = COND_EXEC_CODE (pat);
1679 
1680   if (GET_CODE (pat) == SET && set_noop_p (pat))
1681     return 1;
1682 
1683   if (GET_CODE (pat) == PARALLEL)
1684     {
1685       int i;
1686       /* If nothing but SETs of registers to themselves,
1687 	 this insn can also be deleted.  */
1688       for (i = 0; i < XVECLEN (pat, 0); i++)
1689 	{
1690 	  rtx tem = XVECEXP (pat, 0, i);
1691 
1692 	  if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
1693 	    continue;
1694 
1695 	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1696 	    return 0;
1697 	}
1698 
1699       return 1;
1700     }
1701   return 0;
1702 }
1703 
1704 
1705 /* Return nonzero if register in range [REGNO, ENDREGNO)
1706    appears either explicitly or implicitly in X
1707    other than being stored into.
1708 
1709    References contained within the substructure at LOC do not count.
1710    LOC may be zero, meaning don't ignore anything.  */
1711 
1712 bool
1713 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1714 		   rtx *loc)
1715 {
1716   int i;
1717   unsigned int x_regno;
1718   RTX_CODE code;
1719   const char *fmt;
1720 
1721  repeat:
1722   /* The contents of a REG_NONNEG note is always zero, so we must come here
1723      upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
1724   if (x == 0)
1725     return false;
1726 
1727   code = GET_CODE (x);
1728 
1729   switch (code)
1730     {
1731     case REG:
1732       x_regno = REGNO (x);
1733 
1734       /* If we modifying the stack, frame, or argument pointer, it will
1735 	 clobber a virtual register.  In fact, we could be more precise,
1736 	 but it isn't worth it.  */
1737       if ((x_regno == STACK_POINTER_REGNUM
1738 	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1739 	       && x_regno == ARG_POINTER_REGNUM)
1740 	   || x_regno == FRAME_POINTER_REGNUM)
1741 	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1742 	return true;
1743 
1744       return endregno > x_regno && regno < END_REGNO (x);
1745 
1746     case SUBREG:
1747       /* If this is a SUBREG of a hard reg, we can see exactly which
1748 	 registers are being modified.  Otherwise, handle normally.  */
1749       if (REG_P (SUBREG_REG (x))
1750 	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1751 	{
1752 	  unsigned int inner_regno = subreg_regno (x);
1753 	  unsigned int inner_endregno
1754 	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1755 			     ? subreg_nregs (x) : 1);
1756 
1757 	  return endregno > inner_regno && regno < inner_endregno;
1758 	}
1759       break;
1760 
1761     case CLOBBER:
1762     case SET:
1763       if (&SET_DEST (x) != loc
1764 	  /* Note setting a SUBREG counts as referring to the REG it is in for
1765 	     a pseudo but not for hard registers since we can
1766 	     treat each word individually.  */
1767 	  && ((GET_CODE (SET_DEST (x)) == SUBREG
1768 	       && loc != &SUBREG_REG (SET_DEST (x))
1769 	       && REG_P (SUBREG_REG (SET_DEST (x)))
1770 	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1771 	       && refers_to_regno_p (regno, endregno,
1772 				     SUBREG_REG (SET_DEST (x)), loc))
1773 	      || (!REG_P (SET_DEST (x))
1774 		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1775 	return true;
1776 
1777       if (code == CLOBBER || loc == &SET_SRC (x))
1778 	return false;
1779       x = SET_SRC (x);
1780       goto repeat;
1781 
1782     default:
1783       break;
1784     }
1785 
1786   /* X does not match, so try its subexpressions.  */
1787 
1788   fmt = GET_RTX_FORMAT (code);
1789   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1790     {
1791       if (fmt[i] == 'e' && loc != &XEXP (x, i))
1792 	{
1793 	  if (i == 0)
1794 	    {
1795 	      x = XEXP (x, 0);
1796 	      goto repeat;
1797 	    }
1798 	  else
1799 	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1800 	      return true;
1801 	}
1802       else if (fmt[i] == 'E')
1803 	{
1804 	  int j;
1805 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1806 	    if (loc != &XVECEXP (x, i, j)
1807 		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1808 	      return true;
1809 	}
1810     }
1811   return false;
1812 }
1813 
1814 /* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
1815    we check if any register number in X conflicts with the relevant register
1816    numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
1817    contains a MEM (we don't bother checking for memory addresses that can't
1818    conflict because we expect this to be a rare case.  */
1819 
1820 int
1821 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1822 {
1823   unsigned int regno, endregno;
1824 
1825   /* If either argument is a constant, then modifying X cannot
1826      affect IN.  Here we look at IN, we can profitably combine
1827      CONSTANT_P (x) with the switch statement below.  */
1828   if (CONSTANT_P (in))
1829     return 0;
1830 
1831  recurse:
1832   switch (GET_CODE (x))
1833     {
1834     case CLOBBER:
1835     case STRICT_LOW_PART:
1836     case ZERO_EXTRACT:
1837     case SIGN_EXTRACT:
1838       /* Overly conservative.  */
1839       x = XEXP (x, 0);
1840       goto recurse;
1841 
1842     case SUBREG:
1843       regno = REGNO (SUBREG_REG (x));
1844       if (regno < FIRST_PSEUDO_REGISTER)
1845 	regno = subreg_regno (x);
1846       endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1847 			  ? subreg_nregs (x) : 1);
1848       goto do_reg;
1849 
1850     case REG:
1851       regno = REGNO (x);
1852       endregno = END_REGNO (x);
1853     do_reg:
1854       return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1855 
1856     case MEM:
1857       {
1858 	const char *fmt;
1859 	int i;
1860 
1861 	if (MEM_P (in))
1862 	  return 1;
1863 
1864 	fmt = GET_RTX_FORMAT (GET_CODE (in));
1865 	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1866 	  if (fmt[i] == 'e')
1867 	    {
1868 	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1869 		return 1;
1870 	    }
1871 	  else if (fmt[i] == 'E')
1872 	    {
1873 	      int j;
1874 	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1875 		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1876 		  return 1;
1877 	    }
1878 
1879 	return 0;
1880       }
1881 
1882     case SCRATCH:
1883     case PC:
1884     case CC0:
1885       return reg_mentioned_p (x, in);
1886 
1887     case PARALLEL:
1888       {
1889 	int i;
1890 
1891 	/* If any register in here overlaps IN, we return 1.  */
1892 	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1893 	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
1894 	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1895 	    return 1;
1896 	return 0;
1897       }
1898 
1899     default:
1900       gcc_assert (CONSTANT_P (x));
1901       return 0;
1902     }
1903 }
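
/* Usage sketch (illustrative only, not part of this file): on a target
   where (reg:DI 0) spans hard registers 0 and 1,

     reg_overlap_mentioned_p (gen_rtx_REG (DImode, 0), in)

   returns 1 whenever IN mentions (reg:SI 1), since modifying the
   DImode register would clobber hard register 1.  */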
1904 
1905 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1906    (X would be the pattern of an insn).  DATA is an arbitrary pointer,
1907    ignored by note_stores, but passed to FUN.
1908 
1909    FUN receives three arguments:
1910    1. the REG, MEM, CC0 or PC being stored in or clobbered,
1911    2. the SET or CLOBBER rtx that does the store,
1912    3. the pointer DATA provided to note_stores.
1913 
1914   If the item being stored in or clobbered is a SUBREG of a hard register,
1915   the SUBREG will be passed.  */
1916 
1917 void
1918 note_pattern_stores (const_rtx x,
1919 		     void (*fun) (rtx, const_rtx, void *), void *data)
1920 {
1921   int i;
1922 
1923   if (GET_CODE (x) == COND_EXEC)
1924     x = COND_EXEC_CODE (x);
1925 
1926   if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1927     {
1928       rtx dest = SET_DEST (x);
1929 
1930       while ((GET_CODE (dest) == SUBREG
1931 	      && (!REG_P (SUBREG_REG (dest))
1932 		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1933 	     || GET_CODE (dest) == ZERO_EXTRACT
1934 	     || GET_CODE (dest) == STRICT_LOW_PART)
1935 	dest = XEXP (dest, 0);
1936 
1937       /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1938 	 each of whose first operand is a register.  */
1939       if (GET_CODE (dest) == PARALLEL)
1940 	{
1941 	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1942 	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1943 	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1944 	}
1945       else
1946 	(*fun) (dest, x, data);
1947     }
1948 
1949   else if (GET_CODE (x) == PARALLEL)
1950     for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1951       note_pattern_stores (XVECEXP (x, 0, i), fun, data);
1952 }
1953 
1954 /* Same, but for an instruction.  If the instruction is a call, include
1955    any CLOBBERs in its CALL_INSN_FUNCTION_USAGE.  */
1956 
1957 void
1958 note_stores (const rtx_insn *insn,
1959 	     void (*fun) (rtx, const_rtx, void *), void *data)
1960 {
1961   if (CALL_P (insn))
1962     for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
1963 	 link; link = XEXP (link, 1))
1964       if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1965 	note_pattern_stores (XEXP (link, 0), fun, data);
1966   note_pattern_stores (PATTERN (insn), fun, data);
1967 }
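
/* Usage sketch (illustrative only): count the hard registers stored to
   or clobbered by INSN.  The callback and driver code below are
   hypothetical, not part of this file.

     static void
     count_hard_reg_stores (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			    void *data)
     {
       if (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
	 ++*(int *) data;
     }

     int n_stores = 0;
     note_stores (insn, count_hard_reg_stores, &n_stores);  */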
1968 
1969 /* Like note_stores, but call FUN for each expression that is being
1970    referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
1971    FUN for each expression, not any interior subexpressions.  FUN receives a
1972    pointer to the expression and the DATA passed to this function.
1973 
1974    Note that this is not quite the same test as that done in reg_referenced_p
1975    since that considers something as being referenced if it is being
1976    partially set, while we do not.  */
1977 
1978 void
1979 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1980 {
1981   rtx body = *pbody;
1982   int i;
1983 
1984   switch (GET_CODE (body))
1985     {
1986     case COND_EXEC:
1987       (*fun) (&COND_EXEC_TEST (body), data);
1988       note_uses (&COND_EXEC_CODE (body), fun, data);
1989       return;
1990 
1991     case PARALLEL:
1992       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1993 	note_uses (&XVECEXP (body, 0, i), fun, data);
1994       return;
1995 
1996     case SEQUENCE:
1997       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1998 	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1999       return;
2000 
2001     case USE:
2002       (*fun) (&XEXP (body, 0), data);
2003       return;
2004 
2005     case ASM_OPERANDS:
2006       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
2007 	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
2008       return;
2009 
2010     case TRAP_IF:
2011       (*fun) (&TRAP_CONDITION (body), data);
2012       return;
2013 
2014     case PREFETCH:
2015       (*fun) (&XEXP (body, 0), data);
2016       return;
2017 
2018     case UNSPEC:
2019     case UNSPEC_VOLATILE:
2020       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2021 	(*fun) (&XVECEXP (body, 0, i), data);
2022       return;
2023 
2024     case CLOBBER:
2025       if (MEM_P (XEXP (body, 0)))
2026 	(*fun) (&XEXP (XEXP (body, 0), 0), data);
2027       return;
2028 
2029     case SET:
2030       {
2031 	rtx dest = SET_DEST (body);
2032 
2033 	/* For SETs we visit everything in the source, plus the address of
2034 	   any MEM destination and the operands of a ZERO_EXTRACT.  */
2035 	(*fun) (&SET_SRC (body), data);
2036 
2037 	if (GET_CODE (dest) == ZERO_EXTRACT)
2038 	  {
2039 	    (*fun) (&XEXP (dest, 1), data);
2040 	    (*fun) (&XEXP (dest, 2), data);
2041 	  }
2042 
2043 	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2044 	  dest = XEXP (dest, 0);
2045 
2046 	if (MEM_P (dest))
2047 	  (*fun) (&XEXP (dest, 0), data);
2048       }
2049       return;
2050 
2051     default:
2052       /* All the other possibilities never store.  */
2053       (*fun) (pbody, data);
2054       return;
2055     }
2056 }
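
/* Usage sketch (illustrative only): record whether any use expression
   in an insn mentions register REG.  The callback and structure below
   are hypothetical.

     struct reg_use_info { const_rtx reg; bool found; };

     static void
     find_reg_use (rtx *loc, void *data)
     {
       struct reg_use_info *info = (struct reg_use_info *) data;
       if (reg_mentioned_p (info->reg, *loc))
	 info->found = true;
     }

     struct reg_use_info info = { reg, false };
     note_uses (&PATTERN (insn), find_reg_use, &info);  */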
2057 
2058 /* Return nonzero if X's old contents don't survive after INSN.
2059    This will be true if X is (cc0) or if X is a register that either
2060    dies in INSN or is entirely set by INSN.
2061 
2062    "Entirely set" means set directly and not through a SUBREG or
2063    ZERO_EXTRACT, so no trace of the old contents remains.
2064    Likewise, REG_INC does not count.
2065 
2066    REG may be a hard or pseudo reg.  Renumbering is not taken into account,
2067    but for this use that makes no difference, since regs don't overlap
2068    during their lifetimes.  Therefore, this function may be used
2069    at any time after deaths have been computed.
2070 
2071    If REG is a hard reg that occupies multiple machine registers, this
2072    function will only return 1 if each of those registers will be replaced
2073    by INSN.  */
2074 
2075 int
2076 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2077 {
2078   unsigned int regno, end_regno;
2079   unsigned int i;
2080 
2081   /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
2082   if (GET_CODE (x) == CC0)
2083     return 1;
2084 
2085   gcc_assert (REG_P (x));
2086 
2087   regno = REGNO (x);
2088   end_regno = END_REGNO (x);
2089   for (i = regno; i < end_regno; i++)
2090     if (! dead_or_set_regno_p (insn, i))
2091       return 0;
2092 
2093   return 1;
2094 }
2095 
2096 /* Return TRUE iff DEST is a register or subreg of a register, is a
2097    complete rather than read-modify-write destination, and contains
2098    register TEST_REGNO.  */
2099 
2100 static bool
2101 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2102 {
2103   unsigned int regno, endregno;
2104 
2105   if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2106     dest = SUBREG_REG (dest);
2107 
2108   if (!REG_P (dest))
2109     return false;
2110 
2111   regno = REGNO (dest);
2112   endregno = END_REGNO (dest);
2113   return (test_regno >= regno && test_regno < endregno);
2114 }
2115 
2116 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2117    any member matches the covers_regno_no_parallel_p criteria.  */
2118 
2119 static bool
2120 covers_regno_p (const_rtx dest, unsigned int test_regno)
2121 {
2122   if (GET_CODE (dest) == PARALLEL)
2123     {
2124       /* Some targets place small structures in registers for return
2125 	 values of functions, and those registers are wrapped in
2126 	 PARALLELs that we may see as the destination of a SET.  */
2127       int i;
2128 
2129       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2130 	{
2131 	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2132 	  if (inner != NULL_RTX
2133 	      && covers_regno_no_parallel_p (inner, test_regno))
2134 	    return true;
2135 	}
2136 
2137       return false;
2138     }
2139   else
2140     return covers_regno_no_parallel_p (dest, test_regno);
2141 }
2142 
2143 /* Utility function for dead_or_set_p to check an individual register.  */
2144 
2145 int
2146 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2147 {
2148   const_rtx pattern;
2149 
2150   /* See if there is a death note for something that includes TEST_REGNO.  */
2151   if (find_regno_note (insn, REG_DEAD, test_regno))
2152     return 1;
2153 
2154   if (CALL_P (insn)
2155       && find_regno_fusage (insn, CLOBBER, test_regno))
2156     return 1;
2157 
2158   pattern = PATTERN (insn);
2159 
2160   /* If a COND_EXEC is not executed, the value survives.  */
2161   if (GET_CODE (pattern) == COND_EXEC)
2162     return 0;
2163 
2164   if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2165     return covers_regno_p (SET_DEST (pattern), test_regno);
2166   else if (GET_CODE (pattern) == PARALLEL)
2167     {
2168       int i;
2169 
2170       for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2171 	{
2172 	  rtx body = XVECEXP (pattern, 0, i);
2173 
2174 	  if (GET_CODE (body) == COND_EXEC)
2175 	    body = COND_EXEC_CODE (body);
2176 
2177 	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2178 	      && covers_regno_p (SET_DEST (body), test_regno))
2179 	    return 1;
2180 	}
2181     }
2182 
2183   return 0;
2184 }
2185 
2186 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2187    If DATUM is nonzero, look for one whose datum is DATUM.  */
2188 
2189 rtx
2190 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2191 {
2192   rtx link;
2193 
2194   gcc_checking_assert (insn);
2195 
2196   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2197   if (! INSN_P (insn))
2198     return 0;
2199   if (datum == 0)
2200     {
2201       for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2202 	if (REG_NOTE_KIND (link) == kind)
2203 	  return link;
2204       return 0;
2205     }
2206 
2207   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2208     if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2209       return link;
2210   return 0;
2211 }
2212 
2213 /* Return the reg-note of kind KIND in insn INSN which applies to register
2214    number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
2215    the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2216    it might be the case that the note overlaps REGNO.  */
2217 
2218 rtx
2219 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2220 {
2221   rtx link;
2222 
2223   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2224   if (! INSN_P (insn))
2225     return 0;
2226 
2227   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2228     if (REG_NOTE_KIND (link) == kind
2229 	/* Verify that it is a register, so that scratch and MEM won't cause a
2230 	   problem here.  */
2231 	&& REG_P (XEXP (link, 0))
2232 	&& REGNO (XEXP (link, 0)) <= regno
2233 	&& END_REGNO (XEXP (link, 0)) > regno)
2234       return link;
2235   return 0;
2236 }
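
/* Usage sketch (illustrative only): drop the death note for hard
   register REGNO from INSN, if there is one.

     rtx note = find_regno_note (insn, REG_DEAD, regno);
     if (note)
       remove_note (insn, note);  */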
2237 
2238 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2239    has such a note.  */
2240 
2241 rtx
2242 find_reg_equal_equiv_note (const_rtx insn)
2243 {
2244   rtx link;
2245 
2246   if (!INSN_P (insn))
2247     return 0;
2248 
2249   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2250     if (REG_NOTE_KIND (link) == REG_EQUAL
2251 	|| REG_NOTE_KIND (link) == REG_EQUIV)
2252       {
2253 	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2254 	   insns that have multiple sets.  Checking single_set to
2255 	   make sure of this is not the proper check, as explained
2256 	   in the comment in set_unique_reg_note.
2257 
2258 	   This should be changed into an assert.  */
2259 	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2260 	  return 0;
2261 	return link;
2262       }
2263   return NULL;
2264 }
2265 
2266 /* Check whether INSN is a single_set whose source is known to be
2267    equivalent to a constant.  Return that constant if so, otherwise
2268    return null.  */
2269 
2270 rtx
2271 find_constant_src (const rtx_insn *insn)
2272 {
2273   rtx note, set, x;
2274 
2275   set = single_set (insn);
2276   if (set)
2277     {
2278       x = avoid_constant_pool_reference (SET_SRC (set));
2279       if (CONSTANT_P (x))
2280 	return x;
2281     }
2282 
2283   note = find_reg_equal_equiv_note (insn);
2284   if (note && CONSTANT_P (XEXP (note, 0)))
2285     return XEXP (note, 0);
2286 
2287   return NULL_RTX;
2288 }
2289 
2290 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2291    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2292 
2293 int
2294 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2295 {
2296   /* If it's not a CALL_INSN, it can't possibly have a
2297      CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
2298   if (!CALL_P (insn))
2299     return 0;
2300 
2301   gcc_assert (datum);
2302 
2303   if (!REG_P (datum))
2304     {
2305       rtx link;
2306 
2307       for (link = CALL_INSN_FUNCTION_USAGE (insn);
2308 	   link;
2309 	   link = XEXP (link, 1))
2310 	if (GET_CODE (XEXP (link, 0)) == code
2311 	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2312 	  return 1;
2313     }
2314   else
2315     {
2316       unsigned int regno = REGNO (datum);
2317 
2318       /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2319 	 to pseudo registers, so don't bother checking.  */
2320 
2321       if (regno < FIRST_PSEUDO_REGISTER)
2322 	{
2323 	  unsigned int end_regno = END_REGNO (datum);
2324 	  unsigned int i;
2325 
2326 	  for (i = regno; i < end_regno; i++)
2327 	    if (find_regno_fusage (insn, code, i))
2328 	      return 1;
2329 	}
2330     }
2331 
2332   return 0;
2333 }
2334 
2335 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2336    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2337 
2338 int
2339 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2340 {
2341   rtx link;
2342 
2343   /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2344      to pseudo registers, so don't bother checking.  */
2345 
2346   if (regno >= FIRST_PSEUDO_REGISTER
2347       || !CALL_P (insn))
2348     return 0;
2349 
2350   for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2351     {
2352       rtx op, reg;
2353 
2354       if (GET_CODE (op = XEXP (link, 0)) == code
2355 	  && REG_P (reg = XEXP (op, 0))
2356 	  && REGNO (reg) <= regno
2357 	  && END_REGNO (reg) > regno)
2358 	return 1;
2359     }
2360 
2361   return 0;
2362 }
2363 
2364 
2365 /* Return true if KIND is an integer REG_NOTE.  */
2366 
2367 static bool
2368 int_reg_note_p (enum reg_note kind)
2369 {
2370   return kind == REG_BR_PROB;
2371 }
2372 
2373 /* Allocate a register note with kind KIND and datum DATUM.  LIST is
2374    stored as the pointer to the next register note.  */
2375 
2376 rtx
2377 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2378 {
2379   rtx note;
2380 
2381   gcc_checking_assert (!int_reg_note_p (kind));
2382   switch (kind)
2383     {
2384     case REG_CC_SETTER:
2385     case REG_CC_USER:
2386     case REG_LABEL_TARGET:
2387     case REG_LABEL_OPERAND:
2388     case REG_TM:
2389       /* These types of register notes use an INSN_LIST rather than an
2390 	 EXPR_LIST, so that copying is done right and dumps look
2391 	 better.  */
2392       note = alloc_INSN_LIST (datum, list);
2393       PUT_REG_NOTE_KIND (note, kind);
2394       break;
2395 
2396     default:
2397       note = alloc_EXPR_LIST (kind, datum, list);
2398       break;
2399     }
2400 
2401   return note;
2402 }
2403 
2404 /* Add register note with kind KIND and datum DATUM to INSN.  */
2405 
2406 void
2407 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2408 {
2409   REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2410 }
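
/* Usage sketch (illustrative only): record that the value set by INSN
   is known to equal the constant 42, for later passes to exploit.

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));  */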
2411 
2412 /* Add an integer register note with kind KIND and datum DATUM to INSN.  */
2413 
2414 void
2415 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2416 {
2417   gcc_checking_assert (int_reg_note_p (kind));
2418   REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2419 				       datum, REG_NOTES (insn));
2420 }
2421 
2422 /* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */
2423 
2424 void
2425 add_args_size_note (rtx_insn *insn, poly_int64 value)
2426 {
2427   gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2428   add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2429 }
2430 
2431 /* Add a register note like NOTE to INSN.  */
2432 
2433 void
2434 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2435 {
2436   if (GET_CODE (note) == INT_LIST)
2437     add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2438   else
2439     add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2440 }
2441 
2442 /* Duplicate NOTE and return the copy.  */
2443 rtx
2444 duplicate_reg_note (rtx note)
2445 {
2446   reg_note kind = REG_NOTE_KIND (note);
2447 
2448   if (GET_CODE (note) == INT_LIST)
2449     return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2450   else if (GET_CODE (note) == EXPR_LIST)
2451     return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2452   else
2453     return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2454 }
2455 
2456 /* Remove register note NOTE from the REG_NOTES of INSN.  */
2457 
2458 void
2459 remove_note (rtx_insn *insn, const_rtx note)
2460 {
2461   rtx link;
2462 
2463   if (note == NULL_RTX)
2464     return;
2465 
2466   if (REG_NOTES (insn) == note)
2467     REG_NOTES (insn) = XEXP (note, 1);
2468   else
2469     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2470       if (XEXP (link, 1) == note)
2471 	{
2472 	  XEXP (link, 1) = XEXP (note, 1);
2473 	  break;
2474 	}
2475 
2476   switch (REG_NOTE_KIND (note))
2477     {
2478     case REG_EQUAL:
2479     case REG_EQUIV:
2480       df_notes_rescan (insn);
2481       break;
2482     default:
2483       break;
2484     }
2485 }
2486 
2487 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2488    Return true if any note has been removed.  */
2489 
2490 bool
2491 remove_reg_equal_equiv_notes (rtx_insn *insn)
2492 {
2493   rtx *loc;
2494   bool ret = false;
2495 
2496   loc = &REG_NOTES (insn);
2497   while (*loc)
2498     {
2499       enum reg_note kind = REG_NOTE_KIND (*loc);
2500       if (kind == REG_EQUAL || kind == REG_EQUIV)
2501 	{
2502 	  *loc = XEXP (*loc, 1);
2503 	  ret = true;
2504 	}
2505       else
2506 	loc = &XEXP (*loc, 1);
2507     }
2508   return ret;
2509 }
2510 
2511 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */
2512 
2513 void
2514 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2515 {
2516   df_ref eq_use;
2517 
2518   if (!df)
2519     return;
2520 
2521   /* This loop is a little tricky.  We cannot just go down the chain because
2522      it is being modified by some actions in the loop.  So we just iterate
2523      over the head.  We plan to drain the list anyway.  */
2524   while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2525     {
2526       rtx_insn *insn = DF_REF_INSN (eq_use);
2527       rtx note = find_reg_equal_equiv_note (insn);
2528 
2529       /* This assert is generally triggered when someone deletes a REG_EQUAL
2530 	 or REG_EQUIV note by hacking the list manually rather than calling
2531 	 remove_note.  */
2532       gcc_assert (note);
2533 
2534       remove_note (insn, note);
2535     }
2536 }
2537 
2538 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2539    return true if it is found.  A simple equality test is used to determine if
2540    NODE matches.  */
2541 
2542 bool
2543 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2544 {
2545   const_rtx x;
2546 
2547   for (x = listp; x; x = XEXP (x, 1))
2548     if (node == XEXP (x, 0))
2549       return true;
2550 
2551   return false;
2552 }
2553 
2554 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2555    remove that entry from the list if it is found.
2556 
2557    A simple equality test is used to determine if NODE matches.  */
2558 
2559 void
2560 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2561 {
2562   rtx_expr_list *temp = *listp;
2563   rtx_expr_list *prev = NULL;
2564 
2565   while (temp)
2566     {
2567       if (node == temp->element ())
2568 	{
2569 	  /* Splice the node out of the list.  */
2570 	  if (prev)
2571 	    XEXP (prev, 1) = temp->next ();
2572 	  else
2573 	    *listp = temp->next ();
2574 
2575 	  return;
2576 	}
2577 
2578       prev = temp;
2579       temp = temp->next ();
2580     }
2581 }
2582 
2583 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2584    remove that entry from the list if it is found.
2585 
2586    A simple equality test is used to determine if NODE matches.  */
2587 
2588 void
2589 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2590 {
2591   rtx_insn_list *temp = *listp;
2592   rtx_insn_list *prev = NULL;
2593 
2594   while (temp)
2595     {
2596       if (node == temp->insn ())
2597 	{
2598 	  /* Splice the node out of the list.  */
2599 	  if (prev)
2600 	    XEXP (prev, 1) = temp->next ();
2601 	  else
2602 	    *listp = temp->next ();
2603 
2604 	  return;
2605 	}
2606 
2607       prev = temp;
2608       temp = temp->next ();
2609     }
2610 }
2611 
2612 /* Nonzero if X contains any volatile instructions.  These are instructions
2613    which may cause unpredictable machine state, and thus no instructions
2614    or register uses should be moved or combined across them.
2615    This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2616 
2617 int
2618 volatile_insn_p (const_rtx x)
2619 {
2620   const RTX_CODE code = GET_CODE (x);
2621   switch (code)
2622     {
2623     case LABEL_REF:
2624     case SYMBOL_REF:
2625     case CONST:
2626     CASE_CONST_ANY:
2627     case CC0:
2628     case PC:
2629     case REG:
2630     case SCRATCH:
2631     case CLOBBER:
2632     case ADDR_VEC:
2633     case ADDR_DIFF_VEC:
2634     case CALL:
2635     case MEM:
2636       return 0;
2637 
2638     case UNSPEC_VOLATILE:
2639       return 1;
2640 
2641     case ASM_INPUT:
2642     case ASM_OPERANDS:
2643       if (MEM_VOLATILE_P (x))
2644 	return 1;
2645 
2646     default:
2647       break;
2648     }
2649 
2650   /* Recursively scan the operands of this expression.  */
2651 
2652   {
2653     const char *const fmt = GET_RTX_FORMAT (code);
2654     int i;
2655 
2656     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2657       {
2658 	if (fmt[i] == 'e')
2659 	  {
2660 	    if (volatile_insn_p (XEXP (x, i)))
2661 	      return 1;
2662 	  }
2663 	else if (fmt[i] == 'E')
2664 	  {
2665 	    int j;
2666 	    for (j = 0; j < XVECLEN (x, i); j++)
2667 	      if (volatile_insn_p (XVECEXP (x, i, j)))
2668 		return 1;
2669 	  }
2670       }
2671   }
2672   return 0;
2673 }
2674 
2675 /* Nonzero if X contains any volatile memory references,
2676    UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
2677 
2678 int
2679 volatile_refs_p (const_rtx x)
2680 {
2681   const RTX_CODE code = GET_CODE (x);
2682   switch (code)
2683     {
2684     case LABEL_REF:
2685     case SYMBOL_REF:
2686     case CONST:
2687     CASE_CONST_ANY:
2688     case CC0:
2689     case PC:
2690     case REG:
2691     case SCRATCH:
2692     case CLOBBER:
2693     case ADDR_VEC:
2694     case ADDR_DIFF_VEC:
2695       return 0;
2696 
2697     case UNSPEC_VOLATILE:
2698       return 1;
2699 
2700     case MEM:
2701     case ASM_INPUT:
2702     case ASM_OPERANDS:
2703       if (MEM_VOLATILE_P (x))
2704 	return 1;
2705 
2706     default:
2707       break;
2708     }
2709 
2710   /* Recursively scan the operands of this expression.  */
2711 
2712   {
2713     const char *const fmt = GET_RTX_FORMAT (code);
2714     int i;
2715 
2716     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2717       {
2718 	if (fmt[i] == 'e')
2719 	  {
2720 	    if (volatile_refs_p (XEXP (x, i)))
2721 	      return 1;
2722 	  }
2723 	else if (fmt[i] == 'E')
2724 	  {
2725 	    int j;
2726 	    for (j = 0; j < XVECLEN (x, i); j++)
2727 	      if (volatile_refs_p (XVECEXP (x, i, j)))
2728 		return 1;
2729 	  }
2730       }
2731   }
2732   return 0;
2733 }
2734 
2735 /* Similar to above, except that it also rejects register pre- and post-
2736    incrementing.  */
2737 
2738 int
2739 side_effects_p (const_rtx x)
2740 {
2741   const RTX_CODE code = GET_CODE (x);
2742   switch (code)
2743     {
2744     case LABEL_REF:
2745     case SYMBOL_REF:
2746     case CONST:
2747     CASE_CONST_ANY:
2748     case CC0:
2749     case PC:
2750     case REG:
2751     case SCRATCH:
2752     case ADDR_VEC:
2753     case ADDR_DIFF_VEC:
2754     case VAR_LOCATION:
2755       return 0;
2756 
2757     case CLOBBER:
2758       /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
2759 	 when some combination can't be done.  If we see one, don't think
2760 	 that we can simplify the expression.  */
2761       return (GET_MODE (x) != VOIDmode);
2762 
2763     case PRE_INC:
2764     case PRE_DEC:
2765     case POST_INC:
2766     case POST_DEC:
2767     case PRE_MODIFY:
2768     case POST_MODIFY:
2769     case CALL:
2770     case UNSPEC_VOLATILE:
2771       return 1;
2772 
2773     case MEM:
2774     case ASM_INPUT:
2775     case ASM_OPERANDS:
2776       if (MEM_VOLATILE_P (x))
2777 	return 1;
2778 
2779     default:
2780       break;
2781     }
2782 
2783   /* Recursively scan the operands of this expression.  */
2784 
2785   {
2786     const char *fmt = GET_RTX_FORMAT (code);
2787     int i;
2788 
2789     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2790       {
2791 	if (fmt[i] == 'e')
2792 	  {
2793 	    if (side_effects_p (XEXP (x, i)))
2794 	      return 1;
2795 	  }
2796 	else if (fmt[i] == 'E')
2797 	  {
2798 	    int j;
2799 	    for (j = 0; j < XVECLEN (x, i); j++)
2800 	      if (side_effects_p (XVECEXP (x, i, j)))
2801 		return 1;
2802 	  }
2803       }
2804   }
2805   return 0;
2806 }
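
/* For example (illustrative), side_effects_p returns 1 for
   (post_inc:SI (reg:SI 100)) and for a volatile MEM, but 0 for
   (plus:SI (reg:SI 100) (const_int 1)).  */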
2807 
2808 /* Return nonzero if evaluating rtx X might cause a trap.
2809    FLAGS controls how to consider MEMs.  A nonzero value means the context
2810    of the access may have changed from the original, such that the
2811    address may have become invalid.  */
2812 
2813 int
2814 may_trap_p_1 (const_rtx x, unsigned flags)
2815 {
2816   int i;
2817   enum rtx_code code;
2818   const char *fmt;
2819 
2820   /* We make no distinction currently, but this function is part of
2821      the internal target-hooks ABI so we keep the parameter as
2822      "unsigned flags".  */
2823   bool code_changed = flags != 0;
2824 
2825   if (x == 0)
2826     return 0;
2827   code = GET_CODE (x);
2828   switch (code)
2829     {
2830       /* Handle these cases quickly.  */
2831     CASE_CONST_ANY:
2832     case SYMBOL_REF:
2833     case LABEL_REF:
2834     case CONST:
2835     case PC:
2836     case CC0:
2837     case REG:
2838     case SCRATCH:
2839       return 0;
2840 
2841     case UNSPEC:
2842       return targetm.unspec_may_trap_p (x, flags);
2843 
2844     case UNSPEC_VOLATILE:
2845     case ASM_INPUT:
2846     case TRAP_IF:
2847       return 1;
2848 
2849     case ASM_OPERANDS:
2850       return MEM_VOLATILE_P (x);
2851 
2852       /* Memory ref can trap unless it's a static var or a stack slot.  */
2853     case MEM:
2854       /* Recognize specific pattern of stack checking probes.  */
2855       if (flag_stack_check
2856 	  && MEM_VOLATILE_P (x)
2857 	  && XEXP (x, 0) == stack_pointer_rtx)
2858 	return 1;
2859       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2860 	     reference; moving it out of context, such as when moving code
2861 	     during optimization, might cause its address to become invalid.  */
2862 	  code_changed
2863 	  || !MEM_NOTRAP_P (x))
2864 	{
2865 	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2866 	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2867 					GET_MODE (x), code_changed);
2868 	}
2869 
2870       return 0;
2871 
2872       /* Division by a non-constant might trap.  */
2873     case DIV:
2874     case MOD:
2875     case UDIV:
2876     case UMOD:
2877       if (HONOR_SNANS (x))
2878 	return 1;
2879       if (FLOAT_MODE_P (GET_MODE (x)))
2880 	return flag_trapping_math;
2881       if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2882 	return 1;
2883       if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
2884 	{
2885 	  /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
2886 	  unsigned int n_elts;
2887 	  rtx op = XEXP (x, 1);
2888 	  if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
2889 	    {
2890 	      if (!CONST_VECTOR_DUPLICATE_P (op))
2891 		return 1;
2892 	      for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
2893 		if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
2894 		  return 1;
2895 	    }
2896 	  else
2897 	    for (unsigned i = 0; i < n_elts; i++)
2898 	      if (CONST_VECTOR_ELT (op, i) == const0_rtx)
2899 		return 1;
2900 	}
2901       break;
2902 
2903     case EXPR_LIST:
2904       /* An EXPR_LIST is used to represent a function call.  This
2905 	 certainly may trap.  */
2906       return 1;
2907 
2908     case GE:
2909     case GT:
2910     case LE:
2911     case LT:
2912     case LTGT:
2913     case COMPARE:
2914       /* Some floating point comparisons may trap.  */
2915       if (!flag_trapping_math)
2916 	break;
2917       /* ??? There is no machine independent way to check for tests that trap
2918 	 when COMPARE is used, though many targets do make this distinction.
2919 	 For instance, sparc uses CCFPE for compares which generate exceptions
2920 	 and CCFP for compares which do not generate exceptions.  */
2921       if (HONOR_NANS (x))
2922 	return 1;
2923       /* But often the compare has some CC mode, so check operand
2924 	 modes as well.  */
2925       if (HONOR_NANS (XEXP (x, 0))
2926 	  || HONOR_NANS (XEXP (x, 1)))
2927 	return 1;
2928       break;
2929 
2930     case EQ:
2931     case NE:
2932       if (HONOR_SNANS (x))
2933 	return 1;
2934       /* Often comparison is CC mode, so check operand modes.  */
2935       if (HONOR_SNANS (XEXP (x, 0))
2936 	  || HONOR_SNANS (XEXP (x, 1)))
2937 	return 1;
2938       break;
2939 
2940     case FIX:
2941       /* Conversion of floating point might trap.  */
2942       if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2943 	return 1;
2944       break;
2945 
2946     case NEG:
2947     case ABS:
2948     case SUBREG:
2949     case VEC_MERGE:
2950     case VEC_SELECT:
2951     case VEC_CONCAT:
2952     case VEC_DUPLICATE:
2953       /* These operations don't trap even with floating point.  */
2954       break;
2955 
2956     default:
2957       /* Any floating arithmetic may trap.  */
2958       if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2959 	return 1;
2960     }
2961 
2962   fmt = GET_RTX_FORMAT (code);
2963   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2964     {
2965       if (fmt[i] == 'e')
2966 	{
2967 	  if (may_trap_p_1 (XEXP (x, i), flags))
2968 	    return 1;
2969 	}
2970       else if (fmt[i] == 'E')
2971 	{
2972 	  int j;
2973 	  for (j = 0; j < XVECLEN (x, i); j++)
2974 	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2975 	      return 1;
2976 	}
2977     }
2978   return 0;
2979 }
2980 
2981 /* Return nonzero if evaluating rtx X might cause a trap.  */
2982 
2983 int
2984 may_trap_p (const_rtx x)
2985 {
2986   return may_trap_p_1 (x, 0);
2987 }
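
/* For example (illustrative):

     (div:SI (reg:SI 100) (const_int 0))   - may_trap_p returns 1
     (plus:SI (reg:SI 100) (const_int 4))  - may_trap_p returns 0
     (mem:SI (reg:SI 100))                 - typically 1, unless
					     MEM_NOTRAP_P is set.  */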
2988 
2989 /* Same as above, but additionally return nonzero if evaluating rtx X might
2990    cause a fault.  We define a fault for the purpose of this function as an
2991    erroneous execution condition that cannot be encountered during the normal
2992    execution of a valid program; the typical example is an unaligned memory
2993    access on a strict alignment machine.  The compiler guarantees that it
2994    doesn't generate code that will fault from a valid program, but this
2995    guarantee doesn't mean anything for individual instructions.  Consider
2996    the following example:
2997 
2998       struct S { int d; union { char *cp; int *ip; }; };
2999 
3000       int foo(struct S *s)
3001       {
3002 	if (s->d == 1)
3003 	  return *s->ip;
3004 	else
3005 	  return *s->cp;
3006       }
3007 
3008    on a strict alignment machine.  In a valid program, foo will never be
3009    invoked on a structure for which d is equal to 1 and the underlying
3010    unique field of the union not aligned on a 4-byte boundary, but the
3011    expression *s->ip might cause a fault if considered individually.
3012 
3013    At the RTL level, potentially problematic expressions will almost always
3014    satisfy may_trap_p; for example, the above dereference can be emitted as
3015    (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
3016    However, suppose that foo is inlined in a caller that causes s->cp to
3017    point to a local character variable and guarantees that s->d is not set
3018    to 1; foo may have been effectively translated into pseudo-RTL as:
3019 
3020       if ((reg:SI) == 1)
3021 	(set (reg:SI) (mem:SI (%fp - 7)))
3022       else
3023 	(set (reg:QI) (mem:QI (%fp - 7)))
3024 
3025    Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
3026    memory reference to a stack slot, but it will certainly cause a fault
3027    on a strict alignment machine.  */
3028 
3029 int
3030 may_trap_or_fault_p (const_rtx x)
3031 {
3032   return may_trap_p_1 (x, 1);
3033 }
3034 
3035 /* Replace any occurrence of FROM in X with TO.  The function does
3036    not descend into CONST_DOUBLEs when replacing.
3037 
3038    Note that copying is not done so X must not be shared unless all copies
3039    are to be modified.
3040 
3041    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3042    those pointer-equal ones.  */
3043 
3044 rtx
3045 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3046 {
3047   int i, j;
3048   const char *fmt;
3049 
3050   if (x == from)
3051     return to;
3052 
3053   /* Allow this function to make replacements in EXPR_LISTs.  */
3054   if (x == 0)
3055     return 0;
3056 
3057   if (all_regs
3058       && REG_P (x)
3059       && REG_P (from)
3060       && REGNO (x) == REGNO (from))
3061     {
3062       gcc_assert (GET_MODE (x) == GET_MODE (from));
3063       return to;
3064     }
3065   else if (GET_CODE (x) == SUBREG)
3066     {
3067       rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3068 
3069       if (CONST_SCALAR_INT_P (new_rtx))
3070 	{
3071 	  x = simplify_subreg (GET_MODE (x), new_rtx,
3072 			       GET_MODE (SUBREG_REG (x)),
3073 			       SUBREG_BYTE (x));
3074 	  gcc_assert (x);
3075 	}
3076       else
3077 	SUBREG_REG (x) = new_rtx;
3078 
3079       return x;
3080     }
3081   else if (GET_CODE (x) == ZERO_EXTEND)
3082     {
3083       rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3084 
3085       if (CONST_SCALAR_INT_P (new_rtx))
3086 	{
3087 	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3088 					new_rtx, GET_MODE (XEXP (x, 0)));
3089 	  gcc_assert (x);
3090 	}
3091       else
3092 	XEXP (x, 0) = new_rtx;
3093 
3094       return x;
3095     }
3096 
3097   fmt = GET_RTX_FORMAT (GET_CODE (x));
3098   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3099     {
3100       if (fmt[i] == 'e')
3101 	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3102       else if (fmt[i] == 'E')
3103 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3104 	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3105 					   from, to, all_regs);
3106     }
3107 
3108   return x;
3109 }
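
/* Usage sketch (illustrative only): rewrite every occurrence of pseudo
   FROM in the pattern of INSN to use TO instead; per the comment above,
   the pattern must not be shared with other insns.

     PATTERN (insn) = replace_rtx (PATTERN (insn), from, to, false);  */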
3110 
3111 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
3112    the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */
3113 
3114 void
3115 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3116 {
3117   /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
3118   rtx x = *loc;
3119   if (JUMP_TABLE_DATA_P (x))
3120     {
3121       x = PATTERN (x);
3122       rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3123       int len = GET_NUM_ELEM (vec);
3124       for (int i = 0; i < len; ++i)
3125 	{
3126 	  rtx ref = RTVEC_ELT (vec, i);
3127 	  if (XEXP (ref, 0) == old_label)
3128 	    {
3129 	      XEXP (ref, 0) = new_label;
3130 	      if (update_label_nuses)
3131 		{
3132 		  ++LABEL_NUSES (new_label);
3133 		  --LABEL_NUSES (old_label);
3134 		}
3135 	    }
3136 	}
3137       return;
3138     }
3139 
3140   /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3141      field.  This is not handled by the iterator because it doesn't
3142      handle unprinted ('0') fields.  */
3143   if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3144     JUMP_LABEL (x) = new_label;
3145 
3146   subrtx_ptr_iterator::array_type array;
3147   FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3148     {
3149       rtx *loc = *iter;
3150       if (rtx x = *loc)
3151 	{
3152 	  if (GET_CODE (x) == SYMBOL_REF
3153 	      && CONSTANT_POOL_ADDRESS_P (x))
3154 	    {
3155 	      rtx c = get_pool_constant (x);
3156 	      if (rtx_referenced_p (old_label, c))
3157 		{
3158 		  /* Create a copy of constant C; replace the label inside
3159 		     but do not update LABEL_NUSES because uses in the constant pool
3160 		     are not counted.  */
3161 		  rtx new_c = copy_rtx (c);
3162 		  replace_label (&new_c, old_label, new_label, false);
3163 
3164 		  /* Add the new constant NEW_C to the constant pool and replace
3165 		     the old reference to the constant with the new one.  */
3166 		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3167 		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3168 		}
3169 	    }
3170 
3171 	  if ((GET_CODE (x) == LABEL_REF
3172 	       || GET_CODE (x) == INSN_LIST)
3173 	      && XEXP (x, 0) == old_label)
3174 	    {
3175 	      XEXP (x, 0) = new_label;
3176 	      if (update_label_nuses)
3177 		{
3178 		  ++LABEL_NUSES (new_label);
3179 		  --LABEL_NUSES (old_label);
3180 		}
3181 	    }
3182 	}
3183     }
3184 }
3185 
3186 void
3187 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3188 		       rtx_insn *new_label, bool update_label_nuses)
3189 {
3190   rtx insn_as_rtx = insn;
3191   replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3192   gcc_checking_assert (insn_as_rtx == insn);
3193 }
3194 
3195 /* Return true if X is referenced in BODY.  */
3196 
3197 bool
3198 rtx_referenced_p (const_rtx x, const_rtx body)
3199 {
3200   subrtx_iterator::array_type array;
3201   FOR_EACH_SUBRTX (iter, array, body, ALL)
3202     if (const_rtx y = *iter)
3203       {
3204 	/* Check if a label_ref Y refers to label X.  */
3205 	if (GET_CODE (y) == LABEL_REF
3206 	    && LABEL_P (x)
3207 	    && label_ref_label (y) == x)
3208 	  return true;
3209 
3210 	if (rtx_equal_p (x, y))
3211 	  return true;
3212 
3213 	/* If Y is a reference to a pool constant, traverse the constant.  */
3214 	if (GET_CODE (y) == SYMBOL_REF
3215 	    && CONSTANT_POOL_ADDRESS_P (y))
3216 	  iter.substitute (get_pool_constant (y));
3217       }
3218   return false;
3219 }
3220 
3221 /* If INSN is a tablejump, return true and store the label (which precedes the
3222    jump table) in *LABELP and the jump table in *TABLEP.  Both may be NULL.  */
3223 
3224 bool
3225 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3226 	     rtx_jump_table_data **tablep)
3227 {
3228   if (!JUMP_P (insn))
3229     return false;
3230 
3231   rtx target = JUMP_LABEL (insn);
3232   if (target == NULL_RTX || ANY_RETURN_P (target))
3233     return false;
3234 
3235   rtx_insn *label = as_a<rtx_insn *> (target);
3236   rtx_insn *table = next_insn (label);
3237   if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3238     return false;
3239 
3240   if (labelp)
3241     *labelp = label;
3242   if (tablep)
3243     *tablep = as_a <rtx_jump_table_data *> (table);
3244   return true;
3245 }
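
/* Usage sketch (illustrative only): walk the label references in the
   jump table of a tablejump INSN.  handle_label is hypothetical.

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
	 rtvec labels = table->get_labels ();
	 for (int i = 0; i < GET_NUM_ELEM (labels); i++)
	   handle_label (XEXP (RTVEC_ELT (labels, i), 0));
       }  */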
3246 
3247 /* For INSN known to satisfy tablejump_p, determine if it actually is a
3248    CASESI.  Return the insn pattern if so, NULL_RTX otherwise.  */
3249 
3250 rtx
3251 tablejump_casesi_pattern (const rtx_insn *insn)
3252 {
3253   rtx tmp;
3254 
3255   if ((tmp = single_set (insn)) != NULL
3256       && SET_DEST (tmp) == pc_rtx
3257       && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3258       && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
3259     return tmp;
3260 
3261   return NULL_RTX;
3262 }
3263 
3264 /* A subroutine of computed_jump_p.  Return 1 if X contains a REG or MEM or
3265    constant that is not in the constant pool and not in the condition
3266    of an IF_THEN_ELSE.  */
3267 
3268 static int
3269 computed_jump_p_1 (const_rtx x)
3270 {
3271   const enum rtx_code code = GET_CODE (x);
3272   int i, j;
3273   const char *fmt;
3274 
3275   switch (code)
3276     {
3277     case LABEL_REF:
3278     case PC:
3279       return 0;
3280 
3281     case CONST:
3282     CASE_CONST_ANY:
3283     case SYMBOL_REF:
3284     case REG:
3285       return 1;
3286 
3287     case MEM:
3288       return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3289 		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3290 
3291     case IF_THEN_ELSE:
3292       return (computed_jump_p_1 (XEXP (x, 1))
3293 	      || computed_jump_p_1 (XEXP (x, 2)));
3294 
3295     default:
3296       break;
3297     }
3298 
3299   fmt = GET_RTX_FORMAT (code);
3300   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3301     {
3302       if (fmt[i] == 'e'
3303 	  && computed_jump_p_1 (XEXP (x, i)))
3304 	return 1;
3305 
3306       else if (fmt[i] == 'E')
3307 	for (j = 0; j < XVECLEN (x, i); j++)
3308 	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
3309 	    return 1;
3310     }
3311 
3312   return 0;
3313 }
3314 
3315 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3316 
3317    Tablejumps and casesi insns are not considered indirect jumps;
3318    we can recognize them by a (use (label_ref)).  */
3319 
3320 int
3321 computed_jump_p (const rtx_insn *insn)
3322 {
3323   int i;
3324   if (JUMP_P (insn))
3325     {
3326       rtx pat = PATTERN (insn);
3327 
3328       /* If we have a JUMP_LABEL set, we're not a computed jump.  */
3329       if (JUMP_LABEL (insn) != NULL)
3330 	return 0;
3331 
3332       if (GET_CODE (pat) == PARALLEL)
3333 	{
3334 	  int len = XVECLEN (pat, 0);
3335 	  int has_use_labelref = 0;
3336 
3337 	  for (i = len - 1; i >= 0; i--)
3338 	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3339 		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3340 		    == LABEL_REF))
3341 	      {
3342 	        has_use_labelref = 1;
3343 	        break;
3344 	      }
3345 
3346 	  if (! has_use_labelref)
3347 	    for (i = len - 1; i >= 0; i--)
3348 	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3349 		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3350 		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3351 		return 1;
3352 	}
3353       else if (GET_CODE (pat) == SET
3354 	       && SET_DEST (pat) == pc_rtx
3355 	       && computed_jump_p_1 (SET_SRC (pat)))
3356 	return 1;
3357     }
3358   return 0;
3359 }
3360 
3361 
3362 
3363 /* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
3364    the equivalent add insn and pass the result to FN, using DATA as the
3365    final argument.  */
3366 
3367 static int
3368 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3369 {
3370   rtx x = XEXP (mem, 0);
3371   switch (GET_CODE (x))
3372     {
3373     case PRE_INC:
3374     case POST_INC:
3375       {
3376 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3377 	rtx r1 = XEXP (x, 0);
3378 	rtx c = gen_int_mode (size, GET_MODE (r1));
3379 	return fn (mem, x, r1, r1, c, data);
3380       }
3381 
3382     case PRE_DEC:
3383     case POST_DEC:
3384       {
3385 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3386 	rtx r1 = XEXP (x, 0);
3387 	rtx c = gen_int_mode (-size, GET_MODE (r1));
3388 	return fn (mem, x, r1, r1, c, data);
3389       }
3390 
3391     case PRE_MODIFY:
3392     case POST_MODIFY:
3393       {
3394 	rtx r1 = XEXP (x, 0);
3395 	rtx add = XEXP (x, 1);
3396 	return fn (mem, x, r1, add, NULL, data);
3397       }
3398 
3399     default:
3400       gcc_unreachable ();
3401     }
3402 }
3403 
3404 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3405    For each such autoinc operation found, call FN, passing it
3406    the innermost enclosing MEM, the operation itself, the RTX modified
3407    by the operation, two RTXs (the second may be NULL) that, once
3408    added, represent the value to be held by the modified RTX
3409    afterwards, and DATA.  FN is to return 0 to continue the
3410    traversal or any other value to have it returned to the caller of
3411    for_each_inc_dec.  */
3412 
3413 int
3414 for_each_inc_dec (rtx x,
3415 		  for_each_inc_dec_fn fn,
3416 		  void *data)
3417 {
3418   subrtx_var_iterator::array_type array;
3419   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3420     {
3421       rtx mem = *iter;
3422       if (mem
3423 	  && MEM_P (mem)
3424 	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3425 	{
3426 	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3427 	  if (res != 0)
3428 	    return res;
3429 	  iter.skip_subrtxes ();
3430 	}
3431     }
3432   return 0;
3433 }
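
/* Usage sketch (illustrative only): count the auto-increment addresses
   in an insn pattern.  The callback below is hypothetical.

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
       return 0;   /* Zero continues the traversal.  */
     }

     int count = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &count);  */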
3434 
3435 
3436 /* Searches X for any reference to REGNO, returning the rtx of the
3437    reference found if any.  Otherwise, returns NULL_RTX.  */
3438 
3439 rtx
3440 regno_use_in (unsigned int regno, rtx x)
3441 {
3442   const char *fmt;
3443   int i, j;
3444   rtx tem;
3445 
3446   if (REG_P (x) && REGNO (x) == regno)
3447     return x;
3448 
3449   fmt = GET_RTX_FORMAT (GET_CODE (x));
3450   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3451     {
3452       if (fmt[i] == 'e')
3453 	{
3454 	  if ((tem = regno_use_in (regno, XEXP (x, i))))
3455 	    return tem;
3456 	}
3457       else if (fmt[i] == 'E')
3458 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3459 	  if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3460 	    return tem;
3461     }
3462 
3463   return NULL_RTX;
3464 }
3465 
3466 /* Return a value indicating whether OP, an operand of a commutative
3467    operation, is preferred as the first or second operand.  The more
3468    positive the value, the stronger the preference for being the first
3469    operand.  */
3470 
3471 int
3472 commutative_operand_precedence (rtx op)
3473 {
3474   enum rtx_code code = GET_CODE (op);
3475 
3476   /* Constants always become the second operand.  Prefer "nice" constants.  */
3477   if (code == CONST_INT)
3478     return -10;
3479   if (code == CONST_WIDE_INT)
3480     return -9;
3481   if (code == CONST_POLY_INT)
3482     return -8;
3483   if (code == CONST_DOUBLE)
3484     return -8;
3485   if (code == CONST_FIXED)
3486     return -8;
3487   op = avoid_constant_pool_reference (op);
3488   code = GET_CODE (op);
3489 
3490   switch (GET_RTX_CLASS (code))
3491     {
3492     case RTX_CONST_OBJ:
3493       if (code == CONST_INT)
3494 	return -7;
3495       if (code == CONST_WIDE_INT)
3496 	return -6;
3497       if (code == CONST_POLY_INT)
3498 	return -5;
3499       if (code == CONST_DOUBLE)
3500 	return -5;
3501       if (code == CONST_FIXED)
3502 	return -5;
3503       return -4;
3504 
3505     case RTX_EXTRA:
3506       /* SUBREGs of objects should come second.  */
3507       if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3508         return -3;
3509       return 0;
3510 
3511     case RTX_OBJ:
3512       /* Complex expressions should come first, so decrease the priority
3513          of objects.  Prefer pointer objects over non-pointer objects.  */
3514       if ((REG_P (op) && REG_POINTER (op))
3515 	  || (MEM_P (op) && MEM_POINTER (op)))
3516 	return -1;
3517       return -2;
3518 
3519     case RTX_COMM_ARITH:
3520       /* Prefer operands that are themselves commutative to be first.
3521          This helps to make things linear.  In particular,
3522          (and (and (reg) (reg)) (not (reg))) is canonical.  */
3523       return 4;
3524 
3525     case RTX_BIN_ARITH:
3526       /* If only one operand is a binary expression, it will be the first
3527          operand.  In particular,  (plus (minus (reg) (reg)) (neg (reg)))
3528          is canonical, although it will usually be further simplified.  */
3529       return 2;
3530 
3531     case RTX_UNARY:
3532       /* Then prefer NEG and NOT.  */
3533       if (code == NEG || code == NOT)
3534         return 1;
3535       /* FALLTHRU */
3536 
3537     default:
3538       return 0;
3539     }
3540 }
3541 
3542 /* Return 1 iff it is necessary to swap the operands of a commutative
3543    operation in order to canonicalize the expression.  */
3544 
3545 bool
3546 swap_commutative_operands_p (rtx x, rtx y)
3547 {
3548   return (commutative_operand_precedence (x)
3549 	  < commutative_operand_precedence (y));
3550 }
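
/* For example (illustrative), given (plus (const_int 4) (reg)), the
   CONST_INT has precedence -10 and the REG -2 (or -1 if REG_POINTER),
   so swap_commutative_operands_p returns true and canonicalization
   yields (plus (reg) (const_int 4)).  */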
3551 
3552 /* Return 1 if X is an autoincrement side effect and the register is
3553    not the stack pointer.  */
3554 int
3555 auto_inc_p (const_rtx x)
3556 {
3557   switch (GET_CODE (x))
3558     {
3559     case PRE_INC:
3560     case POST_INC:
3561     case PRE_DEC:
3562     case POST_DEC:
3563     case PRE_MODIFY:
3564     case POST_MODIFY:
3565       /* There are no REG_INC notes for SP.  */
3566       if (XEXP (x, 0) != stack_pointer_rtx)
3567 	return 1;
3568     default:
3569       break;
3570     }
3571   return 0;
3572 }
3573 
3574 /* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
3575 int
3576 loc_mentioned_in_p (rtx *loc, const_rtx in)
3577 {
3578   enum rtx_code code;
3579   const char *fmt;
3580   int i, j;
3581 
3582   if (!in)
3583     return 0;
3584 
3585   code = GET_CODE (in);
3586   fmt = GET_RTX_FORMAT (code);
3587   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3588     {
3589       if (fmt[i] == 'e')
3590 	{
3591 	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3592 	    return 1;
3593 	}
3594       else if (fmt[i] == 'E')
3595 	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3596 	  if (loc == &XVECEXP (in, i, j)
3597 	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3598 	    return 1;
3599     }
3600   return 0;
3601 }
3602 
3603 /* Reinterpret a subreg as a bit extraction from an integer and return
3604    the position of the least significant bit of the extracted value.
3605    In other words, if the extraction were performed as a shift right
3606    and mask, return the number of bits to shift right.
3607 
3608    The outer value of the subreg has OUTER_BYTES bytes and starts at
3609    byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes.  */
3610 
3611 poly_uint64
3612 subreg_size_lsb (poly_uint64 outer_bytes,
3613 		 poly_uint64 inner_bytes,
3614 		 poly_uint64 subreg_byte)
3615 {
3616   poly_uint64 subreg_end, trailing_bytes, byte_pos;
3617 
3618   /* A paradoxical subreg begins at bit position 0.  */
3619   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3620   if (maybe_gt (outer_bytes, inner_bytes))
3621     {
3622       gcc_checking_assert (known_eq (subreg_byte, 0U));
3623       return 0;
3624     }
3625 
3626   subreg_end = subreg_byte + outer_bytes;
3627   trailing_bytes = inner_bytes - subreg_end;
3628   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3629     byte_pos = trailing_bytes;
3630   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3631     byte_pos = subreg_byte;
3632   else
3633     {
3634       /* When bytes and words have opposite endianness, we must be able
3635 	 to split offsets into words and bytes at compile time.  */
3636       poly_uint64 leading_word_part
3637 	= force_align_down (subreg_byte, UNITS_PER_WORD);
3638       poly_uint64 trailing_word_part
3639 	= force_align_down (trailing_bytes, UNITS_PER_WORD);
3640       /* If the subreg crosses a word boundary ensure that
3641 	 it also begins and ends on a word boundary.  */
3642       gcc_assert (known_le (subreg_end - leading_word_part,
3643 			    (unsigned int) UNITS_PER_WORD)
3644 		  || (known_eq (leading_word_part, subreg_byte)
3645 		      && known_eq (trailing_word_part, trailing_bytes)));
3646       if (WORDS_BIG_ENDIAN)
3647 	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3648       else
3649 	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3650     }
3651 
3652   return byte_pos * BITS_PER_UNIT;
3653 }
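
/* For example (illustrative), on a fully little-endian target a
   (subreg:SI (reg:DI ...) 4) has OUTER_BYTES 4, INNER_BYTES 8 and
   SUBREG_BYTE 4, so the extracted value starts 4 * BITS_PER_UNIT = 32
   bits up: the equivalent of a 32-bit shift right and mask.  On a
   fully big-endian target the same byte offset selects the low part,
   giving bit position 0.  */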
3654 
3655 /* Given a subreg X, return the bit offset where the subreg begins
3656    (counting from the least significant bit of the reg).  */
3657 
3658 poly_uint64
3659 subreg_lsb (const_rtx x)
3660 {
3661   return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3662 		       SUBREG_BYTE (x));
3663 }
3664 
3665 /* Return the subreg byte offset for a subreg whose outer value has
3666    OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3667    there are LSB_SHIFT *bits* between the lsb of the outer value and the
3668    lsb of the inner value.  This is the inverse of the calculation
3669    performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */
3670 
3671 poly_uint64
3672 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3673 			     poly_uint64 lsb_shift)
3674 {
3675   /* A paradoxical subreg begins at bit position 0.  */
3676   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3677   if (maybe_gt (outer_bytes, inner_bytes))
3678     {
3679       gcc_checking_assert (known_eq (lsb_shift, 0U));
3680       return 0;
3681     }
3682 
3683   poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3684   poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3685   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3686     return upper_bytes;
3687   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3688     return lower_bytes;
3689   else
3690     {
3691       /* When bytes and words have opposite endianness, we must be able
3692 	 to split offsets into words and bytes at compile time.  */
3693       poly_uint64 lower_word_part = force_align_down (lower_bytes,
3694 						      UNITS_PER_WORD);
3695       poly_uint64 upper_word_part = force_align_down (upper_bytes,
3696 						      UNITS_PER_WORD);
3697       if (WORDS_BIG_ENDIAN)
3698 	return upper_word_part + (lower_bytes - lower_word_part);
3699       else
3700 	return lower_word_part + (upper_bytes - upper_word_part);
3701     }
3702 }
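
/* For example (illustrative), inverting the little-endian case above:
   subreg_size_offset_from_lsb (4, 8, 32) yields byte offset 4.  */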
3703 
3704 /* Fill in information about a subreg of a hard register.
3705    xregno - A regno of an inner hard subreg_reg (or what will become one).
3706    xmode  - The mode of xregno.
3707    offset - The byte offset.
3708    ymode  - The mode of a top level SUBREG (or what may become one).
3709    info   - Pointer to structure to fill in.
3710 
3711    Rather than considering one particular inner register (and thus one
3712    particular "outer" register) in isolation, this function really uses
3713    XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
3714    function does not check whether adding INFO->offset to XREGNO gives
3715    a valid hard register; even if INFO->offset + XREGNO is out of range,
3716    there might be another register of the same type that is in range.
3717    Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3718    the new register, since that can depend on things like whether the final
3719    register number is even or odd.  Callers that want to check whether
3720    this particular subreg can be replaced by a simple (reg ...) should
3721    use simplify_subreg_regno.  */
3722 
3723 void
3724 subreg_get_info (unsigned int xregno, machine_mode xmode,
3725 		 poly_uint64 offset, machine_mode ymode,
3726 		 struct subreg_info *info)
3727 {
3728   unsigned int nregs_xmode, nregs_ymode;
3729 
3730   gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3731 
3732   poly_uint64 xsize = GET_MODE_SIZE (xmode);
3733   poly_uint64 ysize = GET_MODE_SIZE (ymode);
3734 
3735   bool rknown = false;
3736 
3737   /* If the register representation of a non-scalar mode has holes in it,
3738      we expect the scalar units to be concatenated together, with the holes
3739      distributed evenly among the scalar units.  Each scalar unit must occupy
3740      at least one register.  */
3741   if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3742     {
3743       /* As a consequence, we must be dealing with a constant number of
3744 	 scalars, and thus a constant offset and number of units.  */
3745       HOST_WIDE_INT coffset = offset.to_constant ();
3746       HOST_WIDE_INT cysize = ysize.to_constant ();
3747       nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3748       unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3749       scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3750       gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3751       gcc_assert (nregs_xmode
3752 		  == (nunits
3753 		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3754       gcc_assert (hard_regno_nregs (xregno, xmode)
3755 		  == hard_regno_nregs (xregno, xmode_unit) * nunits);
3756 
3757       /* You can only ask for a SUBREG of a value with holes in the middle
3758 	 if you don't cross the holes.  (Such a SUBREG should be done by
3759 	 picking a different register class, or doing it in memory if
3760 	 necessary.)  An example of a value with holes is XCmode on 32-bit
3761 	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3762 	 3 for each part, but in memory it's two 128-bit parts.
3763 	 Padding is assumed to be at the end (not necessarily the 'high part')
3764 	 of each unit.  */
3765       if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3766 	  && (coffset / GET_MODE_SIZE (xmode_unit)
3767 	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3768 	{
3769 	  info->representable_p = false;
3770 	  rknown = true;
3771 	}
3772     }
3773   else
3774     nregs_xmode = hard_regno_nregs (xregno, xmode);
3775 
3776   nregs_ymode = hard_regno_nregs (xregno, ymode);
3777 
3778   /* Subreg sizes must be ordered, so that we can tell whether they are
3779      partial, paradoxical or complete.  */
3780   gcc_checking_assert (ordered_p (xsize, ysize));
3781 
3782   /* Paradoxical subregs are otherwise valid.  */
3783   if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3784     {
3785       info->representable_p = true;
3786       /* If this is a big endian paradoxical subreg, which uses more
3787 	 actual hard registers than the original register, we must
3788 	 return a negative offset so that we find the proper highpart
3789 	 of the register.
3790 
3791 	 We assume that the ordering of registers within a multi-register
3792 	 value has a consistent endianness: if bytes and register words
3793 	 have different endianness, the hard registers that make up a
3794 	 multi-register value must be at least word-sized.  */
3795       if (REG_WORDS_BIG_ENDIAN)
3796 	info->offset = (int) nregs_xmode - (int) nregs_ymode;
3797       else
3798 	info->offset = 0;
3799       info->nregs = nregs_ymode;
3800       return;
3801     }
3802 
3803   /* If registers store different numbers of bits in the different
3804      modes, we cannot generally form this subreg.  */
3805   poly_uint64 regsize_xmode, regsize_ymode;
3806   if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3807       && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3808       && multiple_p (xsize, nregs_xmode, &regsize_xmode)
3809       && multiple_p (ysize, nregs_ymode, &regsize_ymode))
3810     {
3811       if (!rknown
3812 	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3813 	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3814 	{
3815 	  info->representable_p = false;
3816 	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3817 	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3818 	    /* Checked by validate_subreg.  We must know at compile time
3819 	       which inner registers are being accessed.  */
3820 	    gcc_unreachable ();
3821 	  return;
3822 	}
3823       /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3824 	 would go outside of XMODE.  */
3825       if (!rknown && maybe_gt (ysize + offset, xsize))
3826 	{
3827 	  info->representable_p = false;
3828 	  info->nregs = nregs_ymode;
3829 	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3830 	    /* Checked by validate_subreg.  We must know at compile time
3831 	       which inner registers are being accessed.  */
3832 	    gcc_unreachable ();
3833 	  return;
3834 	}
3835       /* Quick exit for the simple and common case of extracting whole
3836 	 subregisters from a multiregister value.  */
3837       /* ??? It would be better to integrate this into the code below,
3838 	 if we can generalize the concept enough and figure out how
3839 	 odd-sized modes can coexist with the other weird cases we support.  */
3840       HOST_WIDE_INT count;
3841       if (!rknown
3842 	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3843 	  && known_eq (regsize_xmode, regsize_ymode)
3844 	  && constant_multiple_p (offset, regsize_ymode, &count))
3845 	{
3846 	  info->representable_p = true;
3847 	  info->nregs = nregs_ymode;
3848 	  info->offset = count;
3849 	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3850 	  return;
3851 	}
3852     }
3853 
3854   /* Lowpart subregs are otherwise valid.  */
3855   if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3856     {
3857       info->representable_p = true;
3858       rknown = true;
3859 
3860       if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3861 	{
3862 	  info->offset = 0;
3863 	  info->nregs = nregs_ymode;
3864 	  return;
3865 	}
3866     }
3867 
3868   /* Set NUM_BLOCKS to the number of independently-representable YMODE
3869      values there are in (reg:XMODE XREGNO).  We can view the register
3870      as consisting of this number of independent "blocks", where each
3871      block occupies NREGS_YMODE registers and contains exactly one
3872      representable YMODE value.  */
3873   gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3874   unsigned int num_blocks = nregs_xmode / nregs_ymode;
3875 
3876   /* Calculate the number of bytes in each block.  This must always
3877      be exact, otherwise we don't know how to verify the constraint.
3878      These conditions may be relaxed but subreg_regno_offset would
3879      need to be redesigned.  */
3880   poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3881 
3882   /* Get the number of the first block that contains the subreg and the byte
3883      offset of the subreg from the start of that block.  */
3884   unsigned int block_number;
3885   poly_uint64 subblock_offset;
3886   if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3887 			&subblock_offset))
3888     /* Checked by validate_subreg.  We must know at compile time which
3889        inner registers are being accessed.  */
3890     gcc_unreachable ();
3891 
3892   if (!rknown)
3893     {
3894       /* Only the lowpart of each block is representable.  */
3895       info->representable_p
3896 	= known_eq (subblock_offset,
3897 		    subreg_size_lowpart_offset (ysize, bytes_per_block));
3898       rknown = true;
3899     }
3900 
3901   /* We assume that the ordering of registers within a multi-register
3902      value has a consistent endianness: if bytes and register words
3903      have different endianness, the hard registers that make up a
3904      multi-register value must be at least word-sized.  */
3905   if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3906     /* The block number we calculated above followed memory endianness.
3907        Convert it to register endianness by counting back from the end.
3908        (Note that, because of the assumption above, each block must be
3909        at least word-sized.)  */
3910     info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3911   else
3912     info->offset = block_number * nregs_ymode;
3913   info->nregs = nregs_ymode;
3914 }
3915 
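/* Illustrative sketch (not part of the original source) of how a caller
   typically consumes the structure filled in above; XREGNO, XMODE,
   OFFSET and YMODE stand for the caller's actual values, and
   process_hard_regs is a hypothetical placeholder.  */
#if 0
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (info.representable_p)
    /* The subreg occupies info.nregs hard registers starting at
       xregno + info.offset.  */
    process_hard_regs (xregno + info.offset, info.nregs);
#endif
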
3916 /* This function returns the regno offset of a subreg expression.
3917    xregno - A regno of an inner hard subreg_reg (or what will become one).
3918    xmode  - The mode of xregno.
3919    offset - The byte offset.
3920    ymode  - The mode of a top level SUBREG (or what may become one).
3921    RETURN - The regno offset which would be used.  */
3922 unsigned int
3923 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3924 		     poly_uint64 offset, machine_mode ymode)
3925 {
3926   struct subreg_info info;
3927   subreg_get_info (xregno, xmode, offset, ymode, &info);
3928   return info.offset;
3929 }
3930 
3931 /* This function returns true when the offset is representable via
3932    subreg_offset in the given regno.
3933    xregno - A regno of an inner hard subreg_reg (or what will become one).
3934    xmode  - The mode of xregno.
3935    offset - The byte offset.
3936    ymode  - The mode of a top level SUBREG (or what may become one).
3937    RETURN - Whether the offset is representable.  */
3938 bool
3939 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3940 			       poly_uint64 offset, machine_mode ymode)
3941 {
3942   struct subreg_info info;
3943   subreg_get_info (xregno, xmode, offset, ymode, &info);
3944   return info.representable_p;
3945 }
3946 
3947 /* Return the number of a YMODE register to which
3948 
3949        (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3950 
3951    can be simplified.  Return -1 if the subreg can't be simplified.
3952 
3953    XREGNO is a hard register number.  */
3954 
3955 int
3956 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3957 		       poly_uint64 offset, machine_mode ymode)
3958 {
3959   struct subreg_info info;
3960   unsigned int yregno;
3961 
3962   /* Give the backend a chance to disallow the mode change.  */
3963   if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3964       && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3965       && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode))
3966     return -1;
3967 
3968   /* We shouldn't simplify stack-related registers.  */
3969   if ((!reload_completed || frame_pointer_needed)
3970       && xregno == FRAME_POINTER_REGNUM)
3971     return -1;
3972 
3973   if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3974       && xregno == ARG_POINTER_REGNUM)
3975     return -1;
3976 
3977   if (xregno == STACK_POINTER_REGNUM
3978       /* We should convert the hard stack register in LRA if it is
3979 	 possible.  */
3980       && ! lra_in_progress)
3981     return -1;
3982 
3983   /* Try to get the register offset.  */
3984   subreg_get_info (xregno, xmode, offset, ymode, &info);
3985   if (!info.representable_p)
3986     return -1;
3987 
3988   /* Make sure that the offsetted register value is in range.  */
3989   yregno = xregno + info.offset;
3990   if (!HARD_REGISTER_NUM_P (yregno))
3991     return -1;
3992 
3993   /* See whether (reg:YMODE YREGNO) is valid.
3994 
3995      ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3996      This is a kludge to work around how complex FP arguments are passed
3997      on IA-64 and should be fixed.  See PR target/49226.  */
3998   if (!targetm.hard_regno_mode_ok (yregno, ymode)
3999       && targetm.hard_regno_mode_ok (xregno, xmode))
4000     return -1;
4001 
4002   return (int) yregno;
4003 }
4004 
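/* Illustrative sketch (not part of the original source): replacing a
   hard-register subreg with a plain REG when the function above accepts
   the simplification.  */
#if 0
  int yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
  if (yregno >= 0)
    x = gen_rtx_REG (ymode, yregno);
#endif
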
4005 /* Return the final regno that a subreg expression refers to.  */
4006 unsigned int
4007 subreg_regno (const_rtx x)
4008 {
4009   unsigned int ret;
4010   rtx subreg = SUBREG_REG (x);
4011   int regno = REGNO (subreg);
4012 
4013   ret = regno + subreg_regno_offset (regno,
4014 				     GET_MODE (subreg),
4015 				     SUBREG_BYTE (x),
4016 				     GET_MODE (x));
4017   return ret;
4018 
4019 }
4020 
4021 /* Return the number of registers that a subreg expression refers
4022    to.  */
4023 unsigned int
4024 subreg_nregs (const_rtx x)
4025 {
4026   return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4027 }
4028 
4029 /* Return the number of registers that the subreg expression X refers
4030    to, where REGNO is the number of its inner register.  This is a copy
4031    of rtlanal.c:subreg_nregs, changed so that the regno can be passed in.  */
4032 
4033 unsigned int
4034 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4035 {
4036   struct subreg_info info;
4037   rtx subreg = SUBREG_REG (x);
4038 
4039   subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4040 		   &info);
4041   return info.nregs;
4042 }
4043 
4044 struct parms_set_data
4045 {
4046   int nregs;
4047   HARD_REG_SET regs;
4048 };
4049 
4050 /* Helper function for noticing stores to parameter registers.  */
4051 static void
4052 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4053 {
4054   struct parms_set_data *const d = (struct parms_set_data *) data;
4055   if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4056       && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4057     {
4058       CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4059       d->nregs--;
4060     }
4061 }
4062 
4063 /* Look backward for the first parameter to be loaded.
4064    Note that loads of all parameters will not necessarily be
4065    found if CSE has eliminated some of them (e.g., an argument
4066    to the outer function is passed down as a parameter).
4067    Do not skip BOUNDARY.  */
4068 rtx_insn *
4069 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4070 {
4071   struct parms_set_data parm;
4072   rtx p;
4073   rtx_insn *before, *first_set;
4074 
4075   /* Since different machines initialize their parameter registers
4076      in different orders, assume nothing.  Collect the set of all
4077      parameter registers.  */
4078   CLEAR_HARD_REG_SET (parm.regs);
4079   parm.nregs = 0;
4080   for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4081     if (GET_CODE (XEXP (p, 0)) == USE
4082 	&& REG_P (XEXP (XEXP (p, 0), 0))
4083 	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4084       {
4085 	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4086 
4087 	/* We only care about registers which can hold function
4088 	   arguments.  */
4089 	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4090 	  continue;
4091 
4092 	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4093 	parm.nregs++;
4094       }
4095   before = call_insn;
4096   first_set = call_insn;
4097 
4098   /* Search backward for the first set of a register in this set.  */
4099   while (parm.nregs && before != boundary)
4100     {
4101       before = PREV_INSN (before);
4102 
4103       /* It is possible that some loads got CSEed from one call to
4104          another.  Stop in that case.  */
4105       if (CALL_P (before))
4106 	break;
4107 
4108       /* Our caller must either ensure that we will find all sets
4109          (in case the code has not been optimized yet), or take care
4110          of possible labels by setting BOUNDARY to the preceding
4111          CODE_LABEL.  */
4112       if (LABEL_P (before))
4113 	{
4114 	  gcc_assert (before == boundary);
4115 	  break;
4116 	}
4117 
4118       if (INSN_P (before))
4119 	{
4120 	  int nregs_old = parm.nregs;
4121 	  note_stores (before, parms_set, &parm);
4122 	  /* If we found something that did not set a parameter reg,
4123 	     we're done.  Do not keep going, as that might result
4124 	     in hoisting an insn before the setting of a pseudo
4125 	     that is used by the hoisted insn. */
4126 	  if (nregs_old != parm.nregs)
4127 	    first_set = before;
4128 	  else
4129 	    break;
4130 	}
4131     }
4132   return first_set;
4133 }
4134 
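/* Illustrative sketch (not part of the original source): a pass that
   must insert code before CALL_INSN, but after the loads of its
   parameter registers, can use the insn returned above as the insertion
   point; SEQ is a hypothetical placeholder for the new code.  */
#if 0
  rtx_insn *limit = find_first_parameter_load (call_insn, boundary);
  emit_insn_before (seq, limit);
#endif
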
4135 /* Return true if we should avoid inserting code between INSN and preceding
4136    call instruction.  */
4137 
4138 bool
4139 keep_with_call_p (const rtx_insn *insn)
4140 {
4141   rtx set;
4142 
4143   if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4144     {
4145       if (REG_P (SET_DEST (set))
4146 	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4147 	  && fixed_regs[REGNO (SET_DEST (set))]
4148 	  && general_operand (SET_SRC (set), VOIDmode))
4149 	return true;
4150       if (REG_P (SET_SRC (set))
4151 	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4152 	  && REG_P (SET_DEST (set))
4153 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4154 	return true;
4155       /* There may be a stack pop just after the call and before the store
4156 	 of the return register.  Search for the actual store when deciding
4157 	 if we can break or not.  */
4158       if (SET_DEST (set) == stack_pointer_rtx)
4159 	{
4160 	  /* This CONST_CAST is okay because next_nonnote_insn just
4161 	     returns its argument and we assign it to a const_rtx
4162 	     variable.  */
4163 	  const rtx_insn *i2
4164 	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4165 	  if (i2 && keep_with_call_p (i2))
4166 	    return true;
4167 	}
4168     }
4169   return false;
4170 }
4171 
4172 /* Return true if LABEL is a target of JUMP_INSN.  This applies only
4173    to non-complex jumps.  That is, direct unconditional, conditional,
4174    and tablejumps, but not computed jumps or returns.  It also does
4175    not apply to the fallthru case of a conditional jump.  */
4176 
4177 bool
4178 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4179 {
4180   rtx tmp = JUMP_LABEL (jump_insn);
4181   rtx_jump_table_data *table;
4182 
4183   if (label == tmp)
4184     return true;
4185 
4186   if (tablejump_p (jump_insn, NULL, &table))
4187     {
4188       rtvec vec = table->get_labels ();
4189       int i, veclen = GET_NUM_ELEM (vec);
4190 
4191       for (i = 0; i < veclen; ++i)
4192 	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4193 	  return true;
4194     }
4195 
4196   if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4197     return true;
4198 
4199   return false;
4200 }
4201 
4202 
4203 /* Return an estimate of the cost of computing rtx X.
4204    One use is in cse, to decide which expression to keep in the hash table.
4205    Another is in rtl generation, to pick the cheapest way to multiply.
4206    Other uses like the latter are expected in the future.
4207 
4208    X appears as operand OPNO in an expression with code OUTER_CODE.
4209    SPEED specifies whether costs optimized for speed or size should
4210    be returned.  */
4211 
4212 int
4213 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4214 	  int opno, bool speed)
4215 {
4216   int i, j;
4217   enum rtx_code code;
4218   const char *fmt;
4219   int total;
4220   int factor;
4221   unsigned mode_size;
4222 
4223   if (x == 0)
4224     return 0;
4225 
4226   if (GET_CODE (x) == SET)
4227     /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4228        the mode for the factor.  */
4229     mode = GET_MODE (SET_DEST (x));
4230   else if (GET_MODE (x) != VOIDmode)
4231     mode = GET_MODE (x);
4232 
4233   mode_size = estimated_poly_value (GET_MODE_SIZE (mode));
4234 
4235   /* A size N times larger than UNITS_PER_WORD likely needs N times as
4236      many insns, taking N times as long.  */
4237   factor = mode_size > UNITS_PER_WORD ? mode_size / UNITS_PER_WORD : 1;
4238 
4239   /* Compute the default costs of certain things.
4240      Note that targetm.rtx_costs can override the defaults.  */
4241 
4242   code = GET_CODE (x);
4243   switch (code)
4244     {
4245     case MULT:
4246       /* Multiplication has time-complexity O(N*N), where N is the
4247 	 number of units (translated from digits) when using
4248 	 schoolbook long multiplication.  */
4249       total = factor * factor * COSTS_N_INSNS (5);
4250       break;
4251     case DIV:
4252     case UDIV:
4253     case MOD:
4254     case UMOD:
4255       /* Similarly, complexity for schoolbook long division.  */
4256       total = factor * factor * COSTS_N_INSNS (7);
4257       break;
4258     case USE:
4259       /* Used in combine.c as a marker.  */
4260       total = 0;
4261       break;
4262     default:
4263       total = factor * COSTS_N_INSNS (1);
4264     }
4265 
4266   switch (code)
4267     {
4268     case REG:
4269       return 0;
4270 
4271     case SUBREG:
4272       total = 0;
4273       /* If we can't tie these modes, make this expensive.  The larger
4274 	 the mode, the more expensive it is.  */
4275       if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4276 	return COSTS_N_INSNS (2 + factor);
4277       break;
4278 
4279     case TRUNCATE:
4280       if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4281 	{
4282 	  total = 0;
4283 	  break;
4284 	}
4285       /* FALLTHRU */
4286     default:
4287       if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4288 	return total;
4289       break;
4290     }
4291 
4292   /* Sum the costs of the sub-rtx's, plus cost of this operation,
4293      which is already in total.  */
4294 
4295   fmt = GET_RTX_FORMAT (code);
4296   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4297     if (fmt[i] == 'e')
4298       total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4299     else if (fmt[i] == 'E')
4300       for (j = 0; j < XVECLEN (x, i); j++)
4301 	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4302 
4303   return total;
4304 }
4305 
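/* Worked example (illustrative, not part of the original source): for a
   DImode multiply on a 32-bit target, mode_size is 8 and UNITS_PER_WORD
   is 4, so FACTOR is 2 and the default estimate is
   2 * 2 * COSTS_N_INSNS (5) = COSTS_N_INSNS (20), plus the cost of the
   operands (0 for REGs), unless targetm.rtx_costs overrides it.  */
#if 0
  rtx mul = gen_rtx_MULT (DImode, gen_rtx_REG (DImode, 0),
			  gen_rtx_REG (DImode, 1));
  int cost = rtx_cost (mul, DImode, SET, 1, true);
#endif
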
4306 /* Fill in the structure C with information about both speed and size rtx
4307    costs for X, which is operand OPNO in an expression with code OUTER.  */
4308 
4309 void
4310 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4311 		   struct full_rtx_costs *c)
4312 {
4313   c->speed = rtx_cost (x, mode, outer, opno, true);
4314   c->size = rtx_cost (x, mode, outer, opno, false);
4315 }
4316 
4317 
4318 /* Return the cost of address expression X.
4319    Expect that X is a properly formed address reference.
4320 
4321    The SPEED parameter specifies whether costs optimized for speed or
4322    size should be returned.  */
4323 
4324 int
4325 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4326 {
4327   /* We may be asked for the cost of various unusual addresses, such as
4328      operands of a push instruction.  It is not worthwhile to complicate
4329      the target hook with such cases.  */
4330 
4331   if (!memory_address_addr_space_p (mode, x, as))
4332     return 1000;
4333 
4334   return targetm.address_cost (x, mode, as, speed);
4335 }
4336 
4337 /* If the target doesn't override, compute the cost as with arithmetic.  */
4338 
4339 int
4340 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4341 {
4342   return rtx_cost (x, Pmode, MEM, 0, speed);
4343 }
4344 
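/* Illustrative sketch (not part of the original source): querying the
   speed cost of a base-plus-displacement address in the generic address
   space.  A malformed address would get the arbitrary penalty of 1000
   above.  */
#if 0
  rtx addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 0), GEN_INT (4));
  int cost = address_cost (addr, SImode, ADDR_SPACE_GENERIC, true);
#endif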
4345 
4346 unsigned HOST_WIDE_INT
4347 nonzero_bits (const_rtx x, machine_mode mode)
4348 {
4349   if (mode == VOIDmode)
4350     mode = GET_MODE (x);
4351   scalar_int_mode int_mode;
4352   if (!is_a <scalar_int_mode> (mode, &int_mode))
4353     return GET_MODE_MASK (mode);
4354   return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4355 }
4356 
4357 unsigned int
4358 num_sign_bit_copies (const_rtx x, machine_mode mode)
4359 {
4360   if (mode == VOIDmode)
4361     mode = GET_MODE (x);
4362   scalar_int_mode int_mode;
4363   if (!is_a <scalar_int_mode> (mode, &int_mode))
4364     return 1;
4365   return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4366 }
4367 
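/* Illustrative sketch (not part of the original source): a sign
   extension guarantees that every bit above the inner mode equals the
   sign bit, so on a 64-bit target the result here is at least
   64 - 32 + 1 = 33.  */
#if 0
  rtx ext = gen_rtx_SIGN_EXTEND (DImode, gen_rtx_REG (SImode, 0));
  unsigned int copies = num_sign_bit_copies (ext, DImode);
#endif
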
4368 /* Return true if nonzero_bits1 might recurse into both operands
4369    of X.  */
4370 
4371 static inline bool
4372 nonzero_bits_binary_arith_p (const_rtx x)
4373 {
4374   if (!ARITHMETIC_P (x))
4375     return false;
4376   switch (GET_CODE (x))
4377     {
4378     case AND:
4379     case XOR:
4380     case IOR:
4381     case UMIN:
4382     case UMAX:
4383     case SMIN:
4384     case SMAX:
4385     case PLUS:
4386     case MINUS:
4387     case MULT:
4388     case DIV:
4389     case UDIV:
4390     case MOD:
4391     case UMOD:
4392       return true;
4393     default:
4394       return false;
4395     }
4396 }
4397 
4398 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4399    It avoids exponential behavior in nonzero_bits1 when X has
4400    identical subexpressions on the first or the second level.  */
4401 
4402 static unsigned HOST_WIDE_INT
4403 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4404 		     machine_mode known_mode,
4405 		     unsigned HOST_WIDE_INT known_ret)
4406 {
4407   if (x == known_x && mode == known_mode)
4408     return known_ret;
4409 
4410   /* Try to find identical subexpressions.  If found call
4411      nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4412      precomputed value for the subexpression as KNOWN_RET.  */
4413 
4414   if (nonzero_bits_binary_arith_p (x))
4415     {
4416       rtx x0 = XEXP (x, 0);
4417       rtx x1 = XEXP (x, 1);
4418 
4419       /* Check the first level.  */
4420       if (x0 == x1)
4421 	return nonzero_bits1 (x, mode, x0, mode,
4422 			      cached_nonzero_bits (x0, mode, known_x,
4423 						   known_mode, known_ret));
4424 
4425       /* Check the second level.  */
4426       if (nonzero_bits_binary_arith_p (x0)
4427 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4428 	return nonzero_bits1 (x, mode, x1, mode,
4429 			      cached_nonzero_bits (x1, mode, known_x,
4430 						   known_mode, known_ret));
4431 
4432       if (nonzero_bits_binary_arith_p (x1)
4433 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4434 	return nonzero_bits1 (x, mode, x0, mode,
4435 			      cached_nonzero_bits (x0, mode, known_x,
4436 						   known_mode, known_ret));
4437     }
4438 
4439   return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4440 }
4441 
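/* Illustrative sketch (not part of the original source): without the
   wrapper above, an expression such as (plus:SI x x) would recompute
   the nonzero bits of the shared operand once per recursive branch,
   giving work exponential in the nesting depth; passing the precomputed
   value down as KNOWN_RET keeps the walk linear.  */
#if 0
  rtx x0 = gen_rtx_REG (SImode, 0);
  rtx sum = gen_rtx_PLUS (SImode, x0, x0);	/* XEXP 0 == XEXP 1.  */
  unsigned HOST_WIDE_INT nz = nonzero_bits (sum, SImode);
#endif
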
4442 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4443    We don't let nonzero_bits recur into num_sign_bit_copies, because that
4444    is less useful.  We can't allow both, because that results in exponential
4445    run time recursion.  There is a nullstone testcase that triggered
4446    this.  This macro avoids accidental uses of num_sign_bit_copies.  */
4447 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4448 
4449 /* Given an expression, X, compute which bits in X can be nonzero.
4450    We don't care about bits outside of those defined in MODE.
4451 
4452    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4453    an arithmetic operation, we can do better.  */
4454 
4455 static unsigned HOST_WIDE_INT
4456 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4457 	       machine_mode known_mode,
4458 	       unsigned HOST_WIDE_INT known_ret)
4459 {
4460   unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4461   unsigned HOST_WIDE_INT inner_nz;
4462   enum rtx_code code = GET_CODE (x);
4463   machine_mode inner_mode;
4464   unsigned int inner_width;
4465   scalar_int_mode xmode;
4466 
4467   unsigned int mode_width = GET_MODE_PRECISION (mode);
4468 
4469   if (CONST_INT_P (x))
4470     {
4471       if (SHORT_IMMEDIATES_SIGN_EXTEND
4472 	  && INTVAL (x) > 0
4473 	  && mode_width < BITS_PER_WORD
4474 	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4475 	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4476 
4477       return UINTVAL (x);
4478     }
4479 
4480   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4481     return nonzero;
4482   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4483 
4484   /* If X is wider than MODE, use its mode instead.  */
4485   if (xmode_width > mode_width)
4486     {
4487       mode = xmode;
4488       nonzero = GET_MODE_MASK (mode);
4489       mode_width = xmode_width;
4490     }
4491 
4492   if (mode_width > HOST_BITS_PER_WIDE_INT)
4493     /* Our only callers in this case look for single bit values.  So
4494        just return the mode mask.  Those tests will then be false.  */
4495     return nonzero;
4496 
4497   /* If MODE is wider than X, but both are a single word for both the host
4498      and target machines, we can compute this from which bits of the object
4499      might be nonzero in its own mode, taking into account the fact that, on
4500      CISC machines, accessing an object in a wider mode generally causes the
4501      high-order bits to become undefined, so they are not known to be zero.
4502      We extend this reasoning to RISC machines for operations that might not
4503      operate on the full registers.  */
4504   if (mode_width > xmode_width
4505       && xmode_width <= BITS_PER_WORD
4506       && xmode_width <= HOST_BITS_PER_WIDE_INT
4507       && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4508     {
4509       nonzero &= cached_nonzero_bits (x, xmode,
4510 				      known_x, known_mode, known_ret);
4511       nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4512       return nonzero;
4513     }
4514 
4515   /* Please keep nonzero_bits_binary_arith_p above in sync with
4516      the code in the switch below.  */
4517   switch (code)
4518     {
4519     case REG:
4520 #if defined(POINTERS_EXTEND_UNSIGNED)
4521       /* If pointers extend unsigned and this is a pointer in Pmode, say that
4522 	 all the bits above ptr_mode are known to be zero.  */
4523       /* As we do not know which address space the pointer is referring to,
4524 	 we can do this only if the target does not support different pointer
4525 	 or address modes depending on the address space.  */
4526       if (target_default_pointer_address_modes_p ()
4527 	  && POINTERS_EXTEND_UNSIGNED
4528 	  && xmode == Pmode
4529 	  && REG_POINTER (x)
4530 	  && !targetm.have_ptr_extend ())
4531 	nonzero &= GET_MODE_MASK (ptr_mode);
4532 #endif
4533 
4534       /* Include declared information about alignment of pointers.  */
4535       /* ??? We don't properly preserve REG_POINTER changes across
4536 	 pointer-to-integer casts, so we can't trust it except for
4537 	 things that we know must be pointers.  See execute/960116-1.c.  */
4538       if ((x == stack_pointer_rtx
4539 	   || x == frame_pointer_rtx
4540 	   || x == arg_pointer_rtx)
4541 	  && REGNO_POINTER_ALIGN (REGNO (x)))
4542 	{
4543 	  unsigned HOST_WIDE_INT alignment
4544 	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4545 
4546 #ifdef PUSH_ROUNDING
4547 	  /* If PUSH_ROUNDING is defined, it is possible for the
4548 	     stack to be momentarily aligned only to that amount,
4549 	     so we pick the least alignment.  */
4550 	  if (x == stack_pointer_rtx && PUSH_ARGS)
4551 	    {
4552 	      poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4553 	      alignment = MIN (known_alignment (rounded_1), alignment);
4554 	    }
4555 #endif
4556 
4557 	  nonzero &= ~(alignment - 1);
4558 	}
4559 
4560       {
4561 	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4562 	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4563 						  &nonzero_for_hook);
4564 
4565 	if (new_rtx)
4566 	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4567 						   known_mode, known_ret);
4568 
4569 	return nonzero_for_hook;
4570       }
4571 
4572     case MEM:
4573       /* In many, if not most, RISC machines, reading a byte from memory
4574 	 zeros the rest of the register.  Noticing that fact saves a lot
4575 	 of extra zero-extends.  */
4576       if (load_extend_op (xmode) == ZERO_EXTEND)
4577 	nonzero &= GET_MODE_MASK (xmode);
4578       break;
4579 
4580     case EQ:  case NE:
4581     case UNEQ:  case LTGT:
4582     case GT:  case GTU:  case UNGT:
4583     case LT:  case LTU:  case UNLT:
4584     case GE:  case GEU:  case UNGE:
4585     case LE:  case LEU:  case UNLE:
4586     case UNORDERED: case ORDERED:
4587       /* If this produces an integer result, we know which bits are set.
4588 	 Code here used to clear bits outside the mode of X, but that is
4589 	 now done above.  */
4590       /* Mind that MODE is the mode the caller wants to look at this
4591 	 operation in, and not the actual operation mode.  We can wind
4592 	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4593 	 that describes the results of a vector compare.  */
4594       if (GET_MODE_CLASS (xmode) == MODE_INT
4595 	  && mode_width <= HOST_BITS_PER_WIDE_INT)
4596 	nonzero = STORE_FLAG_VALUE;
4597       break;
4598 
4599     case NEG:
4600 #if 0
4601       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4602 	 and num_sign_bit_copies.  */
4603       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4604 	nonzero = 1;
4605 #endif
4606 
4607       if (xmode_width < mode_width)
4608 	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4609       break;
4610 
4611     case ABS:
4612 #if 0
4613       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4614 	 and num_sign_bit_copies.  */
4615       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4616 	nonzero = 1;
4617 #endif
4618       break;
4619 
4620     case TRUNCATE:
4621       nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4622 				       known_x, known_mode, known_ret)
4623 		  & GET_MODE_MASK (mode));
4624       break;
4625 
4626     case ZERO_EXTEND:
4627       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4628 				      known_x, known_mode, known_ret);
4629       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4630 	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4631       break;
4632 
4633     case SIGN_EXTEND:
4634       /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4635 	 Otherwise, show all the bits in the outer mode but not the inner
4636 	 may be nonzero.  */
4637       inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4638 				      known_x, known_mode, known_ret);
4639       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4640 	{
4641 	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4642 	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4643 	    inner_nz |= (GET_MODE_MASK (mode)
4644 			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4645 	}
4646 
4647       nonzero &= inner_nz;
4648       break;
4649 
4650     case AND:
4651       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4652 				       known_x, known_mode, known_ret)
4653       		 & cached_nonzero_bits (XEXP (x, 1), mode,
4654 					known_x, known_mode, known_ret);
4655       break;
4656 
4657     case XOR:   case IOR:
4658     case UMIN:  case UMAX:  case SMIN:  case SMAX:
4659       {
4660 	unsigned HOST_WIDE_INT nonzero0
4661 	   = cached_nonzero_bits (XEXP (x, 0), mode,
4662 				  known_x, known_mode, known_ret);
4663 
4664 	/* Don't call nonzero_bits a second time if it cannot change
4665 	   anything.  */
4666 	if ((nonzero & nonzero0) != nonzero)
4667 	  nonzero &= nonzero0
4668       		     | cached_nonzero_bits (XEXP (x, 1), mode,
4669 					    known_x, known_mode, known_ret);
4670       }
4671       break;
4672 
4673     case PLUS:  case MINUS:
4674     case MULT:
4675     case DIV:   case UDIV:
4676     case MOD:   case UMOD:
4677       /* We can apply the rules of arithmetic to compute the number of
4678 	 high- and low-order zero bits of these operations.  We start by
4679 	 computing the width (position of the highest-order nonzero bit)
4680 	 and the number of low-order zero bits for each value.  */
4681       {
4682 	unsigned HOST_WIDE_INT nz0
4683 	  = cached_nonzero_bits (XEXP (x, 0), mode,
4684 				 known_x, known_mode, known_ret);
4685 	unsigned HOST_WIDE_INT nz1
4686 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4687 				 known_x, known_mode, known_ret);
4688 	int sign_index = xmode_width - 1;
4689 	int width0 = floor_log2 (nz0) + 1;
4690 	int width1 = floor_log2 (nz1) + 1;
4691 	int low0 = ctz_or_zero (nz0);
4692 	int low1 = ctz_or_zero (nz1);
4693 	unsigned HOST_WIDE_INT op0_maybe_minusp
4694 	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
4695 	unsigned HOST_WIDE_INT op1_maybe_minusp
4696 	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
4697 	unsigned int result_width = mode_width;
4698 	int result_low = 0;
4699 
4700 	switch (code)
4701 	  {
4702 	  case PLUS:
4703 	    result_width = MAX (width0, width1) + 1;
4704 	    result_low = MIN (low0, low1);
4705 	    break;
4706 	  case MINUS:
4707 	    result_low = MIN (low0, low1);
4708 	    break;
4709 	  case MULT:
4710 	    result_width = width0 + width1;
4711 	    result_low = low0 + low1;
4712 	    break;
4713 	  case DIV:
4714 	    if (width1 == 0)
4715 	      break;
4716 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4717 	      result_width = width0;
4718 	    break;
4719 	  case UDIV:
4720 	    if (width1 == 0)
4721 	      break;
4722 	    result_width = width0;
4723 	    break;
4724 	  case MOD:
4725 	    if (width1 == 0)
4726 	      break;
4727 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4728 	      result_width = MIN (width0, width1);
4729 	    result_low = MIN (low0, low1);
4730 	    break;
4731 	  case UMOD:
4732 	    if (width1 == 0)
4733 	      break;
4734 	    result_width = MIN (width0, width1);
4735 	    result_low = MIN (low0, low1);
4736 	    break;
4737 	  default:
4738 	    gcc_unreachable ();
4739 	  }
4740 
4741 	if (result_width < mode_width)
4742 	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4743 
4744 	if (result_low > 0)
4745 	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4746       }
4747       break;
4748 
4749     case ZERO_EXTRACT:
4750       if (CONST_INT_P (XEXP (x, 1))
4751 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4752 	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4753       break;
4754 
4755     case SUBREG:
4756       /* If this is a SUBREG formed for a promoted variable that has
4757 	 been zero-extended, we know that at least the high-order bits
4758 	 are zero, though others might be too.  */
4759       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4760 	nonzero = GET_MODE_MASK (xmode)
4761 		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
4762 					 known_x, known_mode, known_ret);
4763 
4764       /* If the inner mode is a single word for both the host and target
4765 	 machines, we can compute this from which bits of the inner
4766 	 object might be nonzero.  */
4767       inner_mode = GET_MODE (SUBREG_REG (x));
4768       if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4769 	  && inner_width <= BITS_PER_WORD
4770 	  && inner_width <= HOST_BITS_PER_WIDE_INT)
4771 	{
4772 	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4773 					  known_x, known_mode, known_ret);
4774 
4775           /* On a typical CISC machine, accessing an object in a wider mode
4776 	     causes the high-order bits to become undefined.  So they are
4777 	     not known to be zero.
4778 
4779 	     On a typical RISC machine, we only have to worry about the way
4780 	     loads are extended.  Otherwise, if we get a reload for the inner
4781 	     part, it may be loaded from the stack, and then we may lose all
4782 	     the zero bits that existed before the store to the stack.  */
4783 	  rtx_code extend_op;
4784 	  if ((!WORD_REGISTER_OPERATIONS
4785 	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4786 		   ? val_signbit_known_set_p (inner_mode, nonzero)
4787 		   : extend_op != ZERO_EXTEND)
4788 	       || !MEM_P (SUBREG_REG (x)))
4789 	      && xmode_width > inner_width)
4790 	    nonzero
4791 	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4792 	}
4793       break;
4794 
4795     case ASHIFT:
4796     case ASHIFTRT:
4797     case LSHIFTRT:
4798     case ROTATE:
4799     case ROTATERT:
4800       /* The nonzero bits are in two classes: any bits within MODE
4801 	 that aren't in xmode are always significant.  The rest of the
4802 	 nonzero bits are those that are significant in the operand of
4803 	 the shift when shifted the appropriate number of bits.  This
4804 	 shows that high-order bits are cleared by the right shift and
4805 	 low-order bits by left shifts.  */
4806       if (CONST_INT_P (XEXP (x, 1))
4807 	  && INTVAL (XEXP (x, 1)) >= 0
4808 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4809 	  && INTVAL (XEXP (x, 1)) < xmode_width)
4810 	{
4811 	  int count = INTVAL (XEXP (x, 1));
4812 	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4813 	  unsigned HOST_WIDE_INT op_nonzero
4814 	    = cached_nonzero_bits (XEXP (x, 0), mode,
4815 				   known_x, known_mode, known_ret);
4816 	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4817 	  unsigned HOST_WIDE_INT outer = 0;
4818 
4819 	  if (mode_width > xmode_width)
4820 	    outer = (op_nonzero & nonzero & ~mode_mask);
4821 
4822 	  switch (code)
4823 	    {
4824 	    case ASHIFT:
4825 	      inner <<= count;
4826 	      break;
4827 
4828 	    case LSHIFTRT:
4829 	      inner >>= count;
4830 	      break;
4831 
4832 	    case ASHIFTRT:
4833 	      inner >>= count;
4834 
4835 	      /* If the sign bit may have been nonzero before the shift, we
4836 		 need to mark all the places it could have been copied to
4837 		 by the shift as possibly nonzero.  */
4838 	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4839 		inner |= (((HOST_WIDE_INT_1U << count) - 1)
4840 			  << (xmode_width - count));
4841 	      break;
4842 
4843 	    case ROTATE:
4844 	      inner = (inner << (count % xmode_width)
4845 		       | (inner >> (xmode_width - (count % xmode_width))))
4846 		      & mode_mask;
4847 	      break;
4848 
4849 	    case ROTATERT:
4850 	      inner = (inner >> (count % xmode_width)
4851 		       | (inner << (xmode_width - (count % xmode_width))))
4852 		      & mode_mask;
4853 	      break;
4854 
4855 	    default:
4856 	      gcc_unreachable ();
4857 	    }
4858 
4859 	  nonzero &= (outer | inner);
4860 	}
4861       break;
4862 
4863     case FFS:
4864     case POPCOUNT:
4865       /* This is at most the number of bits in the mode.  */
4866       nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4867       break;
4868 
4869     case CLZ:
4870       /* If CLZ has a known value at zero, then the nonzero bits are
4871 	 that value, plus the number of bits in the mode minus one.  */
4872       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4873 	nonzero
4874 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4875       else
4876 	nonzero = -1;
4877       break;
4878 
4879     case CTZ:
4880       /* If CTZ has a known value at zero, then the nonzero bits are
4881 	 that value, plus the number of bits in the mode minus one.  */
4882       if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4883 	nonzero
4884 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4885       else
4886 	nonzero = -1;
4887       break;
4888 
4889     case CLRSB:
4890       /* This is at most the number of bits in the mode minus 1.  */
4891       nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4892       break;
4893 
4894     case PARITY:
4895       nonzero = 1;
4896       break;
4897 
4898     case IF_THEN_ELSE:
4899       {
4900 	unsigned HOST_WIDE_INT nonzero_true
4901 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4902 				 known_x, known_mode, known_ret);
4903 
4904 	/* Don't call nonzero_bits a second time if it cannot change
4905 	   anything.  */
4906 	if ((nonzero & nonzero_true) != nonzero)
4907 	  nonzero &= nonzero_true
4908       		     | cached_nonzero_bits (XEXP (x, 2), mode,
4909 					    known_x, known_mode, known_ret);
4910       }
4911       break;
4912 
4913     default:
4914       break;
4915     }
4916 
4917   return nonzero;
4918 }
4919 
4920 /* See the macro definition above.  */
4921 #undef cached_num_sign_bit_copies
4922 
4923 
4924 /* Return true if num_sign_bit_copies1 might recurse into both operands
4925    of X.  */
4926 
4927 static inline bool
4928 num_sign_bit_copies_binary_arith_p (const_rtx x)
4929 {
4930   if (!ARITHMETIC_P (x))
4931     return false;
4932   switch (GET_CODE (x))
4933     {
4934     case IOR:
4935     case AND:
4936     case XOR:
4937     case SMIN:
4938     case SMAX:
4939     case UMIN:
4940     case UMAX:
4941     case PLUS:
4942     case MINUS:
4943     case MULT:
4944       return true;
4945     default:
4946       return false;
4947     }
4948 }
4949 
4950 /* The function cached_num_sign_bit_copies is a wrapper around
4951    num_sign_bit_copies1.  It avoids exponential behavior in
4952    num_sign_bit_copies1 when X has identical subexpressions on the
4953    first or the second level.  */
4954 
4955 static unsigned int
4956 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
4957 			    const_rtx known_x, machine_mode known_mode,
4958 			    unsigned int known_ret)
4959 {
4960   if (x == known_x && mode == known_mode)
4961     return known_ret;
4962 
4963   /* Try to find identical subexpressions.  If found call
4964      num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4965      the precomputed value for the subexpression as KNOWN_RET.  */
4966 
4967   if (num_sign_bit_copies_binary_arith_p (x))
4968     {
4969       rtx x0 = XEXP (x, 0);
4970       rtx x1 = XEXP (x, 1);
4971 
4972       /* Check the first level.  */
4973       if (x0 == x1)
4974 	return
4975 	  num_sign_bit_copies1 (x, mode, x0, mode,
4976 				cached_num_sign_bit_copies (x0, mode, known_x,
4977 							    known_mode,
4978 							    known_ret));
4979 
4980       /* Check the second level.  */
4981       if (num_sign_bit_copies_binary_arith_p (x0)
4982 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4983 	return
4984 	  num_sign_bit_copies1 (x, mode, x1, mode,
4985 				cached_num_sign_bit_copies (x1, mode, known_x,
4986 							    known_mode,
4987 							    known_ret));
4988 
4989       if (num_sign_bit_copies_binary_arith_p (x1)
4990 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4991 	return
4992 	  num_sign_bit_copies1 (x, mode, x0, mode,
4993 				cached_num_sign_bit_copies (x0, mode, known_x,
4994 							    known_mode,
4995 							    known_ret));
4996     }
4997 
4998   return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4999 }
5000 
5001 /* Return the number of bits at the high-order end of X that are known to
5002    be equal to the sign bit.  X will be used in mode MODE.  The returned
5003    value will always be between 1 and the number of bits in MODE.  */
5004 
5005 static unsigned int
5006 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5007 		      machine_mode known_mode,
5008 		      unsigned int known_ret)
5009 {
5010   enum rtx_code code = GET_CODE (x);
5011   unsigned int bitwidth = GET_MODE_PRECISION (mode);
5012   int num0, num1, result;
5013   unsigned HOST_WIDE_INT nonzero;
5014 
5015   if (CONST_INT_P (x))
5016     {
5017       /* If the constant is negative, take its 1's complement and remask.
5018 	 Then see how many zero bits we have.  */
5019       nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5020       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5021 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5022 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5023 
5024       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5025     }
5026 
5027   scalar_int_mode xmode, inner_mode;
5028   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5029     return 1;
5030 
5031   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5032 
5033   /* For a smaller mode, just ignore the high bits.  */
5034   if (bitwidth < xmode_width)
5035     {
5036       num0 = cached_num_sign_bit_copies (x, xmode,
5037 					 known_x, known_mode, known_ret);
5038       return MAX (1, num0 - (int) (xmode_width - bitwidth));
5039     }
5040 
5041   if (bitwidth > xmode_width)
5042     {
5043       /* If this machine does not do all register operations on the entire
5044 	 register and MODE is wider than the mode of X, we can say nothing
5045 	 at all about the high-order bits.  We extend this reasoning to RISC
5046 	 machines for operations that might not operate on full registers.  */
5047       if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
5048 	return 1;
5049 
5050       /* Likewise on machines that do, if the mode of the object is smaller
5051 	 than a word and loads of that size don't sign extend, we can say
5052 	 nothing about the high order bits.  */
5053       if (xmode_width < BITS_PER_WORD
5054 	  && load_extend_op (xmode) != SIGN_EXTEND)
5055 	return 1;
5056     }
5057 
5058   /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5059      the code in the switch below.  */
5060   switch (code)
5061     {
5062     case REG:
5063 
5064 #if defined(POINTERS_EXTEND_UNSIGNED)
5065       /* If pointers extend signed and this is a pointer in Pmode, say that
5066 	 all the bits above ptr_mode are known to be sign bit copies.  */
5067       /* As we do not know which address space the pointer is referring to,
5068 	 we can do this only if the target does not support different pointer
5069 	 or address modes depending on the address space.  */
5070       if (target_default_pointer_address_modes_p ()
5071 	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5072 	  && mode == Pmode && REG_POINTER (x)
5073 	  && !targetm.have_ptr_extend ())
5074 	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5075 #endif
5076 
5077       {
5078 	unsigned int copies_for_hook = 1, copies = 1;
5079 	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5080 							 &copies_for_hook);
5081 
5082 	if (new_rtx)
5083 	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5084 					       known_mode, known_ret);
5085 
5086 	if (copies > 1 || copies_for_hook > 1)
5087 	  return MAX (copies, copies_for_hook);
5088 
5089 	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
5090       }
5091       break;
5092 
5093     case MEM:
5094       /* Some RISC machines sign-extend all loads smaller than a word.  */
5095       if (load_extend_op (xmode) == SIGN_EXTEND)
5096 	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5097       break;
5098 
5099     case SUBREG:
5100       /* If this is a SUBREG for a promoted object that is sign-extended
5101 	 and we are looking at it in a wider mode, we know that at least
5102 	 the high-order bits are sign bit copies.  */
5103 
5104       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5105 	{
5106 	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5107 					     known_x, known_mode, known_ret);
5108 	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5109 	}
5110 
5111       if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5112 	{
5113 	  /* For a smaller object, just ignore the high bits.  */
5114 	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5115 	    {
5116 	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5117 						 known_x, known_mode,
5118 						 known_ret);
5119 	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5120 					   - bitwidth));
5121 	    }
5122 
5123 	  /* For paradoxical SUBREGs on machines where all register operations
5124 	     affect the entire register, just look inside.  Note that we are
5125 	     passing MODE to the recursive call, so the number of sign bit
5126 	     copies will remain relative to that mode, not the inner mode.
5127 
5128 	     This works only if loads sign extend.  Otherwise, if we get a
5129 	     reload for the inner part, it may be loaded from the stack, and
5130 	     then we lose all sign bit copies that existed before the store
5131 	     to the stack.  */
5132 	  if (WORD_REGISTER_OPERATIONS
5133 	      && load_extend_op (inner_mode) == SIGN_EXTEND
5134 	      && paradoxical_subreg_p (x)
5135 	      && MEM_P (SUBREG_REG (x)))
5136 	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5137 					       known_x, known_mode, known_ret);
5138 	}
5139       break;
5140 
5141     case SIGN_EXTRACT:
5142       if (CONST_INT_P (XEXP (x, 1)))
5143 	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5144       break;
5145 
5146     case SIGN_EXTEND:
5147       if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5148 	return (bitwidth - GET_MODE_PRECISION (inner_mode)
5149 		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5150 					      known_x, known_mode, known_ret));
5151       break;
5152 
5153     case TRUNCATE:
5154       /* For a smaller object, just ignore the high bits.  */
5155       inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5156       num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5157 					 known_x, known_mode, known_ret);
5158       return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5159 				    - bitwidth)));
5160 
5161     case NOT:
5162       return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5163 					 known_x, known_mode, known_ret);
5164 
5165     case ROTATE:       case ROTATERT:
5166       /* If we are rotating left by a number of bits less than the number
5167 	 of sign bit copies, we can just subtract that amount from the
5168 	 number.  */
5169       if (CONST_INT_P (XEXP (x, 1))
5170 	  && INTVAL (XEXP (x, 1)) >= 0
5171 	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5172 	{
5173 	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5174 					     known_x, known_mode, known_ret);
5175 	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5176 				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5177 	}
5178       break;
5179 
5180     case NEG:
5181       /* In general, this subtracts one sign bit copy.  But if the value
5182 	 is known to be positive, the number of sign bit copies is the
5183 	 same as that of the input.  Finally, if the input has just one bit
5184 	 that might be nonzero, all the bits are copies of the sign bit.  */
5185       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5186 					 known_x, known_mode, known_ret);
5187       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5188 	return num0 > 1 ? num0 - 1 : 1;
5189 
5190       nonzero = nonzero_bits (XEXP (x, 0), mode);
5191       if (nonzero == 1)
5192 	return bitwidth;
5193 
5194       if (num0 > 1
5195 	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5196 	num0--;
5197 
5198       return num0;
5199 
5200     case IOR:   case AND:   case XOR:
5201     case SMIN:  case SMAX:  case UMIN:  case UMAX:
5202       /* Logical operations will preserve the number of sign-bit copies.
5203 	 MIN and MAX operations always return one of the operands.  */
5204       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5205 					 known_x, known_mode, known_ret);
5206       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5207 					 known_x, known_mode, known_ret);
5208 
5209       /* If num1 is clearing some of the top bits then regardless of
5210 	 the other term, we are guaranteed to have at least that many
5211 	 high-order zero bits.  */
5212       if (code == AND
5213 	  && num1 > 1
5214 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5215 	  && CONST_INT_P (XEXP (x, 1))
5216 	  && (UINTVAL (XEXP (x, 1))
5217 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5218 	return num1;
5219 
5220       /* Similarly for IOR when setting high-order bits.  */
5221       if (code == IOR
5222 	  && num1 > 1
5223 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5224 	  && CONST_INT_P (XEXP (x, 1))
5225 	  && (UINTVAL (XEXP (x, 1))
5226 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5227 	return num1;
5228 
5229       return MIN (num0, num1);
5230 
5231     case PLUS:  case MINUS:
5232       /* For addition and subtraction, we can have a 1-bit carry.  However,
5233 	 if we are subtracting 1 from a positive number, there will not
5234 	 be such a carry.  Furthermore, if the positive number is known to
5235 	 be 0 or 1, we know the result is either -1 or 0.  */
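      /* E.g. for (plus X (const_int -1)) where nonzero_bits shows that
	 X is 0 or 1, the result is -1 or 0 and all BITWIDTH bits are
	 copies of the sign bit.  */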
5236 
5237       if (code == PLUS && XEXP (x, 1) == constm1_rtx
5238 	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
5239 	{
5240 	  nonzero = nonzero_bits (XEXP (x, 0), mode);
5241 	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5242 	    return (nonzero == 1 || nonzero == 0 ? bitwidth
5243 		    : bitwidth - floor_log2 (nonzero) - 1);
5244 	}
5245 
5246       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5247 					 known_x, known_mode, known_ret);
5248       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5249 					 known_x, known_mode, known_ret);
5250       result = MAX (1, MIN (num0, num1) - 1);
5251 
5252       return result;
5253 
5254     case MULT:
5255       /* The number of bits of the product is the sum of the number of
5256 	 bits of both terms.  However, unless one of the terms is known
5257 	 to be positive, we must allow for an additional bit since negating
5258 	 a negative number can remove one sign bit copy.  */
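      /* Illustrative example: in SImode, two operands each sign-extended
	 from HImode have num0 = num1 = 17, giving 17 + 17 - 32 = 2
	 copies; both might be negative, so one more is dropped, and
	 indeed -32768 * -32768 = 0x40000000 has only one.  */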
5259 
5260       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5261 					 known_x, known_mode, known_ret);
5262       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5263 					 known_x, known_mode, known_ret);
5264 
5265       result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5266       if (result > 0
5267 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5268 	      || (((nonzero_bits (XEXP (x, 0), mode)
5269 		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5270 		  && ((nonzero_bits (XEXP (x, 1), mode)
5271 		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5272 		      != 0))))
5273 	result--;
5274 
5275       return MAX (1, result);
5276 
5277     case UDIV:
5278       /* The result must be <= the first operand.  If the first operand
5279 	 has the high bit set, we know nothing about the number of sign
5280 	 bit copies.  */
5281       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5282 	return 1;
5283       else if ((nonzero_bits (XEXP (x, 0), mode)
5284 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5285 	return 1;
5286       else
5287 	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5288 					   known_x, known_mode, known_ret);
5289 
5290     case UMOD:
5291       /* The result must be <= the second operand.  If the second operand
5292 	 has (or just might have) the high bit set, we know nothing about
5293 	 the number of sign bit copies.  */
5294       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5295 	return 1;
5296       else if ((nonzero_bits (XEXP (x, 1), mode)
5297 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5298 	return 1;
5299       else
5300 	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5301 					   known_x, known_mode, known_ret);
5302 
5303     case DIV:
5304       /* Similar to unsigned division, except that we have to worry about
5305 	 the case where the divisor is negative, in which case negating
5306 	 the quotient can remove one sign-bit copy, so allow for that.  */
5307       result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5308 					   known_x, known_mode, known_ret);
5309       if (result > 1
5310 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5311 	      || (nonzero_bits (XEXP (x, 1), mode)
5312 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5313 	result--;
5314 
5315       return result;
5316 
5317     case MOD:
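      /* Like UMOD but signed: the magnitude of the result is bounded by
	 that of the second operand, so use its count of sign-bit copies,
	 conservatively dropping one if the divisor might be negative.  */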
5318       result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5319 					   known_x, known_mode, known_ret);
5320       if (result > 1
5321 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5322 	      || (nonzero_bits (XEXP (x, 1), mode)
5323 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5324 	result--;
5325 
5326       return result;
5327 
5328     case ASHIFTRT:
5329       /* Arithmetic right shifts by a constant duplicate the sign bit,
5330 	 adding that many sign-bit copies.  */
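      /* E.g. (ashiftrt:SI X (const_int 3)) copies the sign bit into
	 three more positions, so a value with 5 sign-bit copies ends up
	 with MIN (32, 5 + 3) = 8.  */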
5331       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5332 					 known_x, known_mode, known_ret);
5333       if (CONST_INT_P (XEXP (x, 1))
5334 	  && INTVAL (XEXP (x, 1)) > 0
5335 	  && INTVAL (XEXP (x, 1)) < xmode_width)
5336 	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5337 
5338       return num0;
5339 
5340     case ASHIFT:
5341       /* Left shifts destroy copies.  */
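      /* E.g. (ashift:SI X (const_int 3)) shifts three copies out of the
	 top, so at most MAX (1, num0 - 3) remain.  */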
5342       if (!CONST_INT_P (XEXP (x, 1))
5343 	  || INTVAL (XEXP (x, 1)) < 0
5344 	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5345 	  || INTVAL (XEXP (x, 1)) >= xmode_width)
5346 	return 1;
5347 
5348       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5349 					 known_x, known_mode, known_ret);
5350       return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5351 
5352     case IF_THEN_ELSE:
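      /* Either arm may be chosen, so only the smaller count is safe.  */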
5353       num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5354 					 known_x, known_mode, known_ret);
5355       num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5356 					 known_x, known_mode, known_ret);
5357       return MIN (num0, num1);
5358 
5359     case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
5360     case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
5361     case GEU: case GTU: case LEU: case LTU:
5362     case UNORDERED: case ORDERED:
5363       /* If the constant is negative, take its 1's complement and remask.
5364 	 Then see how many zero bits we have.  */
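      /* E.g. on a target where STORE_FLAG_VALUE is 1, nonzero stays 1,
	 floor_log2 (1) is 0, and a comparison result has
	 bitwidth - 0 - 1 sign-bit copies: every bit above bit 0 is zero.  */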
5365       nonzero = STORE_FLAG_VALUE;
5366       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5367 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5368 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5369 
5370       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5371 
5372     default:
5373       break;
5374     }
5375 
5376   /* If we haven't been able to figure it out by one of the above rules,
5377      see if some of the high-order bits are known to be zero.  If so,
5378      count those bits and return one less than that amount.  If we can't
5379      safely compute the mask for this mode, conservatively return 1.  */
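  /* E.g. if nonzero_bits (x, mode) is 0xff in a 32-bit mode, the sign
     bit is clear and the top 24 bits are known zero, so we return
     32 - floor_log2 (0xff) - 1 = 24.  */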
5380 
5381   bitwidth = GET_MODE_PRECISION (mode);
5382   if (bitwidth > HOST_BITS_PER_WIDE_INT)
5383     return 1;
5384 
5385   nonzero = nonzero_bits (x, mode);
5386   return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5387 	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5388 }
5389 
5390 /* Calculate the rtx_cost of a single instruction pattern.  A return value of
5391    zero indicates an instruction pattern without a known cost.  */
5392 
5393 int
5394 pattern_cost (rtx pat, bool speed)
5395 {
5396   int i, cost;
5397   rtx set;
5398 
5399   /* Extract the single set rtx from the instruction pattern.  We
5400      can't use single_set since we only have the pattern.  We also
5401      consider PARALLELs of a normal set and a single comparison.  In
5402      that case we use the cost of the non-comparison SET operation,
5403      which is most likely to be the real cost of this operation.  */
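  /* For instance, given a pattern such as

	 (parallel [(set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
		    (set (reg:CC 17) (compare:CC ...))])

     we cost the PLUS set rather than the COMPARE.  */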
5404   if (GET_CODE (pat) == SET)
5405     set = pat;
5406   else if (GET_CODE (pat) == PARALLEL)
5407     {
5408       set = NULL_RTX;
5409       rtx comparison = NULL_RTX;
5410 
5411       for (i = 0; i < XVECLEN (pat, 0); i++)
5412 	{
5413 	  rtx x = XVECEXP (pat, 0, i);
5414 	  if (GET_CODE (x) == SET)
5415 	    {
5416 	      if (GET_CODE (SET_SRC (x)) == COMPARE)
5417 		{
5418 		  if (comparison)
5419 		    return 0;
5420 		  comparison = x;
5421 		}
5422 	      else
5423 		{
5424 		  if (set)
5425 		    return 0;
5426 		  set = x;
5427 		}
5428 	    }
5429 	}
5430 
5431       if (!set && comparison)
5432 	set = comparison;
5433 
5434       if (!set)
5435 	return 0;
5436     }
5437   else
5438     return 0;
5439 
5440   cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5441   return cost > 0 ? cost : COSTS_N_INSNS (1);
5442 }
5443 
5444 /* Calculate the cost of a single instruction.  A return value of zero
5445    indicates an instruction pattern without a known cost.  */
5446 
5447 int
5448 insn_cost (rtx_insn *insn, bool speed)
5449 {
5450   if (targetm.insn_cost)
5451     return targetm.insn_cost (insn, speed);
5452 
5453   return pattern_cost (PATTERN (insn), speed);
5454 }
5455 
5456 /* Return an estimate of the cost of computing SEQ.  */
5457 
5458 unsigned
5459 seq_cost (const rtx_insn *seq, bool speed)
5460 {
5461   unsigned cost = 0;
5462   rtx set;
5463 
5464   for (; seq; seq = NEXT_INSN (seq))
5465     {
5466       set = single_set (seq);
5467       if (set)
5468         cost += set_rtx_cost (set, speed);
5469       else if (NONDEBUG_INSN_P (seq))
5470 	{
5471 	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5472 	  if (this_cost > 0)
5473 	    cost += this_cost;
5474 	  else
5475 	    cost++;
5476 	}
5477     }
5478 
5479   return cost;
5480 }
5481 
5482 /* Given an insn INSN and condition COND, return the condition in a
5483    canonical form to simplify testing by callers.  Specifically:
5484 
5485    (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5486    (2) Both operands will be machine operands; (cc0) will have been replaced.
5487    (3) If an operand is a constant, it will be the second operand.
5488    (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5489        for GE, GEU, and LEU.
5490 
5491    If the condition cannot be understood, or is an inequality floating-point
5492    comparison which needs to be reversed, 0 will be returned.
5493 
5494    If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5495 
5496    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5497    insn used in locating the condition was found.  If a replacement test
5498    of the condition is desired, it should be placed in front of that
5499    insn and we will be sure that the inputs are still valid.
5500 
5501    If WANT_REG is nonzero, we wish the condition to be relative to that
5502    register, if possible.  Therefore, do not canonicalize the condition
5503    further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
5504    to be a compare to a CC mode register.
5505 
5506    If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5507    and at INSN.  */
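
/* For example, rule (4) above rewrites

     (le (reg:SI 0) (const_int 4))   as  (lt (reg:SI 0) (const_int 5))
     (geu (reg:SI 0) (const_int 1))  as  (gtu (reg:SI 0) (const_int 0))

   provided the adjusted constant does not overflow the operand's mode.  */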
5508 
5509 rtx
5510 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5511 			rtx_insn **earliest,
5512 			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5513 {
5514   enum rtx_code code;
5515   rtx_insn *prev = insn;
5516   const_rtx set;
5517   rtx tem;
5518   rtx op0, op1;
5519   int reverse_code = 0;
5520   machine_mode mode;
5521   basic_block bb = BLOCK_FOR_INSN (insn);
5522 
5523   code = GET_CODE (cond);
5524   mode = GET_MODE (cond);
5525   op0 = XEXP (cond, 0);
5526   op1 = XEXP (cond, 1);
5527 
5528   if (reverse)
5529     code = reversed_comparison_code (cond, insn);
5530   if (code == UNKNOWN)
5531     return 0;
5532 
5533   if (earliest)
5534     *earliest = insn;
5535 
5536   /* If we are comparing a register with zero, see if the register is set
5537      in the previous insn to a COMPARE or a comparison operation.  Perform
5538      the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5539      in cse.c.  */
5540 
5541   while ((GET_RTX_CLASS (code) == RTX_COMPARE
5542 	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5543 	 && op1 == CONST0_RTX (GET_MODE (op0))
5544 	 && op0 != want_reg)
5545     {
5546       /* Set nonzero when we find something of interest.  */
5547       rtx x = 0;
5548 
5549       /* If comparison with cc0, import actual comparison from compare
5550 	 insn.  */
5551       if (op0 == cc0_rtx)
5552 	{
5553 	  if ((prev = prev_nonnote_insn (prev)) == 0
5554 	      || !NONJUMP_INSN_P (prev)
5555 	      || (set = single_set (prev)) == 0
5556 	      || SET_DEST (set) != cc0_rtx)
5557 	    return 0;
5558 
5559 	  op0 = SET_SRC (set);
5560 	  op1 = CONST0_RTX (GET_MODE (op0));
5561 	  if (earliest)
5562 	    *earliest = prev;
5563 	}
5564 
5565       /* If this is a COMPARE, pick up the two things being compared.  */
5566       if (GET_CODE (op0) == COMPARE)
5567 	{
5568 	  op1 = XEXP (op0, 1);
5569 	  op0 = XEXP (op0, 0);
5570 	  continue;
5571 	}
5572       else if (!REG_P (op0))
5573 	break;
5574 
5575       /* Go back to the previous insn.  Stop if it is not an INSN.  We also
5576 	 stop if it isn't a single set or if it has a REG_INC note because
5577 	 we don't want to bother dealing with it.  */
5578 
5579       prev = prev_nonnote_nondebug_insn (prev);
5580 
5581       if (prev == 0
5582 	  || !NONJUMP_INSN_P (prev)
5583 	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
5584 	  /* In cfglayout mode, there do not have to be labels at the
5585 	     beginning of a block, or jumps at the end, so the previous
5586 	     conditions would not stop us when we reach the bb boundary.  */
5587 	  || BLOCK_FOR_INSN (prev) != bb)
5588 	break;
5589 
5590       set = set_of (op0, prev);
5591 
5592       if (set
5593 	  && (GET_CODE (set) != SET
5594 	      || !rtx_equal_p (SET_DEST (set), op0)))
5595 	break;
5596 
5597       /* If this is setting OP0, get what it sets it to if it looks
5598 	 relevant.  */
5599       if (set)
5600 	{
5601 	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
5602 #ifdef FLOAT_STORE_FLAG_VALUE
5603 	  REAL_VALUE_TYPE fsfv;
5604 #endif
5605 
5606 	  /* ??? We may not combine comparisons done in a CCmode with
5607 	     comparisons not done in a CCmode.  This is to aid targets
5608 	     like Alpha that have an IEEE compliant EQ instruction, and
5609 	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
5610 	     actually artificial, simply to prevent the combination, but
5611 	     should not affect other platforms.
5612 
5613 	     However, we must allow VOIDmode comparisons to match either
5614 	     CCmode or non-CCmode comparison, because some ports have
5615 	     modeless comparisons inside branch patterns.
5616 
5617 	     ??? This mode check should perhaps look more like the mode check
5618 	     in simplify_comparison in combine.  */
5619 	  if (((GET_MODE_CLASS (mode) == MODE_CC)
5620 	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5621 	      && mode != VOIDmode
5622 	      && inner_mode != VOIDmode)
5623 	    break;
5624 	  if (GET_CODE (SET_SRC (set)) == COMPARE
5625 	      || (((code == NE
5626 		    || (code == LT
5627 			&& val_signbit_known_set_p (inner_mode,
5628 						    STORE_FLAG_VALUE))
5629 #ifdef FLOAT_STORE_FLAG_VALUE
5630 		    || (code == LT
5631 			&& SCALAR_FLOAT_MODE_P (inner_mode)
5632 			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5633 			    REAL_VALUE_NEGATIVE (fsfv)))
5634 #endif
5635 		    ))
5636 		  && COMPARISON_P (SET_SRC (set))))
5637 	    x = SET_SRC (set);
5638 	  else if (((code == EQ
5639 		     || (code == GE
5640 			 && val_signbit_known_set_p (inner_mode,
5641 						     STORE_FLAG_VALUE))
5642 #ifdef FLOAT_STORE_FLAG_VALUE
5643 		     || (code == GE
5644 			 && SCALAR_FLOAT_MODE_P (inner_mode)
5645 			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5646 			     REAL_VALUE_NEGATIVE (fsfv)))
5647 #endif
5648 		     ))
5649 		   && COMPARISON_P (SET_SRC (set)))
5650 	    {
5651 	      reverse_code = 1;
5652 	      x = SET_SRC (set);
5653 	    }
5654 	  else if ((code == EQ || code == NE)
5655 		   && GET_CODE (SET_SRC (set)) == XOR)
5656 	    /* Handle sequences like:
5657 
5658 	       (set op0 (xor X Y))
5659 	       ...(eq|ne op0 (const_int 0))...
5660 
5661 	       in which case:
5662 
5663 	       (eq op0 (const_int 0)) reduces to (eq X Y)
5664 	       (ne op0 (const_int 0)) reduces to (ne X Y)
5665 
5666 	       This is the form used by MIPS16, for example.  */
5667 	    x = SET_SRC (set);
5668 	  else
5669 	    break;
5670 	}
5671 
5672       else if (reg_set_p (op0, prev))
5673 	/* If this sets OP0, but not directly, we have to give up.  */
5674 	break;
5675 
5676       if (x)
5677 	{
5678 	  /* If the caller is expecting the condition to be valid at INSN,
5679 	     make sure X doesn't change before INSN.  */
5680 	  if (valid_at_insn_p)
5681 	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5682 	      break;
5683 	  if (COMPARISON_P (x))
5684 	    code = GET_CODE (x);
5685 	  if (reverse_code)
5686 	    {
5687 	      code = reversed_comparison_code (x, prev);
5688 	      if (code == UNKNOWN)
5689 		return 0;
5690 	      reverse_code = 0;
5691 	    }
5692 
5693 	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5694 	  if (earliest)
5695 	    *earliest = prev;
5696 	}
5697     }
5698 
5699   /* If constant is first, put it last.  */
5700   if (CONSTANT_P (op0))
5701     code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5702 
5703   /* If OP0 is the result of a comparison, we weren't able to find what
5704      was really being compared, so fail.  */
5705   if (!allow_cc_mode
5706       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5707     return 0;
5708 
5709   /* Canonicalize any ordered comparison with integers involving equality
5710      if we can do computations in the relevant mode and we do not
5711      overflow.  */
5712 
5713   scalar_int_mode op0_mode;
5714   if (CONST_INT_P (op1)
5715       && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
5716       && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
5717     {
5718       HOST_WIDE_INT const_val = INTVAL (op1);
5719       unsigned HOST_WIDE_INT uconst_val = const_val;
5720       unsigned HOST_WIDE_INT max_val
5721 	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
5722 
5723       switch (code)
5724 	{
5725 	case LE:
5726 	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5727 	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
5728 	  break;
5729 
5730 	/* When cross-compiling, const_val might be sign-extended from
5731 	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
5732 	case GE:
5733 	  if ((const_val & max_val)
5734 	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
5735 	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
5736 	  break;
5737 
5738 	case LEU:
5739 	  if (uconst_val < max_val)
5740 	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
5741 	  break;
5742 
5743 	case GEU:
5744 	  if (uconst_val != 0)
5745 	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
5746 	  break;
5747 
5748 	default:
5749 	  break;
5750 	}
5751     }
5752 
5753   /* Never return CC0; return zero instead.  */
5754   if (CC0_P (op0))
5755     return 0;
5756 
5757   /* We promised to return a comparison.  */
5758   rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5759   if (COMPARISON_P (ret))
5760     return ret;
5761   return 0;
5762 }
5763 
5764 /* Given a jump insn JUMP, return the condition that will cause it to branch
5765    to its JUMP_LABEL.  If the condition cannot be understood, or is an
5766    inequality floating-point comparison which needs to be reversed, 0 will
5767    be returned.
5768 
5769    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5770    insn used in locating the condition was found.  If a replacement test
5771    of the condition is desired, it should be placed in front of that
5772    insn and we will be sure that the inputs are still valid.  If EARLIEST
5773    is null, the returned condition will be valid at INSN.
5774 
5775    If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5776    compare to a CC mode register.
5777 
5778    VALID_AT_INSN_P is the same as for canonicalize_condition.  */
5779 
5780 rtx
5781 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5782 	       int valid_at_insn_p)
5783 {
5784   rtx cond;
5785   int reverse;
5786   rtx set;
5787 
5788   /* If this is not a standard conditional jump, we can't parse it.  */
5789   if (!JUMP_P (jump)
5790       || ! any_condjump_p (jump))
5791     return 0;
5792   set = pc_set (jump);
5793 
5794   cond = XEXP (SET_SRC (set), 0);
5795 
5796   /* If this branches to JUMP_LABEL when the condition is false, reverse
5797      the condition.  */
5798   reverse
5799     = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5800       && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5801 
5802   return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5803 				 allow_cc_mode, valid_at_insn_p);
5804 }
5805 
5806 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5807    TARGET_MODE_REP_EXTENDED.
5808 
5809    Note that we assume that the property of
5810    TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5811    narrower than mode B.  I.e., if A is a mode narrower than B then in
5812    order to be able to operate on it in mode B, mode A needs to
5813    satisfy the requirements set by the representation of mode B.  */
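
/* E.g. on a hypothetical target where TARGET_MODE_REP_EXTENDED (SImode,
   DImode) is SIGN_EXTEND, SImode values held in DImode registers keep
   their upper 32 bits equal to the sign bit, so the table entry
   NUM_SIGN_BIT_COPIES_IN_REP[DImode][SImode] becomes 32.  */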
5814 
5815 static void
5816 init_num_sign_bit_copies_in_rep (void)
5817 {
5818   opt_scalar_int_mode in_mode_iter;
5819   scalar_int_mode mode;
5820 
5821   FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
5822     FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
5823       {
5824 	scalar_int_mode in_mode = in_mode_iter.require ();
5825 	scalar_int_mode i;
5826 
5827 	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5828 	   extends to the next widest mode.  */
5829 	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5830 		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);
5831 
5832 	/* We are in in_mode.  Count how many bits outside of mode
5833 	   have to be copies of the sign-bit.  */
5834 	FOR_EACH_MODE (i, mode, in_mode)
5835 	  {
5836 	    /* This must always exist (for the last iteration it will be
5837 	       IN_MODE).  */
5838 	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
5839 
5840 	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5841 		/* We can only check sign-bit copies starting from the
5842 		   top-bit.  In order to be able to check the bits we
5843 		   have already seen we pretend that subsequent bits
5844 		   have to be sign-bit copies too.  */
5845 		|| num_sign_bit_copies_in_rep [in_mode][mode])
5846 	      num_sign_bit_copies_in_rep [in_mode][mode]
5847 		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5848 	  }
5849       }
5850 }
5851 
5852 /* Suppose that truncation from the machine mode of X to MODE is not a
5853    no-op.  See if there is anything special about X so that we can
5854    assume it already contains a truncated value of MODE.  */
5855 
5856 bool
5857 truncated_to_mode (machine_mode mode, const_rtx x)
5858 {
5859   /* This register has already been used in MODE without explicit
5860      truncation.  */
5861   if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5862     return true;
5863 
5864   /* See if we already satisfy the requirements of MODE.  If yes we
5865      can just switch to MODE.  */
5866   if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5867       && (num_sign_bit_copies (x, GET_MODE (x))
5868 	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5869     return true;
5870 
5871   return false;
5872 }
5873 
5874 /* Return true if RTX code CODE has a single sequence of zero or more
5875    "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
5876    entry in that case.  */
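
/* E.g. PLUS has format "ee", giving start 0 and count 2, and
   ZERO_EXTEND ("e") qualifies with count 1, whereas codes with rtvec
   operands such as PARALLEL ("E") do not qualify.  */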
5877 
5878 static bool
5879 setup_reg_subrtx_bounds (unsigned int code)
5880 {
5881   const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5882   unsigned int i = 0;
5883   for (; format[i] != 'e'; ++i)
5884     {
5885       if (!format[i])
5886 	/* No subrtxes.  Leave start and count as 0.  */
5887 	return true;
5888       if (format[i] == 'E' || format[i] == 'V')
5889 	return false;
5890     }
5891 
5892   /* Record the sequence of 'e's.  */
5893   rtx_all_subrtx_bounds[code].start = i;
5894   do
5895     ++i;
5896   while (format[i] == 'e');
5897   rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5898   /* rtl-iter.h relies on this.  */
5899   gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5900 
5901   for (; format[i]; ++i)
5902     if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5903       return false;
5904 
5905   return true;
5906 }
5907 
5908 /* Initialize rtx_all_subrtx_bounds.  */
5909 void
5910 init_rtlanal (void)
5911 {
5912   int i;
5913   for (i = 0; i < NUM_RTX_CODE; i++)
5914     {
5915       if (!setup_reg_subrtx_bounds (i))
5916 	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5917       if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5918 	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5919     }
5920 
5921   init_num_sign_bit_copies_in_rep ();
5922 }
5923 
5924 /* Check whether this is a constant pool constant.  */
5925 bool
5926 constant_pool_constant_p (rtx x)
5927 {
5928   x = avoid_constant_pool_reference (x);
5929   return CONST_DOUBLE_P (x);
5930 }
5931 
5932 /* If M is a bitmask that selects a field of low-order bits within an item but
5933    not the entire word, return the length of the field.  Return -1 otherwise.
5934    M is used in machine mode MODE.  */
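
/* E.g. M == 0x7f gives 7, since 0x7f + 1 is an exact power of 2,
   while M == 0xf0 gives -1 because the field does not start at bit 0.  */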
5935 
5936 int
5937 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5938 {
5939   if (mode != VOIDmode)
5940     {
5941       if (!HWI_COMPUTABLE_MODE_P (mode))
5942 	return -1;
5943       m &= GET_MODE_MASK (mode);
5944     }
5945 
5946   return exact_log2 (m + 1);
5947 }
5948 
5949 /* Return the mode of MEM's address.  */
5950 
5951 scalar_int_mode
5952 get_address_mode (rtx mem)
5953 {
5954   machine_mode mode;
5955 
5956   gcc_assert (MEM_P (mem));
5957   mode = GET_MODE (XEXP (mem, 0));
5958   if (mode != VOIDmode)
5959     return as_a <scalar_int_mode> (mode);
5960   return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5961 }
5962 
5963 /* Split up a CONST_DOUBLE or integer constant rtx
5964    into two rtx's for single words,
5965    storing in *FIRST the word that comes first in memory in the target
5966    and in *SECOND the other.
5967 
5968    TODO: This function needs to be rewritten to work on any size
5969    integer.  */
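
/* E.g. on a 64-bit host targeting 32-bit words, splitting
   (const_int 0x100000002) yields a low word of 2 and a high word of 1,
   ordered according to WORDS_BIG_ENDIAN.  */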
5970 
5971 void
5972 split_double (rtx value, rtx *first, rtx *second)
5973 {
5974   if (CONST_INT_P (value))
5975     {
5976       if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5977 	{
5978 	  /* In this case the CONST_INT holds both target words.
5979 	     Extract the bits from it into two word-sized pieces.
5980 	     Sign extend each half to HOST_WIDE_INT.  */
5981 	  unsigned HOST_WIDE_INT low, high;
5982 	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5983 	  unsigned bits_per_word = BITS_PER_WORD;
5984 
5985 	  /* Set sign_bit to the most significant bit of a word.  */
5986 	  sign_bit = 1;
5987 	  sign_bit <<= bits_per_word - 1;
5988 
5989 	  /* Set mask so that all bits of the word are set.  We could
5990 	     have used 1 << BITS_PER_WORD instead of basing the
5991 	     calculation on sign_bit.  However, on machines where
5992 	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5993 	     compiler warning, even though the code would never be
5994 	     executed.  */
5995 	  mask = sign_bit << 1;
5996 	  mask--;
5997 
5998 	  /* Set sign_extend to all the bits outside of mask.  */
5999 	  sign_extend = ~mask;
6000 
6001 	  /* Pick the lower word and sign-extend it.  */
6002 	  low = INTVAL (value);
6003 	  low &= mask;
6004 	  if (low & sign_bit)
6005 	    low |= sign_extend;
6006 
6007 	  /* Pick the higher word, shifted to the least significant
6008 	     bits, and sign-extend it.  */
6009 	  high = INTVAL (value);
6010 	  high >>= bits_per_word - 1;
6011 	  high >>= 1;
6012 	  high &= mask;
6013 	  if (high & sign_bit)
6014 	    high |= sign_extend;
6015 
6016 	  /* Store the words in the target machine order.  */
6017 	  if (WORDS_BIG_ENDIAN)
6018 	    {
6019 	      *first = GEN_INT (high);
6020 	      *second = GEN_INT (low);
6021 	    }
6022 	  else
6023 	    {
6024 	      *first = GEN_INT (low);
6025 	      *second = GEN_INT (high);
6026 	    }
6027 	}
6028       else
6029 	{
6030 	  /* The rule for using CONST_INT for a wider mode
6031 	     is that we regard the value as signed.
6032 	     So sign-extend it.  */
6033 	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6034 	  if (WORDS_BIG_ENDIAN)
6035 	    {
6036 	      *first = high;
6037 	      *second = value;
6038 	    }
6039 	  else
6040 	    {
6041 	      *first = value;
6042 	      *second = high;
6043 	    }
6044 	}
6045     }
6046   else if (GET_CODE (value) == CONST_WIDE_INT)
6047     {
6048       /* All of this is scary code and needs to be converted to
6049 	 properly work with any size integer.  */
6050       gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6051       if (WORDS_BIG_ENDIAN)
6052 	{
6053 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6054 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6055 	}
6056       else
6057 	{
6058 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6059 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6060 	}
6061     }
6062   else if (!CONST_DOUBLE_P (value))
6063     {
6064       if (WORDS_BIG_ENDIAN)
6065 	{
6066 	  *first = const0_rtx;
6067 	  *second = value;
6068 	}
6069       else
6070 	{
6071 	  *first = value;
6072 	  *second = const0_rtx;
6073 	}
6074     }
6075   else if (GET_MODE (value) == VOIDmode
6076 	   /* This is the old way we did CONST_DOUBLE integers.  */
6077 	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6078     {
6079       /* In an integer, the words are defined as most and least significant.
6080 	 So order them by the target's convention.  */
6081       if (WORDS_BIG_ENDIAN)
6082 	{
6083 	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6084 	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
6085 	}
6086       else
6087 	{
6088 	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
6089 	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6090 	}
6091     }
6092   else
6093     {
6094       long l[2];
6095 
6096       /* Note, this converts the REAL_VALUE_TYPE to the target's
6097 	 format, splits up the floating point double and outputs
6098 	 exactly 32 bits of it into each of l[0] and l[1] --
6099 	 not necessarily BITS_PER_WORD bits.  */
6100       REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6101 
6102       /* If 32 bits is an entire word for the target, but not for the host,
6103 	 then sign-extend on the host so that the number will look the same
6104 	 way on the host that it would on the target.  See for instance
6105 	 simplify_unary_operation.  The #if is needed to avoid compiler
6106 	 warnings.  */
6107 
6108 #if HOST_BITS_PER_LONG > 32
6109       if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6110 	{
6111 	  if (l[0] & ((long) 1 << 31))
6112 	    l[0] |= ((unsigned long) (-1) << 32);
6113 	  if (l[1] & ((long) 1 << 31))
6114 	    l[1] |= ((unsigned long) (-1) << 32);
6115 	}
6116 #endif
6117 
6118       *first = GEN_INT (l[0]);
6119       *second = GEN_INT (l[1]);
6120     }
6121 }
6122 
6123 /* Return true if X is a sign_extract or zero_extract from the least
6124    significant bit.  */
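
/* E.g. (zero_extract:SI (reg:SI 0) (const_int 8) (const_int 0)) on a
   !BITS_BIG_ENDIAN target extracts bits [7:0] and therefore qualifies.  */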
6125 
6126 static bool
6127 lsb_bitfield_op_p (rtx x)
6128 {
6129   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6130     {
6131       machine_mode mode = GET_MODE (XEXP (x, 0));
6132       HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6133       HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6134       poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6135 
6136       return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6137     }
6138   return false;
6139 }
6140 
6141 /* Strip outer address "mutations" from LOC and return a pointer to the
6142    inner value.  If OUTER_CODE is nonnull, store the code of the innermost
6143    stripped expression there.
6144 
6145    "Mutations" either convert between modes or apply some kind of
6146    extension, truncation or alignment.  */
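
/* E.g. given (and:DI (zero_extend:DI (reg:SI 0)) (const_int -8)),
   both the AND (an alignment) and the ZERO_EXTEND (a mode conversion)
   are stripped, leaving a pointer to the inner REG.  */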
6147 
6148 rtx *
6149 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6150 {
6151   for (;;)
6152     {
6153       enum rtx_code code = GET_CODE (*loc);
6154       if (GET_RTX_CLASS (code) == RTX_UNARY)
6155 	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6156 	   used to convert between pointer sizes.  */
6157 	loc = &XEXP (*loc, 0);
6158       else if (lsb_bitfield_op_p (*loc))
6159 	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6160 	   acts as a combined truncation and extension.  */
6161 	loc = &XEXP (*loc, 0);
6162       else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6163 	/* (and ... (const_int -X)) is used to align to X bytes.  */
6164 	loc = &XEXP (*loc, 0);
6165       else if (code == SUBREG
6166                && !OBJECT_P (SUBREG_REG (*loc))
6167                && subreg_lowpart_p (*loc))
6168 	/* A (subreg (operator ...) ...) inside an address is used for mode
6169 	   conversion too.  */
6170 	loc = &SUBREG_REG (*loc);
6171       else
6172 	return loc;
6173       if (outer_code)
6174 	*outer_code = code;
6175     }
6176 }
6177 
6178 /* Return true if CODE applies some kind of scale.  The scaled value is
6179    the first operand and the scale is the second.  */
6180 
6181 static bool
6182 binary_scale_code_p (enum rtx_code code)
6183 {
6184   return (code == MULT
6185           || code == ASHIFT
6186           /* Needed by ARM targets.  */
6187           || code == ASHIFTRT
6188           || code == LSHIFTRT
6189           || code == ROTATE
6190           || code == ROTATERT);
6191 }
6192 
6193 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6194    (see address_info).  Return null otherwise.  */
6195 
6196 static rtx *
6197 get_base_term (rtx *inner)
6198 {
6199   if (GET_CODE (*inner) == LO_SUM)
6200     inner = strip_address_mutations (&XEXP (*inner, 0));
6201   if (REG_P (*inner)
6202       || MEM_P (*inner)
6203       || GET_CODE (*inner) == SUBREG
6204       || GET_CODE (*inner) == SCRATCH)
6205     return inner;
6206   return 0;
6207 }
6208 
6209 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6210    (see address_info).  Return null otherwise.  */
6211 
6212 static rtx *
6213 get_index_term (rtx *inner)
6214 {
6215   /* At present, only constant scales are allowed.  */
6216   if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6217     inner = strip_address_mutations (&XEXP (*inner, 0));
6218   if (REG_P (*inner)
6219       || MEM_P (*inner)
6220       || GET_CODE (*inner) == SUBREG
6221       || GET_CODE (*inner) == SCRATCH)
6222     return inner;
6223   return 0;
6224 }
6225 
6226 /* Set the segment part of address INFO to LOC, given that INNER is the
6227    unmutated value.  */
6228 
6229 static void
6230 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6231 {
6232   gcc_assert (!info->segment);
6233   info->segment = loc;
6234   info->segment_term = inner;
6235 }
6236 
6237 /* Set the base part of address INFO to LOC, given that INNER is the
6238    unmutated value.  */
6239 
6240 static void
6241 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6242 {
6243   gcc_assert (!info->base);
6244   info->base = loc;
6245   info->base_term = inner;
6246 }
6247 
6248 /* Set the index part of address INFO to LOC, given that INNER is the
6249    unmutated value.  */
6250 
6251 static void
6252 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6253 {
6254   gcc_assert (!info->index);
6255   info->index = loc;
6256   info->index_term = inner;
6257 }
6258 
6259 /* Set the displacement part of address INFO to LOC, given that INNER
6260    is the constant term.  */
6261 
6262 static void
6263 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6264 {
6265   gcc_assert (!info->disp);
6266   info->disp = loc;
6267   info->disp_term = inner;
6268 }
6269 
6270 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
6271    rest of INFO accordingly.  */
6272 
6273 static void
6274 decompose_incdec_address (struct address_info *info)
6275 {
6276   info->autoinc_p = true;
6277 
6278   rtx *base = &XEXP (*info->inner, 0);
6279   set_address_base (info, base, base);
6280   gcc_checking_assert (info->base == info->base_term);
6281 
6282   /* These addresses are only valid when the size of the addressed
6283      value is known.  */
6284   gcc_checking_assert (info->mode != VOIDmode);
6285 }
6286 
6287 /* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
6288    of INFO accordingly.  */
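
/* E.g. for (pre_modify (reg:SI 1) (plus:SI (reg:SI 1) (const_int -16))),
   the register becomes the base and -16 the displacement.  */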
6289 
6290 static void
6291 decompose_automod_address (struct address_info *info)
6292 {
6293   info->autoinc_p = true;
6294 
6295   rtx *base = &XEXP (*info->inner, 0);
6296   set_address_base (info, base, base);
6297   gcc_checking_assert (info->base == info->base_term);
6298 
6299   rtx plus = XEXP (*info->inner, 1);
6300   gcc_assert (GET_CODE (plus) == PLUS);
6301 
6302   info->base_term2 = &XEXP (plus, 0);
6303   gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6304 
6305   rtx *step = &XEXP (plus, 1);
6306   rtx *inner_step = strip_address_mutations (step);
6307   if (CONSTANT_P (*inner_step))
6308     set_address_disp (info, step, inner_step);
6309   else
6310     set_address_index (info, step, inner_step);
6311 }
6312 
6313 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6314    values in [PTR, END).  Return a pointer to the end of the used array.  */
6315 
6316 static rtx **
6317 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6318 {
6319   rtx x = *loc;
6320   if (GET_CODE (x) == PLUS)
6321     {
6322       ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6323       ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6324     }
6325   else
6326     {
6327       gcc_assert (ptr != end);
6328       *ptr++ = loc;
6329     }
6330   return ptr;
6331 }
6332 
6333 /* Evaluate the likelihood of X being a base or index value, returning
6334    positive if it is likely to be a base, negative if it is likely to be
6335    an index, and 0 if we can't tell.  Make the magnitude of the return
6336    value reflect the amount of confidence we have in the answer.
6337 
6338    MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */
6339 
6340 static int
6341 baseness (rtx x, machine_mode mode, addr_space_t as,
6342 	  enum rtx_code outer_code, enum rtx_code index_code)
6343 {
6344   /* Believe *_POINTER unless the address shape requires otherwise.  */
6345   if (REG_P (x) && REG_POINTER (x))
6346     return 2;
6347   if (MEM_P (x) && MEM_POINTER (x))
6348     return 2;
6349 
6350   if (REG_P (x) && HARD_REGISTER_P (x))
6351     {
6352       /* X is a hard register.  If it only fits one of the base
6353 	 or index classes, choose that interpretation.  */
6354       int regno = REGNO (x);
6355       bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6356       bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6357       if (base_p != index_p)
6358 	return base_p ? 1 : -1;
6359     }
6360   return 0;
6361 }
6362 
6363 /* INFO->INNER describes a normal, non-automodified address.
6364    Fill in the rest of INFO accordingly.  */
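
/* E.g. for (plus (plus (mult (reg:SI 1) (const_int 4)) (reg:SI 2))
   (const_int 8)), the MULT term becomes the index, (reg:SI 2) the base
   and (const_int 8) the displacement.  */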
6365 
6366 static void
6367 decompose_normal_address (struct address_info *info)
6368 {
6369   /* Treat the address as the sum of up to four values.  */
6370   rtx *ops[4];
6371   size_t n_ops = extract_plus_operands (info->inner, ops,
6372 					ops + ARRAY_SIZE (ops)) - ops;
6373 
6374   /* If there is more than one component, any base component is in a PLUS.  */
6375   if (n_ops > 1)
6376     info->base_outer_code = PLUS;
6377 
6378   /* Try to classify each sum operand now.  Leave those that could be
6379      either a base or an index in OPS.  */
6380   rtx *inner_ops[4];
6381   size_t out = 0;
6382   for (size_t in = 0; in < n_ops; ++in)
6383     {
6384       rtx *loc = ops[in];
6385       rtx *inner = strip_address_mutations (loc);
6386       if (CONSTANT_P (*inner))
6387 	set_address_disp (info, loc, inner);
6388       else if (GET_CODE (*inner) == UNSPEC)
6389 	set_address_segment (info, loc, inner);
6390       else
6391 	{
6392 	  /* The only other possibilities are a base or an index.  */
6393 	  rtx *base_term = get_base_term (inner);
6394 	  rtx *index_term = get_index_term (inner);
6395 	  gcc_assert (base_term || index_term);
6396 	  if (!base_term)
6397 	    set_address_index (info, loc, index_term);
6398 	  else if (!index_term)
6399 	    set_address_base (info, loc, base_term);
6400 	  else
6401 	    {
6402 	      gcc_assert (base_term == index_term);
6403 	      ops[out] = loc;
6404 	      inner_ops[out] = base_term;
6405 	      ++out;
6406 	    }
6407 	}
6408     }
6409 
6410   /* Classify the remaining OPS members as bases and indexes.  */
6411   if (out == 1)
6412     {
6413       /* If we haven't seen a base or an index yet, assume that this is
6414 	 the base.  If we were confident that another term was the base
6415 	 or index, treat the remaining operand as the other kind.  */
6416       if (!info->base)
6417 	set_address_base (info, ops[0], inner_ops[0]);
6418       else
6419 	set_address_index (info, ops[0], inner_ops[0]);
6420     }
6421   else if (out == 2)
6422     {
6423       /* In the event of a tie, assume the base comes first.  */
6424       if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6425 		    GET_CODE (*ops[1]))
6426 	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6427 		       GET_CODE (*ops[0])))
6428 	{
6429 	  set_address_base (info, ops[0], inner_ops[0]);
6430 	  set_address_index (info, ops[1], inner_ops[1]);
6431 	}
6432       else
6433 	{
6434 	  set_address_base (info, ops[1], inner_ops[1]);
6435 	  set_address_index (info, ops[0], inner_ops[0]);
6436 	}
6437     }
6438   else
6439     gcc_assert (out == 0);
6440 }
6441 
6442 /* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
6443    or VOIDmode if not known.  AS is the address space associated with LOC.
6444    OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */
6445 
6446 void
6447 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6448 		   addr_space_t as, enum rtx_code outer_code)
6449 {
6450   memset (info, 0, sizeof (*info));
6451   info->mode = mode;
6452   info->as = as;
6453   info->addr_outer_code = outer_code;
6454   info->outer = loc;
6455   info->inner = strip_address_mutations (loc, &outer_code);
6456   info->base_outer_code = outer_code;
6457   switch (GET_CODE (*info->inner))
6458     {
6459     case PRE_DEC:
6460     case PRE_INC:
6461     case POST_DEC:
6462     case POST_INC:
6463       decompose_incdec_address (info);
6464       break;
6465 
6466     case PRE_MODIFY:
6467     case POST_MODIFY:
6468       decompose_automod_address (info);
6469       break;
6470 
6471     default:
6472       decompose_normal_address (info);
6473       break;
6474     }
6475 }
6476 
6477 /* Describe address operand LOC in INFO.  */
6478 
6479 void
6480 decompose_lea_address (struct address_info *info, rtx *loc)
6481 {
6482   decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6483 }
6484 
6485 /* Describe the address of MEM X in INFO.  */
6486 
6487 void
6488 decompose_mem_address (struct address_info *info, rtx x)
6489 {
6490   gcc_assert (MEM_P (x));
6491   decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6492 		     MEM_ADDR_SPACE (x), MEM);
6493 }
6494 
6495 /* Update INFO after a change to the address it describes.  */
6496 
6497 void
6498 update_address (struct address_info *info)
6499 {
6500   decompose_address (info, info->outer, info->mode, info->as,
6501 		     info->addr_outer_code);
6502 }
6503 
6504 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6505    more complicated than that.  */
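
/* E.g. (mult (reg:SI 1) (const_int 4)) and (ashift (reg:SI 1)
   (const_int 2)) both give a scale of 4, while a bare index register
   gives 1.  */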
6506 
6507 HOST_WIDE_INT
6508 get_index_scale (const struct address_info *info)
6509 {
6510   rtx index = *info->index;
6511   if (GET_CODE (index) == MULT
6512       && CONST_INT_P (XEXP (index, 1))
6513       && info->index_term == &XEXP (index, 0))
6514     return INTVAL (XEXP (index, 1));
6515 
6516   if (GET_CODE (index) == ASHIFT
6517       && CONST_INT_P (XEXP (index, 1))
6518       && info->index_term == &XEXP (index, 0))
6519     return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6520 
6521   if (info->index == info->index_term)
6522     return 1;
6523 
6524   return 0;
6525 }
6526 
6527 /* Return the "index code" of INFO, in the form required by
6528    ok_for_base_p_1.  */
6529 
6530 enum rtx_code
6531 get_index_code (const struct address_info *info)
6532 {
6533   if (info->index)
6534     return GET_CODE (*info->index);
6535 
6536   if (info->disp)
6537     return GET_CODE (*info->disp);
6538 
6539   return SCRATCH;
6540 }
6541 
6542 /* Return true if RTL X contains a SYMBOL_REF.  */
6543 
6544 bool
6545 contains_symbol_ref_p (const_rtx x)
6546 {
6547   subrtx_iterator::array_type array;
6548   FOR_EACH_SUBRTX (iter, array, x, ALL)
6549     if (SYMBOL_REF_P (*iter))
6550       return true;
6551 
6552   return false;
6553 }
6554 
6555 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */
6556 
6557 bool
6558 contains_symbolic_reference_p (const_rtx x)
6559 {
6560   subrtx_iterator::array_type array;
6561   FOR_EACH_SUBRTX (iter, array, x, ALL)
6562     if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6563       return true;
6564 
6565   return false;
6566 }
6567 
6568 /* Return true if RTL X contains a constant pool address.  */
6569 
6570 bool
6571 contains_constant_pool_address_p (const_rtx x)
6572 {
6573   subrtx_iterator::array_type array;
6574   FOR_EACH_SUBRTX (iter, array, x, ALL)
6575     if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
6576       return true;
6577 
6578   return false;
6579 }
6580 
6581 
6582 /* Return true if X contains a thread-local symbol.  */
6583 
6584 bool
6585 tls_referenced_p (const_rtx x)
6586 {
6587   if (!targetm.have_tls)
6588     return false;
6589 
6590   subrtx_iterator::array_type array;
6591   FOR_EACH_SUBRTX (iter, array, x, ALL)
6592     if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6593       return true;
6594   return false;
6595 }
6596