xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/rtlanal.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /* Analyze RTL for GNU compiler.
2    Copyright (C) 1987-2017 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "insn-config.h"
33 #include "regs.h"
34 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
35 #include "recog.h"
36 #include "addresses.h"
37 #include "rtl-iter.h"
38 
39 /* Forward declarations */
40 static void set_of_1 (rtx, const_rtx, void *);
41 static bool covers_regno_p (const_rtx, unsigned int);
42 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
43 static int computed_jump_p_1 (const_rtx);
44 static void parms_set (rtx, const_rtx, void *);
45 
46 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
47                                                    const_rtx, machine_mode,
48                                                    unsigned HOST_WIDE_INT);
49 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
50 					     const_rtx, machine_mode,
51                                              unsigned HOST_WIDE_INT);
52 static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
53                                                 machine_mode,
54                                                 unsigned int);
55 static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
56                                           machine_mode, unsigned int);
57 
58 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
59 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
60 
61 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
62    If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
63    SIGN_EXTEND then while narrowing we also have to enforce the
64    representation and sign-extend the value to mode DESTINATION_REP.
65 
66    If the value is already sign-extended to DESTINATION_REP mode we
67    can just switch to DESTINATION mode on it.  For each pair of
68    integral modes SOURCE and DESTINATION, when truncating from SOURCE
69    to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
70    contains the number of high-order bits in SOURCE that have to be
71    copies of the sign-bit so that we can do this mode-switch to
72    DESTINATION.  */
73 
74 static unsigned int
75 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
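/* Illustrative example (an assumption for exposition, not a statement about
   any particular target): on a target where QImode is 8 bits, SImode is 32
   bits and TARGET_MODE_REP_EXTENDED (QImode, SImode) returns SIGN_EXTEND,
   an SImode value can be switched to QImode only if bits 31 down to 7
   (32 - 8 + 1 = 25 bits) are all equal, i.e. the value is already
   sign-extended from its low 8 bits; in the simplest case that is the
   threshold recorded in NUM_SIGN_BIT_COPIES_IN_REP[SImode][QImode].  */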
76 
77 /* Store X into index I of ARRAY.  ARRAY is known to have at least I
78    elements.  Return the new base of ARRAY.  */
79 
80 template <typename T>
81 typename T::value_type *
82 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
83 						  value_type *base,
84 						  size_t i, value_type x)
85 {
86   if (base == array.stack)
87     {
88       if (i < LOCAL_ELEMS)
89 	{
90 	  base[i] = x;
91 	  return base;
92 	}
93       gcc_checking_assert (i == LOCAL_ELEMS);
94       /* A previous iteration might also have moved from the stack to the
95 	 heap, in which case the heap array will already be big enough.  */
96       if (vec_safe_length (array.heap) <= i)
97 	vec_safe_grow (array.heap, i + 1);
98       base = array.heap->address ();
99       memcpy (base, array.stack, sizeof (array.stack));
100       base[LOCAL_ELEMS] = x;
101       return base;
102     }
103   unsigned int length = array.heap->length ();
104   if (length > i)
105     {
106       gcc_checking_assert (base == array.heap->address ());
107       base[i] = x;
108       return base;
109     }
110   else
111     {
112       gcc_checking_assert (i == length);
113       vec_safe_push (array.heap, x);
114       return array.heap->address ();
115     }
116 }
117 
118 /* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
119    number of elements added to the worklist.  */
120 
121 template <typename T>
122 size_t
123 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
124 						    value_type *base,
125 						    size_t end, rtx_type x)
126 {
127   enum rtx_code code = GET_CODE (x);
128   const char *format = GET_RTX_FORMAT (code);
129   size_t orig_end = end;
130   if (__builtin_expect (INSN_P (x), false))
131     {
132       /* Put the pattern at the top of the queue, since that's what
133 	 we're likely to want most.  It also allows for the SEQUENCE
134 	 code below.  */
135       for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
136 	if (format[i] == 'e')
137 	  {
138 	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
139 	    if (__builtin_expect (end < LOCAL_ELEMS, true))
140 	      base[end++] = subx;
141 	    else
142 	      base = add_single_to_queue (array, base, end++, subx);
143 	  }
144     }
145   else
146     for (int i = 0; format[i]; ++i)
147       if (format[i] == 'e')
148 	{
149 	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
150 	  if (__builtin_expect (end < LOCAL_ELEMS, true))
151 	    base[end++] = subx;
152 	  else
153 	    base = add_single_to_queue (array, base, end++, subx);
154 	}
155       else if (format[i] == 'E')
156 	{
157 	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
158 	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
159 	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
160 	    for (unsigned int j = 0; j < length; j++)
161 	      base[end++] = T::get_value (vec[j]);
162 	  else
163 	    for (unsigned int j = 0; j < length; j++)
164 	      base = add_single_to_queue (array, base, end++,
165 					  T::get_value (vec[j]));
166 	  if (code == SEQUENCE && end == length)
167 	    /* If the subrtxes of the sequence fill the entire array then
168 	       we know that no other parts of a containing insn are queued.
169 	       The caller is therefore iterating over the sequence as a
170 	       PATTERN (...), so we also want the patterns of the
171 	       subinstructions.  */
172 	    for (unsigned int j = 0; j < length; j++)
173 	      {
174 		typename T::rtx_type x = T::get_rtx (base[j]);
175 		if (INSN_P (x))
176 		  base[j] = T::get_value (PATTERN (x));
177 	      }
178 	}
179   return end - orig_end;
180 }
181 
182 template <typename T>
183 void
184 generic_subrtx_iterator <T>::free_array (array_type &array)
185 {
186   vec_free (array.heap);
187 }
188 
189 template <typename T>
190 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
191 
192 template class generic_subrtx_iterator <const_rtx_accessor>;
193 template class generic_subrtx_iterator <rtx_var_accessor>;
194 template class generic_subrtx_iterator <rtx_ptr_accessor>;
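/* A minimal usage sketch for the iterators instantiated above (the same
   pattern appears in contains_mem_rtx_p later in this file):

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, ALL)
       if (MEM_P (*iter))
	 return true;

   ARRAY provides the LOCAL_ELEMS-sized on-stack worklist; the queue code
   above spills to ARRAY.heap only when that worklist overflows.  */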
195 
196 /* Return 1 if the value of X is unstable
197    (would be different at a different point in the program).
198    The frame pointer, arg pointer, etc. are considered stable
199    (within one function) and so is anything marked `unchanging'.  */
200 
201 int
202 rtx_unstable_p (const_rtx x)
203 {
204   const RTX_CODE code = GET_CODE (x);
205   int i;
206   const char *fmt;
207 
208   switch (code)
209     {
210     case MEM:
211       return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
212 
213     case CONST:
214     CASE_CONST_ANY:
215     case SYMBOL_REF:
216     case LABEL_REF:
217       return 0;
218 
219     case REG:
220       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
221       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
222 	  /* The arg pointer varies if it is not a fixed register.  */
223 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
224 	return 0;
225       /* ??? When call-clobbered, the value is stable modulo the restore
226 	 that must happen after a call.  This currently screws up local-alloc
227 	 into believing that the restore is not needed.  */
228       if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
229 	return 0;
230       return 1;
231 
232     case ASM_OPERANDS:
233       if (MEM_VOLATILE_P (x))
234 	return 1;
235 
236       /* Fall through.  */
237 
238     default:
239       break;
240     }
241 
242   fmt = GET_RTX_FORMAT (code);
243   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
244     if (fmt[i] == 'e')
245       {
246 	if (rtx_unstable_p (XEXP (x, i)))
247 	  return 1;
248       }
249     else if (fmt[i] == 'E')
250       {
251 	int j;
252 	for (j = 0; j < XVECLEN (x, i); j++)
253 	  if (rtx_unstable_p (XVECEXP (x, i, j)))
254 	    return 1;
255       }
256 
257   return 0;
258 }
259 
260 /* Return 1 if X has a value that can vary even between two
261    executions of the program.  0 means X can be compared reliably
262    against certain constants or near-constants.
263    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
264    zero, we are slightly more conservative.
265    The frame pointer and the arg pointer are considered constant.  */
266 
267 bool
268 rtx_varies_p (const_rtx x, bool for_alias)
269 {
270   RTX_CODE code;
271   int i;
272   const char *fmt;
273 
274   if (!x)
275     return 0;
276 
277   code = GET_CODE (x);
278   switch (code)
279     {
280     case MEM:
281       return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
282 
283     case CONST:
284     CASE_CONST_ANY:
285     case SYMBOL_REF:
286     case LABEL_REF:
287       return 0;
288 
289     case REG:
290       /* Note that we have to test for the actual rtx used for the frame
291 	 and arg pointers and not just the register number in case we have
292 	 eliminated the frame and/or arg pointer and are using it
293 	 for pseudos.  */
294       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
295 	  /* The arg pointer varies if it is not a fixed register.  */
296 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
297 	return 0;
298       if (x == pic_offset_table_rtx
299 	  /* ??? When call-clobbered, the value is stable modulo the restore
300 	     that must happen after a call.  This currently screws up
301 	     local-alloc into believing that the restore is not needed, so we
302 	     must return 0 only if we are called from alias analysis.  */
303 	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
304 	return 0;
305       return 1;
306 
307     case LO_SUM:
308       /* The operand 0 of a LO_SUM is considered constant
309 	 (in fact it is related specifically to operand 1)
310 	 during alias analysis.  */
311       return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
312 	     || rtx_varies_p (XEXP (x, 1), for_alias);
313 
314     case ASM_OPERANDS:
315       if (MEM_VOLATILE_P (x))
316 	return 1;
317 
318       /* Fall through.  */
319 
320     default:
321       break;
322     }
323 
324   fmt = GET_RTX_FORMAT (code);
325   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
326     if (fmt[i] == 'e')
327       {
328 	if (rtx_varies_p (XEXP (x, i), for_alias))
329 	  return 1;
330       }
331     else if (fmt[i] == 'E')
332       {
333 	int j;
334 	for (j = 0; j < XVECLEN (x, i); j++)
335 	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
336 	    return 1;
337       }
338 
339   return 0;
340 }
341 
342 /* Compute an approximation for the offset between registers FROM
343    and TO for the current function, as it was at the start of the
344    routine.  */
345 
346 static HOST_WIDE_INT
347 get_initial_register_offset (int from, int to)
348 {
349   static const struct elim_table_t
350   {
351     const int from;
352     const int to;
353   } table[] = ELIMINABLE_REGS;
354   HOST_WIDE_INT offset1, offset2;
355   unsigned int i, j;
356 
357   if (to == from)
358     return 0;
359 
360   /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
361      is completed, but we need to give at least an estimate for the stack
362      pointer based on the frame size.  */
363   if (!epilogue_completed)
364     {
365       offset1 = crtl->outgoing_args_size + get_frame_size ();
366 #if !STACK_GROWS_DOWNWARD
367       offset1 = - offset1;
368 #endif
369       if (to == STACK_POINTER_REGNUM)
370 	return offset1;
371       else if (from == STACK_POINTER_REGNUM)
372 	return - offset1;
373       else
374 	return 0;
375      }
376 
377   for (i = 0; i < ARRAY_SIZE (table); i++)
378       if (table[i].from == from)
379 	{
380 	  if (table[i].to == to)
381 	    {
382 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
383 					  offset1);
384 	      return offset1;
385 	    }
386 	  for (j = 0; j < ARRAY_SIZE (table); j++)
387 	    {
388 	      if (table[j].to == to
389 		  && table[j].from == table[i].to)
390 		{
391 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
392 					      offset1);
393 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
394 					      offset2);
395 		  return offset1 + offset2;
396 		}
397 	      if (table[j].from == to
398 		  && table[j].to == table[i].to)
399 		{
400 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
401 					      offset1);
402 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
403 					      offset2);
404 		  return offset1 - offset2;
405 		}
406 	    }
407 	}
408       else if (table[i].to == from)
409 	{
410 	  if (table[i].from == to)
411 	    {
412 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
413 					  offset1);
414 	      return - offset1;
415 	    }
416 	  for (j = 0; j < ARRAY_SIZE (table); j++)
417 	    {
418 	      if (table[j].to == to
419 		  && table[j].from == table[i].from)
420 		{
421 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
422 					      offset1);
423 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
424 					      offset2);
425 		  return - offset1 + offset2;
426 		}
427 	      if (table[j].from == to
428 		  && table[j].to == table[i].from)
429 		{
430 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
431 					      offset1);
432 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
433 					      offset2);
434 		  return - offset1 - offset2;
435 		}
436 	    }
437 	}
438 
439   /* If the requested register combination was not found,
440      try a simpler combination.  */
441   if (from == ARG_POINTER_REGNUM)
442     return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
443   else if (to == ARG_POINTER_REGNUM)
444     return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
445   else if (from == HARD_FRAME_POINTER_REGNUM)
446     return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
447   else if (to == HARD_FRAME_POINTER_REGNUM)
448     return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
449   else
450     return 0;
451 }
452 
453 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
454    bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
455    UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
456    references on strict alignment machines.  */
457 
458 static int
459 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
460 		       machine_mode mode, bool unaligned_mems)
461 {
462   enum rtx_code code = GET_CODE (x);
463 
464   /* The offset must be a multiple of the mode size if we are considering
465      unaligned memory references on strict alignment machines.  */
466   if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
467     {
468       HOST_WIDE_INT actual_offset = offset;
469 
470 #ifdef SPARC_STACK_BOUNDARY_HACK
471       /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
472 	     the real alignment of %sp.  However, when it does this, the
473 	     alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
474       if (SPARC_STACK_BOUNDARY_HACK
475 	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
476 	actual_offset -= STACK_POINTER_OFFSET;
477 #endif
478 
479       if (actual_offset % GET_MODE_SIZE (mode) != 0)
480 	return 1;
481     }
482 
483   switch (code)
484     {
485     case SYMBOL_REF:
486       if (SYMBOL_REF_WEAK (x))
487 	return 1;
488       if (!CONSTANT_POOL_ADDRESS_P (x))
489 	{
490 	  tree decl;
491 	  HOST_WIDE_INT decl_size;
492 
493 	  if (offset < 0)
494 	    return 1;
495 	  if (size == 0)
496 	    size = GET_MODE_SIZE (mode);
497 	  if (size == 0)
498 	    return offset != 0;
499 
500 	  /* If the size of the access or of the symbol is unknown,
501 	     assume the worst.  */
502 	  decl = SYMBOL_REF_DECL (x);
503 
504 	  /* Else check that the access is in bounds.  TODO: restructure
505 	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
506 	  if (!decl)
507 	    decl_size = -1;
508 	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
509 	    decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
510 			 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
511 			 : -1);
512 	  else if (TREE_CODE (decl) == STRING_CST)
513 	    decl_size = TREE_STRING_LENGTH (decl);
514 	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
515 	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
516 	  else
517 	    decl_size = -1;
518 
519 	  return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
520         }
521 
522       return 0;
523 
524     case LABEL_REF:
525       return 0;
526 
527     case REG:
528       /* Stack references are assumed not to trap, but we need to deal with
529 	 nonsensical offsets.  */
530       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
531 	 || x == stack_pointer_rtx
532 	 /* The arg pointer varies if it is not a fixed register.  */
533 	 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
534 	{
535 #ifdef RED_ZONE_SIZE
536 	  HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
537 #else
538 	  HOST_WIDE_INT red_zone_size = 0;
539 #endif
540 	  HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
541 					 / BITS_PER_UNIT;
542 	  HOST_WIDE_INT low_bound, high_bound;
543 
544 	  if (size == 0)
545 	    size = GET_MODE_SIZE (mode);
546 	  if (size == 0)
547 	    return 1;
548 
549 	  if (x == frame_pointer_rtx)
550 	    {
551 	      if (FRAME_GROWS_DOWNWARD)
552 		{
553 		  high_bound = STARTING_FRAME_OFFSET;
554 		  low_bound  = high_bound - get_frame_size ();
555 		}
556 	      else
557 		{
558 		  low_bound  = STARTING_FRAME_OFFSET;
559 		  high_bound = low_bound + get_frame_size ();
560 		}
561 	    }
562 	  else if (x == hard_frame_pointer_rtx)
563 	    {
564 	      HOST_WIDE_INT sp_offset
565 		= get_initial_register_offset (STACK_POINTER_REGNUM,
566 					       HARD_FRAME_POINTER_REGNUM);
567 	      HOST_WIDE_INT ap_offset
568 		= get_initial_register_offset (ARG_POINTER_REGNUM,
569 					       HARD_FRAME_POINTER_REGNUM);
570 
571 #if STACK_GROWS_DOWNWARD
572 	      low_bound  = sp_offset - red_zone_size - stack_boundary;
573 	      high_bound = ap_offset
574 			   + FIRST_PARM_OFFSET (current_function_decl)
575 #if !ARGS_GROW_DOWNWARD
576 			   + crtl->args.size
577 #endif
578 			   + stack_boundary;
579 #else
580 	      high_bound = sp_offset + red_zone_size + stack_boundary;
581 	      low_bound  = ap_offset
582 			   + FIRST_PARM_OFFSET (current_function_decl)
583 #if ARGS_GROW_DOWNWARD
584 			   - crtl->args.size
585 #endif
586 			   - stack_boundary;
587 #endif
588 	    }
589 	  else if (x == stack_pointer_rtx)
590 	    {
591 	      HOST_WIDE_INT ap_offset
592 		= get_initial_register_offset (ARG_POINTER_REGNUM,
593 					       STACK_POINTER_REGNUM);
594 
595 #if STACK_GROWS_DOWNWARD
596 	      low_bound  = - red_zone_size - stack_boundary;
597 	      high_bound = ap_offset
598 			   + FIRST_PARM_OFFSET (current_function_decl)
599 #if !ARGS_GROW_DOWNWARD
600 			   + crtl->args.size
601 #endif
602 			   + stack_boundary;
603 #else
604 	      high_bound = red_zone_size + stack_boundary;
605 	      low_bound  = ap_offset
606 			   + FIRST_PARM_OFFSET (current_function_decl)
607 #if ARGS_GROW_DOWNWARD
608 			   - crtl->args.size
609 #endif
610 			   - stack_boundary;
611 #endif
612 	    }
613 	  else
614 	    {
615 	      /* We assume that accesses are safe to at least the
616 		 next stack boundary.
617 		 Examples are varargs and __builtin_return_address.  */
618 #if ARGS_GROW_DOWNWARD
619 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
620 			   + stack_boundary;
621 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
622 			   - crtl->args.size - stack_boundary;
623 #else
624 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
625 			   - stack_boundary;
626 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
627 			   + crtl->args.size + stack_boundary;
628 #endif
629 	    }
630 
631 	  if (offset >= low_bound && offset <= high_bound - size)
632 	    return 0;
633 	  return 1;
634 	}
635       /* All of the virtual frame registers are stack references.  */
636       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
637 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
638 	return 0;
639       return 1;
640 
641     case CONST:
642       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
643 				    mode, unaligned_mems);
644 
645     case PLUS:
646       /* An address is assumed not to trap if:
647          - it is the pic register plus a constant.  */
648       if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
649 	return 0;
650 
651       /* - or it is an address that can't trap plus a constant integer.  */
652       if (CONST_INT_P (XEXP (x, 1))
653 	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
654 				     size, mode, unaligned_mems))
655 	return 0;
656 
657       return 1;
658 
659     case LO_SUM:
660     case PRE_MODIFY:
661       return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
662 				    mode, unaligned_mems);
663 
664     case PRE_DEC:
665     case PRE_INC:
666     case POST_DEC:
667     case POST_INC:
668     case POST_MODIFY:
669       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
670 				    mode, unaligned_mems);
671 
672     default:
673       break;
674     }
675 
676   /* If it isn't one of the cases above, it can cause a trap.  */
677   return 1;
678 }
679 
680 /* Return nonzero if the use of X as an address in a MEM can cause a trap.  */
681 
682 int
683 rtx_addr_can_trap_p (const_rtx x)
684 {
685   return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
686 }
687 
688 /* Return true if X contains a MEM subrtx.  */
689 
690 bool
691 contains_mem_rtx_p (rtx x)
692 {
693   subrtx_iterator::array_type array;
694   FOR_EACH_SUBRTX (iter, array, x, ALL)
695     if (MEM_P (*iter))
696       return true;
697 
698   return false;
699 }
700 
701 /* Return true if X is an address that is known to not be zero.  */
702 
703 bool
704 nonzero_address_p (const_rtx x)
705 {
706   const enum rtx_code code = GET_CODE (x);
707 
708   switch (code)
709     {
710     case SYMBOL_REF:
711       return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
712 
713     case LABEL_REF:
714       return true;
715 
716     case REG:
717       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
718       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
719 	  || x == stack_pointer_rtx
720 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
721 	return true;
722       /* All of the virtual frame registers are stack references.  */
723       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
724 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
725 	return true;
726       return false;
727 
728     case CONST:
729       return nonzero_address_p (XEXP (x, 0));
730 
731     case PLUS:
732       /* Handle PIC references.  */
733       if (XEXP (x, 0) == pic_offset_table_rtx
734 	       && CONSTANT_P (XEXP (x, 1)))
735 	return true;
736       return false;
737 
738     case PRE_MODIFY:
739       /* Similar to the above; allow positive offsets.  Further, since
740 	 auto-inc is only allowed in memories, the register must be a
741 	 pointer.  */
742       if (CONST_INT_P (XEXP (x, 1))
743 	  && INTVAL (XEXP (x, 1)) > 0)
744 	return true;
745       return nonzero_address_p (XEXP (x, 0));
746 
747     case PRE_INC:
748       /* Similarly.  Further, the offset is always positive.  */
749       return true;
750 
751     case PRE_DEC:
752     case POST_DEC:
753     case POST_INC:
754     case POST_MODIFY:
755       return nonzero_address_p (XEXP (x, 0));
756 
757     case LO_SUM:
758       return nonzero_address_p (XEXP (x, 1));
759 
760     default:
761       break;
762     }
763 
764   /* If it isn't one of the cases above, it might be zero.  */
765   return false;
766 }
767 
768 /* Return 1 if X refers to a memory location whose address
769    cannot be compared reliably with constant addresses,
770    or if X refers to a BLKmode memory object.
771    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
772    zero, we are slightly more conservative.  */
773 
774 bool
775 rtx_addr_varies_p (const_rtx x, bool for_alias)
776 {
777   enum rtx_code code;
778   int i;
779   const char *fmt;
780 
781   if (x == 0)
782     return 0;
783 
784   code = GET_CODE (x);
785   if (code == MEM)
786     return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
787 
788   fmt = GET_RTX_FORMAT (code);
789   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
790     if (fmt[i] == 'e')
791       {
792 	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
793 	  return 1;
794       }
795     else if (fmt[i] == 'E')
796       {
797 	int j;
798 	for (j = 0; j < XVECLEN (x, i); j++)
799 	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
800 	    return 1;
801       }
802   return 0;
803 }
804 
805 /* Return the CALL in X if there is one.  */
806 
807 rtx
808 get_call_rtx_from (rtx x)
809 {
810   if (INSN_P (x))
811     x = PATTERN (x);
812   if (GET_CODE (x) == PARALLEL)
813     x = XVECEXP (x, 0, 0);
814   if (GET_CODE (x) == SET)
815     x = SET_SRC (x);
816   if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
817     return x;
818   return NULL_RTX;
819 }
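/* For example, a call_insn whose pattern is
   (set (reg:SI 0) (call (mem:QI (symbol_ref "foo")) (const_int 0)))
   yields the inner CALL rtx, as does the same CALL wrapped in a PARALLEL;
   NULL_RTX is returned when no such CALL is found.  (The symbol name and
   modes above are purely illustrative.)  */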
820 
821 /* Return the value of the integer term in X, if one is apparent;
822    otherwise return 0.
823    Only obvious integer terms are detected.
824    This is used in cse.c with the `related_value' field.  */
825 
826 HOST_WIDE_INT
827 get_integer_term (const_rtx x)
828 {
829   if (GET_CODE (x) == CONST)
830     x = XEXP (x, 0);
831 
832   if (GET_CODE (x) == MINUS
833       && CONST_INT_P (XEXP (x, 1)))
834     return - INTVAL (XEXP (x, 1));
835   if (GET_CODE (x) == PLUS
836       && CONST_INT_P (XEXP (x, 1)))
837     return INTVAL (XEXP (x, 1));
838   return 0;
839 }
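/* For example, get_integer_term returns 8 for
   (const (plus (symbol_ref "foo") (const_int 8))), -8 for
   (const (minus (symbol_ref "foo") (const_int 8))), and 0 when no obvious
   integer term exists; get_related_value below returns the matching
   (symbol_ref "foo") part for CONST expressions of that shape.
   ("foo" is an illustrative symbol name.)  */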
840 
841 /* If X is a constant, return the value sans apparent integer term;
842    otherwise return 0.
843    Only obvious integer terms are detected.  */
844 
845 rtx
846 get_related_value (const_rtx x)
847 {
848   if (GET_CODE (x) != CONST)
849     return 0;
850   x = XEXP (x, 0);
851   if (GET_CODE (x) == PLUS
852       && CONST_INT_P (XEXP (x, 1)))
853     return XEXP (x, 0);
854   else if (GET_CODE (x) == MINUS
855 	   && CONST_INT_P (XEXP (x, 1)))
856     return XEXP (x, 0);
857   return 0;
858 }
859 
860 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
861    to somewhere in the same object or object_block as SYMBOL.  */
862 
863 bool
864 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
865 {
866   tree decl;
867 
868   if (GET_CODE (symbol) != SYMBOL_REF)
869     return false;
870 
871   if (offset == 0)
872     return true;
873 
874   if (offset > 0)
875     {
876       if (CONSTANT_POOL_ADDRESS_P (symbol)
877 	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
878 	return true;
879 
880       decl = SYMBOL_REF_DECL (symbol);
881       if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
882 	return true;
883     }
884 
885   if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
886       && SYMBOL_REF_BLOCK (symbol)
887       && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
888       && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
889 	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
890     return true;
891 
892   return false;
893 }
894 
895 /* Split X into a base and a constant offset, storing them in *BASE_OUT
896    and *OFFSET_OUT respectively.  */
897 
898 void
899 split_const (rtx x, rtx *base_out, rtx *offset_out)
900 {
901   if (GET_CODE (x) == CONST)
902     {
903       x = XEXP (x, 0);
904       if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
905 	{
906 	  *base_out = XEXP (x, 0);
907 	  *offset_out = XEXP (x, 1);
908 	  return;
909 	}
910     }
911   *base_out = x;
912   *offset_out = const0_rtx;
913 }
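/* For example, split_const on (const (plus (symbol_ref "x") (const_int 12)))
   stores (symbol_ref "x") in *BASE_OUT and (const_int 12) in *OFFSET_OUT,
   while an rtx not of that shape, e.g. a plain (reg 100), is stored
   unchanged in *BASE_OUT with const0_rtx as the offset.  (The symbol name
   and register number are illustrative.)  */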
914 
915 /* Return the number of places FIND appears within X.  If COUNT_DEST is
916    zero, we do not count occurrences inside the destination of a SET.  */
917 
918 int
919 count_occurrences (const_rtx x, const_rtx find, int count_dest)
920 {
921   int i, j;
922   enum rtx_code code;
923   const char *format_ptr;
924   int count;
925 
926   if (x == find)
927     return 1;
928 
929   code = GET_CODE (x);
930 
931   switch (code)
932     {
933     case REG:
934     CASE_CONST_ANY:
935     case SYMBOL_REF:
936     case CODE_LABEL:
937     case PC:
938     case CC0:
939       return 0;
940 
941     case EXPR_LIST:
942       count = count_occurrences (XEXP (x, 0), find, count_dest);
943       if (XEXP (x, 1))
944 	count += count_occurrences (XEXP (x, 1), find, count_dest);
945       return count;
946 
947     case MEM:
948       if (MEM_P (find) && rtx_equal_p (x, find))
949 	return 1;
950       break;
951 
952     case SET:
953       if (SET_DEST (x) == find && ! count_dest)
954 	return count_occurrences (SET_SRC (x), find, count_dest);
955       break;
956 
957     default:
958       break;
959     }
960 
961   format_ptr = GET_RTX_FORMAT (code);
962   count = 0;
963 
964   for (i = 0; i < GET_RTX_LENGTH (code); i++)
965     {
966       switch (*format_ptr++)
967 	{
968 	case 'e':
969 	  count += count_occurrences (XEXP (x, i), find, count_dest);
970 	  break;
971 
972 	case 'E':
973 	  for (j = 0; j < XVECLEN (x, i); j++)
974 	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
975 	  break;
976 	}
977     }
978   return count;
979 }
980 
981 
982 /* Return TRUE if OP is a register or subreg of a register that
983    holds an unsigned quantity.  Otherwise, return FALSE.  */
984 
985 bool
986 unsigned_reg_p (rtx op)
987 {
988   if (REG_P (op)
989       && REG_EXPR (op)
990       && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
991     return true;
992 
993   if (GET_CODE (op) == SUBREG
994       && SUBREG_PROMOTED_SIGN (op))
995     return true;
996 
997   return false;
998 }
999 
1000 
1001 /* Nonzero if register REG appears somewhere within IN.
1002    Also works if REG is not a register; in this case it checks
1003    for a subexpression of IN that is Lisp "equal" to REG.  */
1004 
1005 int
1006 reg_mentioned_p (const_rtx reg, const_rtx in)
1007 {
1008   const char *fmt;
1009   int i;
1010   enum rtx_code code;
1011 
1012   if (in == 0)
1013     return 0;
1014 
1015   if (reg == in)
1016     return 1;
1017 
1018   if (GET_CODE (in) == LABEL_REF)
1019     return reg == label_ref_label (in);
1020 
1021   code = GET_CODE (in);
1022 
1023   switch (code)
1024     {
1025       /* Compare registers by number.  */
1026     case REG:
1027       return REG_P (reg) && REGNO (in) == REGNO (reg);
1028 
1029       /* These codes have no constituent expressions
1030 	 and are unique.  */
1031     case SCRATCH:
1032     case CC0:
1033     case PC:
1034       return 0;
1035 
1036     CASE_CONST_ANY:
1037       /* These are kept unique for a given value.  */
1038       return 0;
1039 
1040     default:
1041       break;
1042     }
1043 
1044   if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1045     return 1;
1046 
1047   fmt = GET_RTX_FORMAT (code);
1048 
1049   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1050     {
1051       if (fmt[i] == 'E')
1052 	{
1053 	  int j;
1054 	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1055 	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1056 	      return 1;
1057 	}
1058       else if (fmt[i] == 'e'
1059 	       && reg_mentioned_p (reg, XEXP (in, i)))
1060 	return 1;
1061     }
1062   return 0;
1063 }
1064 
1065 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1066    no CODE_LABEL insn.  */
1067 
1068 int
1069 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1070 {
1071   rtx_insn *p;
1072   if (beg == end)
1073     return 0;
1074   for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1075     if (LABEL_P (p))
1076       return 0;
1077   return 1;
1078 }
1079 
1080 /* Nonzero if register REG is used in an insn between
1081    FROM_INSN and TO_INSN (exclusive of those two).  */
1082 
1083 int
1084 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1085 		    const rtx_insn *to_insn)
1086 {
1087   rtx_insn *insn;
1088 
1089   if (from_insn == to_insn)
1090     return 0;
1091 
1092   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1093     if (NONDEBUG_INSN_P (insn)
1094 	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
1095 	   || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1096       return 1;
1097   return 0;
1098 }
1099 
1100 /* Nonzero if the old value of X, a register, is referenced in BODY.  If X
1101    is entirely replaced by a new value and the only use is as a SET_DEST,
1102    we do not consider it a reference.  */
1103 
1104 int
1105 reg_referenced_p (const_rtx x, const_rtx body)
1106 {
1107   int i;
1108 
1109   switch (GET_CODE (body))
1110     {
1111     case SET:
1112       if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1113 	return 1;
1114 
1115       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1116 	 of a REG that occupies all of the REG, the insn references X if
1117 	 it is mentioned in the destination.  */
1118       if (GET_CODE (SET_DEST (body)) != CC0
1119 	  && GET_CODE (SET_DEST (body)) != PC
1120 	  && !REG_P (SET_DEST (body))
1121 	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
1122 		&& REG_P (SUBREG_REG (SET_DEST (body)))
1123 		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
1124 		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
1125 		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
1126 			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
1127 	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
1128 	return 1;
1129       return 0;
1130 
1131     case ASM_OPERANDS:
1132       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1133 	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1134 	  return 1;
1135       return 0;
1136 
1137     case CALL:
1138     case USE:
1139     case IF_THEN_ELSE:
1140       return reg_overlap_mentioned_p (x, body);
1141 
1142     case TRAP_IF:
1143       return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1144 
1145     case PREFETCH:
1146       return reg_overlap_mentioned_p (x, XEXP (body, 0));
1147 
1148     case UNSPEC:
1149     case UNSPEC_VOLATILE:
1150       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1151 	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1152 	  return 1;
1153       return 0;
1154 
1155     case PARALLEL:
1156       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1157 	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1158 	  return 1;
1159       return 0;
1160 
1161     case CLOBBER:
1162       if (MEM_P (XEXP (body, 0)))
1163 	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1164 	  return 1;
1165       return 0;
1166 
1167     case COND_EXEC:
1168       if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1169 	return 1;
1170       return reg_referenced_p (x, COND_EXEC_CODE (body));
1171 
1172     default:
1173       return 0;
1174     }
1175 }
1176 
1177 /* Nonzero if register REG is set or clobbered in an insn between
1178    FROM_INSN and TO_INSN (exclusive of those two).  */
1179 
1180 int
1181 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1182 		   const rtx_insn *to_insn)
1183 {
1184   const rtx_insn *insn;
1185 
1186   if (from_insn == to_insn)
1187     return 0;
1188 
1189   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1190     if (INSN_P (insn) && reg_set_p (reg, insn))
1191       return 1;
1192   return 0;
1193 }
1194 
1195 /* Return true if REG is set or clobbered inside INSN.  */
1196 
1197 int
1198 reg_set_p (const_rtx reg, const_rtx insn)
1199 {
1200   /* After delay slot handling, call and branch insns might be in a
1201      sequence.  Check all the elements there.  */
1202   if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1203     {
1204       for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1205 	if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1206 	  return true;
1207 
1208       return false;
1209     }
1210 
1211   /* We can be passed an insn or part of one.  If we are passed an insn,
1212      check if a side-effect of the insn clobbers REG.  */
1213   if (INSN_P (insn)
1214       && (FIND_REG_INC_NOTE (insn, reg)
1215 	  || (CALL_P (insn)
1216 	      && ((REG_P (reg)
1217 		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
1218 		   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1219 					       GET_MODE (reg), REGNO (reg)))
1220 		  || MEM_P (reg)
1221 		  || find_reg_fusage (insn, CLOBBER, reg)))))
1222     return true;
1223 
1224   return set_of (reg, insn) != NULL_RTX;
1225 }
1226 
1227 /* Similar to reg_set_between_p, but check all registers in X.  Return 0
1228    only if none of them are modified between START and END.  Return 1 if
1229    X contains a MEM; this routine does use memory aliasing.  */
1230 
1231 int
1232 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1233 {
1234   const enum rtx_code code = GET_CODE (x);
1235   const char *fmt;
1236   int i, j;
1237   rtx_insn *insn;
1238 
1239   if (start == end)
1240     return 0;
1241 
1242   switch (code)
1243     {
1244     CASE_CONST_ANY:
1245     case CONST:
1246     case SYMBOL_REF:
1247     case LABEL_REF:
1248       return 0;
1249 
1250     case PC:
1251     case CC0:
1252       return 1;
1253 
1254     case MEM:
1255       if (modified_between_p (XEXP (x, 0), start, end))
1256 	return 1;
1257       if (MEM_READONLY_P (x))
1258 	return 0;
1259       for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1260 	if (memory_modified_in_insn_p (x, insn))
1261 	  return 1;
1262       return 0;
1263 
1264     case REG:
1265       return reg_set_between_p (x, start, end);
1266 
1267     default:
1268       break;
1269     }
1270 
1271   fmt = GET_RTX_FORMAT (code);
1272   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1273     {
1274       if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1275 	return 1;
1276 
1277       else if (fmt[i] == 'E')
1278 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1279 	  if (modified_between_p (XVECEXP (x, i, j), start, end))
1280 	    return 1;
1281     }
1282 
1283   return 0;
1284 }
1285 
1286 /* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
1287    of them are modified in INSN.  Return 1 if X contains a MEM; this routine
1288    does use memory aliasing.  */
1289 
1290 int
1291 modified_in_p (const_rtx x, const_rtx insn)
1292 {
1293   const enum rtx_code code = GET_CODE (x);
1294   const char *fmt;
1295   int i, j;
1296 
1297   switch (code)
1298     {
1299     CASE_CONST_ANY:
1300     case CONST:
1301     case SYMBOL_REF:
1302     case LABEL_REF:
1303       return 0;
1304 
1305     case PC:
1306     case CC0:
1307       return 1;
1308 
1309     case MEM:
1310       if (modified_in_p (XEXP (x, 0), insn))
1311 	return 1;
1312       if (MEM_READONLY_P (x))
1313 	return 0;
1314       if (memory_modified_in_insn_p (x, insn))
1315 	return 1;
1316       return 0;
1317 
1318     case REG:
1319       return reg_set_p (x, insn);
1320 
1321     default:
1322       break;
1323     }
1324 
1325   fmt = GET_RTX_FORMAT (code);
1326   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1327     {
1328       if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1329 	return 1;
1330 
1331       else if (fmt[i] == 'E')
1332 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1333 	  if (modified_in_p (XVECEXP (x, i, j), insn))
1334 	    return 1;
1335     }
1336 
1337   return 0;
1338 }
1339 
1340 /* Helper function for set_of.  */
1341 struct set_of_data
1342   {
1343     const_rtx found;
1344     const_rtx pat;
1345   };
1346 
1347 static void
1348 set_of_1 (rtx x, const_rtx pat, void *data1)
1349 {
1350   struct set_of_data *const data = (struct set_of_data *) (data1);
1351   if (rtx_equal_p (x, data->pat)
1352       || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1353     data->found = pat;
1354 }
1355 
1356 /* Given an INSN, return a SET or CLOBBER expression that modifies PAT
1357    (either directly or via STRICT_LOW_PART and similar modifiers).  */
1358 const_rtx
1359 set_of (const_rtx pat, const_rtx insn)
1360 {
1361   struct set_of_data data;
1362   data.found = NULL_RTX;
1363   data.pat = pat;
1364   note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1365   return data.found;
1366 }
1367 
1368 /* Add all hard registers in X to *PSET.  */
1369 void
1370 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1371 {
1372   subrtx_iterator::array_type array;
1373   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1374     {
1375       const_rtx x = *iter;
1376       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1377 	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1378     }
1379 }
1380 
1381 /* This function, called through note_stores, collects sets and
1382    clobbers of hard registers in a HARD_REG_SET, which is pointed to
1383    by DATA.  */
1384 void
1385 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1386 {
1387   HARD_REG_SET *pset = (HARD_REG_SET *)data;
1388   if (REG_P (x) && HARD_REGISTER_P (x))
1389     add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1390 }
1391 
1392 /* Examine INSN, and compute the set of hard registers written by it.
1393    Store it in *PSET.  Should only be called after reload.  */
1394 void
1395 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1396 {
1397   rtx link;
1398 
1399   CLEAR_HARD_REG_SET (*pset);
1400   note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1401   if (CALL_P (insn))
1402     {
1403       if (implicit)
1404 	IOR_HARD_REG_SET (*pset, call_used_reg_set);
1405 
1406       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1407 	record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1408     }
1409   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1410     if (REG_NOTE_KIND (link) == REG_INC)
1411       record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1412 }
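/* A minimal usage sketch (the variable names are hypothetical, for
   illustration only):

     HARD_REG_SET written;
     find_all_hard_reg_sets (insn, &written, true);
     if (TEST_HARD_REG_BIT (written, REGNO (some_hard_reg)))
       ...;

   Passing false for IMPLICIT leaves out the registers that are only
   clobbered implicitly by a call.  */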
1413 
1414 /* Like record_hard_reg_sets, but called through note_uses.  */
1415 void
1416 record_hard_reg_uses (rtx *px, void *data)
1417 {
1418   find_all_hard_regs (*px, (HARD_REG_SET *) data);
1419 }
1420 
1421 /* Given an INSN, return a SET expression if this insn has only a single SET.
1422    It may also have CLOBBERs, USEs, or SETs whose output
1423    will not be used, which we ignore.  */
1424 
1425 rtx
1426 single_set_2 (const rtx_insn *insn, const_rtx pat)
1427 {
1428   rtx set = NULL;
1429   int set_verified = 1;
1430   int i;
1431 
1432   if (GET_CODE (pat) == PARALLEL)
1433     {
1434       for (i = 0; i < XVECLEN (pat, 0); i++)
1435 	{
1436 	  rtx sub = XVECEXP (pat, 0, i);
1437 	  switch (GET_CODE (sub))
1438 	    {
1439 	    case USE:
1440 	    case CLOBBER:
1441 	      break;
1442 
1443 	    case SET:
1444 	      /* We can consider insns having multiple sets, where all
1445 		 but one are dead, as single set insns.  In the common case
1446 		 only a single set is present in the pattern, so we want
1447 		 to avoid checking for REG_UNUSED notes unless necessary.
1448 
1449 		 When we reach a set for the first time, we just expect it
1450 		 to be the single set we are looking for; only when more
1451 		 sets are found in the insn do we check them.  */
1452 	      if (!set_verified)
1453 		{
1454 		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1455 		      && !side_effects_p (set))
1456 		    set = NULL;
1457 		  else
1458 		    set_verified = 1;
1459 		}
1460 	      if (!set)
1461 		set = sub, set_verified = 0;
1462 	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1463 		       || side_effects_p (sub))
1464 		return NULL_RTX;
1465 	      break;
1466 
1467 	    default:
1468 	      return NULL_RTX;
1469 	    }
1470 	}
1471     }
1472   return set;
1473 }
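/* For example, a pattern such as
     (parallel [(set (reg:SI 1) (reg:SI 2))
		(clobber (reg:CC 17))])
   still counts as a single set; an additional SET is tolerated only when
   the insn has a REG_UNUSED note for its destination and the SET has no
   side effects, and anything other than SET, USE or CLOBBER in the
   PARALLEL makes the function return NULL_RTX.  (The register numbers and
   modes above are illustrative.)  */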
1474 
1475 /* Given an INSN, return nonzero if it has more than one SET, else return
1476    zero.  */
1477 
1478 int
1479 multiple_sets (const_rtx insn)
1480 {
1481   int found;
1482   int i;
1483 
1484   /* INSN must be an insn.  */
1485   if (! INSN_P (insn))
1486     return 0;
1487 
1488   /* Only a PARALLEL can have multiple SETs.  */
1489   if (GET_CODE (PATTERN (insn)) == PARALLEL)
1490     {
1491       for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1492 	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1493 	  {
1494 	    /* If we have already found a SET, then return now.  */
1495 	    if (found)
1496 	      return 1;
1497 	    else
1498 	      found = 1;
1499 	  }
1500     }
1501 
1502   /* Either zero or one SET.  */
1503   return 0;
1504 }
1505 
1506 /* Return nonzero if the destination of SET equals the source
1507    and there are no side effects.  */
1508 
1509 int
1510 set_noop_p (const_rtx set)
1511 {
1512   rtx src = SET_SRC (set);
1513   rtx dst = SET_DEST (set);
1514 
1515   if (dst == pc_rtx && src == pc_rtx)
1516     return 1;
1517 
1518   if (MEM_P (dst) && MEM_P (src))
1519     return rtx_equal_p (dst, src) && !side_effects_p (dst);
1520 
1521   if (GET_CODE (dst) == ZERO_EXTRACT)
1522     return rtx_equal_p (XEXP (dst, 0), src)
1523 	   && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1524 	   && !side_effects_p (src);
1525 
1526   if (GET_CODE (dst) == STRICT_LOW_PART)
1527     dst = XEXP (dst, 0);
1528 
1529   if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1530     {
1531       if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1532 	return 0;
1533       src = SUBREG_REG (src);
1534       dst = SUBREG_REG (dst);
1535     }
1536 
1537   /* It is also a NOOP if the destination is the hard register that
1538      already holds the consecutive source vector elements selected below.  */
1539   if (GET_CODE (src) == VEC_SELECT
1540       && REG_P (XEXP (src, 0)) && REG_P (dst)
1541       && HARD_REGISTER_P (XEXP (src, 0))
1542       && HARD_REGISTER_P (dst))
1543     {
1544       int i;
1545       rtx par = XEXP (src, 1);
1546       rtx src0 = XEXP (src, 0);
1547       int c0 = INTVAL (XVECEXP (par, 0, 0));
1548       HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1549 
1550       for (i = 1; i < XVECLEN (par, 0); i++)
1551 	if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1552 	  return 0;
1553       return
1554 	simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1555 			       offset, GET_MODE (dst)) == (int) REGNO (dst);
1556     }
1557 
1558   return (REG_P (src) && REG_P (dst)
1559 	  && REGNO (src) == REGNO (dst));
1560 }
1561 
1562 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1563    value to itself.  */
1564 
1565 int
1566 noop_move_p (const rtx_insn *insn)
1567 {
1568   rtx pat = PATTERN (insn);
1569 
1570   if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1571     return 1;
1572 
1573   /* Insns carrying these notes are useful later on.  */
1574   if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1575     return 0;
1576 
1577   /* Check the code to be executed for COND_EXEC.  */
1578   if (GET_CODE (pat) == COND_EXEC)
1579     pat = COND_EXEC_CODE (pat);
1580 
1581   if (GET_CODE (pat) == SET && set_noop_p (pat))
1582     return 1;
1583 
1584   if (GET_CODE (pat) == PARALLEL)
1585     {
1586       int i;
1587       /* If nothing but SETs of registers to themselves,
1588 	 this insn can also be deleted.  */
1589       for (i = 0; i < XVECLEN (pat, 0); i++)
1590 	{
1591 	  rtx tem = XVECEXP (pat, 0, i);
1592 
1593 	  if (GET_CODE (tem) == USE
1594 	      || GET_CODE (tem) == CLOBBER)
1595 	    continue;
1596 
1597 	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1598 	    return 0;
1599 	}
1600 
1601       return 1;
1602     }
1603   return 0;
1604 }
1605 
1606 
1607 /* Return nonzero if register in range [REGNO, ENDREGNO)
1608    appears either explicitly or implicitly in X
1609    other than being stored into.
1610 
1611    References contained within the substructure at LOC do not count.
1612    LOC may be zero, meaning don't ignore anything.  */
1613 
1614 bool
1615 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1616 		   rtx *loc)
1617 {
1618   int i;
1619   unsigned int x_regno;
1620   RTX_CODE code;
1621   const char *fmt;
1622 
1623  repeat:
1624   /* The contents of a REG_NONNEG note are always zero, so we must come here
1625      upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
1626   if (x == 0)
1627     return false;
1628 
1629   code = GET_CODE (x);
1630 
1631   switch (code)
1632     {
1633     case REG:
1634       x_regno = REGNO (x);
1635 
1636       /* If we are modifying the stack, frame, or argument pointer, it will
1637 	 clobber a virtual register.  In fact, we could be more precise,
1638 	 but it isn't worth it.  */
1639       if ((x_regno == STACK_POINTER_REGNUM
1640 	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1641 	       && x_regno == ARG_POINTER_REGNUM)
1642 	   || x_regno == FRAME_POINTER_REGNUM)
1643 	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1644 	return true;
1645 
1646       return endregno > x_regno && regno < END_REGNO (x);
1647 
1648     case SUBREG:
1649       /* If this is a SUBREG of a hard reg, we can see exactly which
1650 	 registers are being modified.  Otherwise, handle normally.  */
1651       if (REG_P (SUBREG_REG (x))
1652 	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1653 	{
1654 	  unsigned int inner_regno = subreg_regno (x);
1655 	  unsigned int inner_endregno
1656 	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1657 			     ? subreg_nregs (x) : 1);
1658 
1659 	  return endregno > inner_regno && regno < inner_endregno;
1660 	}
1661       break;
1662 
1663     case CLOBBER:
1664     case SET:
1665       if (&SET_DEST (x) != loc
1666 	  /* Note setting a SUBREG counts as referring to the REG it is in for
1667 	     a pseudo but not for hard registers since we can
1668 	     treat each word individually.  */
1669 	  && ((GET_CODE (SET_DEST (x)) == SUBREG
1670 	       && loc != &SUBREG_REG (SET_DEST (x))
1671 	       && REG_P (SUBREG_REG (SET_DEST (x)))
1672 	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1673 	       && refers_to_regno_p (regno, endregno,
1674 				     SUBREG_REG (SET_DEST (x)), loc))
1675 	      || (!REG_P (SET_DEST (x))
1676 		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1677 	return true;
1678 
1679       if (code == CLOBBER || loc == &SET_SRC (x))
1680 	return false;
1681       x = SET_SRC (x);
1682       goto repeat;
1683 
1684     default:
1685       break;
1686     }
1687 
1688   /* X does not match, so try its subexpressions.  */
1689 
1690   fmt = GET_RTX_FORMAT (code);
1691   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1692     {
1693       if (fmt[i] == 'e' && loc != &XEXP (x, i))
1694 	{
1695 	  if (i == 0)
1696 	    {
1697 	      x = XEXP (x, 0);
1698 	      goto repeat;
1699 	    }
1700 	  else
1701 	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1702 	      return true;
1703 	}
1704       else if (fmt[i] == 'E')
1705 	{
1706 	  int j;
1707 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1708 	    if (loc != &XVECEXP (x, i, j)
1709 		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1710 	      return true;
1711 	}
1712     }
1713   return false;
1714 }
1715 
1716 /* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
1717    we check if any register number in X conflicts with the relevant register
1718    numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
1719    contains a MEM (we don't bother checking for memory addresses that can't
1720    conflict because we expect this to be a rare case).  */
1721 
1722 int
1723 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1724 {
1725   unsigned int regno, endregno;
1726 
1727   /* If either argument is a constant, then modifying X can not
1728      affect IN.  We test only IN here; the CONSTANT_P (x) check is
1729      profitably folded into the switch statement below.  */
1730   if (CONSTANT_P (in))
1731     return 0;
1732 
1733  recurse:
1734   switch (GET_CODE (x))
1735     {
1736     case STRICT_LOW_PART:
1737     case ZERO_EXTRACT:
1738     case SIGN_EXTRACT:
1739       /* Overly conservative.  */
1740       x = XEXP (x, 0);
1741       goto recurse;
1742 
1743     case SUBREG:
1744       regno = REGNO (SUBREG_REG (x));
1745       if (regno < FIRST_PSEUDO_REGISTER)
1746 	regno = subreg_regno (x);
1747       endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1748 			  ? subreg_nregs (x) : 1);
1749       goto do_reg;
1750 
1751     case REG:
1752       regno = REGNO (x);
1753       endregno = END_REGNO (x);
1754     do_reg:
1755       return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1756 
1757     case MEM:
1758       {
1759 	const char *fmt;
1760 	int i;
1761 
1762 	if (MEM_P (in))
1763 	  return 1;
1764 
1765 	fmt = GET_RTX_FORMAT (GET_CODE (in));
1766 	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1767 	  if (fmt[i] == 'e')
1768 	    {
1769 	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1770 		return 1;
1771 	    }
1772 	  else if (fmt[i] == 'E')
1773 	    {
1774 	      int j;
1775 	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1776 		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1777 		  return 1;
1778 	    }
1779 
1780 	return 0;
1781       }
1782 
1783     case SCRATCH:
1784     case PC:
1785     case CC0:
1786       return reg_mentioned_p (x, in);
1787 
1788     case PARALLEL:
1789       {
1790 	int i;
1791 
1792 	/* If any register in here refers to it we return true.  */
1793 	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1794 	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
1795 	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1796 	    return 1;
1797 	return 0;
1798       }
1799 
1800     default:
1801       gcc_assert (CONSTANT_P (x));
1802       return 0;
1803     }
1804 }
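/* For example, reg_overlap_mentioned_p returns 1 for a pseudo such as
   (reg:SI 100) against (plus:SI (reg:SI 100) (const_int 4)), and for hard
   registers it also catches partial overlaps, e.g. a multi-word (reg:DI 0)
   against (reg:SI 1) on a target where DImode occupies hard registers 0
   and 1.  (The register numbers and modes are illustrative.)  */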
1805 
1806 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1807    (X would be the pattern of an insn).  DATA is an arbitrary pointer,
1808    ignored by note_stores, but passed to FUN.
1809 
1810    FUN receives three arguments:
1811    1. the REG, MEM, CC0 or PC being stored in or clobbered,
1812    2. the SET or CLOBBER rtx that does the store,
1813    3. the pointer DATA provided to note_stores.
1814 
1815   If the item being stored in or clobbered is a SUBREG of a hard register,
1816   the SUBREG will be passed.  */
1817 
1818 void
1819 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1820 {
1821   int i;
1822 
1823   if (GET_CODE (x) == COND_EXEC)
1824     x = COND_EXEC_CODE (x);
1825 
1826   if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1827     {
1828       rtx dest = SET_DEST (x);
1829 
1830       while ((GET_CODE (dest) == SUBREG
1831 	      && (!REG_P (SUBREG_REG (dest))
1832 		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1833 	     || GET_CODE (dest) == ZERO_EXTRACT
1834 	     || GET_CODE (dest) == STRICT_LOW_PART)
1835 	dest = XEXP (dest, 0);
1836 
1837       /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1838 	 each of whose first operand is a register.  */
1839       if (GET_CODE (dest) == PARALLEL)
1840 	{
1841 	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1842 	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1843 	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1844 	}
1845       else
1846 	(*fun) (dest, x, data);
1847     }
1848 
1849   else if (GET_CODE (x) == PARALLEL)
1850     for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1851       note_stores (XVECEXP (x, 0, i), fun, data);
1852 }
1853 
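/* Usage sketch (illustrative only, not part of this file): count how many
   plain register destinations an insn's pattern stores to or clobbers,
   using note_stores.  The callback and variable names are hypothetical.

     static void
     count_reg_store (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
         ++*(int *) data;
     }

     int n_reg_stores = 0;
     note_stores (PATTERN (insn), count_reg_store, &n_reg_stores);  */
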
1854 /* Like note_stores, but call FUN for each expression that is being
1855    referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
1856    FUN for each expression, not any interior subexpressions.  FUN receives a
1857    pointer to the expression and the DATA passed to this function.
1858 
1859    Note that this is not quite the same test as that done in reg_referenced_p
1860    since that considers something as being referenced if it is being
1861    partially set, while we do not.  */
1862 
1863 void
1864 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1865 {
1866   rtx body = *pbody;
1867   int i;
1868 
1869   switch (GET_CODE (body))
1870     {
1871     case COND_EXEC:
1872       (*fun) (&COND_EXEC_TEST (body), data);
1873       note_uses (&COND_EXEC_CODE (body), fun, data);
1874       return;
1875 
1876     case PARALLEL:
1877       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1878 	note_uses (&XVECEXP (body, 0, i), fun, data);
1879       return;
1880 
1881     case SEQUENCE:
1882       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1883 	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1884       return;
1885 
1886     case USE:
1887       (*fun) (&XEXP (body, 0), data);
1888       return;
1889 
1890     case ASM_OPERANDS:
1891       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1892 	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1893       return;
1894 
1895     case TRAP_IF:
1896       (*fun) (&TRAP_CONDITION (body), data);
1897       return;
1898 
1899     case PREFETCH:
1900       (*fun) (&XEXP (body, 0), data);
1901       return;
1902 
1903     case UNSPEC:
1904     case UNSPEC_VOLATILE:
1905       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1906 	(*fun) (&XVECEXP (body, 0, i), data);
1907       return;
1908 
1909     case CLOBBER:
1910       if (MEM_P (XEXP (body, 0)))
1911 	(*fun) (&XEXP (XEXP (body, 0), 0), data);
1912       return;
1913 
1914     case SET:
1915       {
1916 	rtx dest = SET_DEST (body);
1917 
1918 	/* For sets we visit everything in the source, plus registers in a MEM
1919 	   destination and the operands of a ZERO_EXTRACT destination.  */
1920 	(*fun) (&SET_SRC (body), data);
1921 
1922 	if (GET_CODE (dest) == ZERO_EXTRACT)
1923 	  {
1924 	    (*fun) (&XEXP (dest, 1), data);
1925 	    (*fun) (&XEXP (dest, 2), data);
1926 	  }
1927 
1928 	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1929 	  dest = XEXP (dest, 0);
1930 
1931 	if (MEM_P (dest))
1932 	  (*fun) (&XEXP (dest, 0), data);
1933       }
1934       return;
1935 
1936     default:
1937       /* All the other possibilities never store.  */
1938       (*fun) (pbody, data);
1939       return;
1940     }
1941 }
1942 
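/* Usage sketch (illustrative only, not part of this file): count the
   top-level use expressions in an insn's pattern with note_uses.  The
   callback and variable names are hypothetical.

     static void
     count_use_expr (rtx *loc ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
     }

     int n_uses = 0;
     note_uses (&PATTERN (insn), count_use_expr, &n_uses);  */
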
1943 /* Return nonzero if X's old contents don't survive after INSN.
1944    This will be true if X is (cc0), or if X is a register and
1945    either X dies in INSN or INSN entirely sets X.
1946 
1947    "Entirely set" means set directly and not through a SUBREG or
1948    ZERO_EXTRACT, so no trace of the old contents remains.
1949    Likewise, REG_INC does not count.
1950 
1951    REG may be a hard or pseudo reg.  Renumbering is not taken into account,
1952    but for this use that makes no difference, since regs don't overlap
1953    during their lifetimes.  Therefore, this function may be used
1954    at any time after deaths have been computed.
1955 
1956    If REG is a hard reg that occupies multiple machine registers, this
1957    function will only return 1 if each of those registers will be replaced
1958    by INSN.  */
1959 
1960 int
1961 dead_or_set_p (const rtx_insn *insn, const_rtx x)
1962 {
1963   unsigned int regno, end_regno;
1964   unsigned int i;
1965 
1966   /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
1967   if (GET_CODE (x) == CC0)
1968     return 1;
1969 
1970   gcc_assert (REG_P (x));
1971 
1972   regno = REGNO (x);
1973   end_regno = END_REGNO (x);
1974   for (i = regno; i < end_regno; i++)
1975     if (! dead_or_set_regno_p (insn, i))
1976       return 0;
1977 
1978   return 1;
1979 }
1980 
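/* Usage sketch (illustrative only): a pass that wants to reuse a register
   after INSN might first check that its previous contents are no longer
   needed.  INSN and REG are hypothetical locals.

     if (dead_or_set_p (insn, reg))
       ...  the old value of REG is not needed after INSN ...  */
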
1981 /* Return TRUE iff DEST is a register or subreg of a register and
1982    doesn't change the number of words of the inner register, and any
1983    part of the register is TEST_REGNO.  */
1984 
1985 static bool
1986 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1987 {
1988   unsigned int regno, endregno;
1989 
1990   if (GET_CODE (dest) == SUBREG
1991       && (((GET_MODE_SIZE (GET_MODE (dest))
1992 	    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1993 	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1994 	       + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1995     dest = SUBREG_REG (dest);
1996 
1997   if (!REG_P (dest))
1998     return false;
1999 
2000   regno = REGNO (dest);
2001   endregno = END_REGNO (dest);
2002   return (test_regno >= regno && test_regno < endregno);
2003 }
2004 
2005 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2006    any member matches the covers_regno_no_parallel_p criteria.  */
2007 
2008 static bool
2009 covers_regno_p (const_rtx dest, unsigned int test_regno)
2010 {
2011   if (GET_CODE (dest) == PARALLEL)
2012     {
2013       /* Some targets place small structures in registers for return
2014 	 values of functions, and those registers are wrapped in
2015 	 PARALLELs that we may see as the destination of a SET.  */
2016       int i;
2017 
2018       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2019 	{
2020 	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2021 	  if (inner != NULL_RTX
2022 	      && covers_regno_no_parallel_p (inner, test_regno))
2023 	    return true;
2024 	}
2025 
2026       return false;
2027     }
2028   else
2029     return covers_regno_no_parallel_p (dest, test_regno);
2030 }
2031 
2032 /* Utility function for dead_or_set_p to check an individual register. */
2033 
2034 int
2035 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2036 {
2037   const_rtx pattern;
2038 
2039   /* See if there is a death note for something that includes TEST_REGNO.  */
2040   if (find_regno_note (insn, REG_DEAD, test_regno))
2041     return 1;
2042 
2043   if (CALL_P (insn)
2044       && find_regno_fusage (insn, CLOBBER, test_regno))
2045     return 1;
2046 
2047   pattern = PATTERN (insn);
2048 
2049   /* If a COND_EXEC is not executed, the value survives.  */
2050   if (GET_CODE (pattern) == COND_EXEC)
2051     return 0;
2052 
2053   if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2054     return covers_regno_p (SET_DEST (pattern), test_regno);
2055   else if (GET_CODE (pattern) == PARALLEL)
2056     {
2057       int i;
2058 
2059       for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2060 	{
2061 	  rtx body = XVECEXP (pattern, 0, i);
2062 
2063 	  if (GET_CODE (body) == COND_EXEC)
2064 	    body = COND_EXEC_CODE (body);
2065 
2066 	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2067 	      && covers_regno_p (SET_DEST (body), test_regno))
2068 	    return 1;
2069 	}
2070     }
2071 
2072   return 0;
2073 }
2074 
2075 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2076    If DATUM is nonzero, look for one whose datum is DATUM.  */
2077 
2078 rtx
2079 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2080 {
2081   rtx link;
2082 
2083   gcc_checking_assert (insn);
2084 
2085   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2086   if (! INSN_P (insn))
2087     return 0;
2088   if (datum == 0)
2089     {
2090       for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2091 	if (REG_NOTE_KIND (link) == kind)
2092 	  return link;
2093       return 0;
2094     }
2095 
2096   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2097     if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2098       return link;
2099   return 0;
2100 }
2101 
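/* Usage sketch (illustrative only): look up a REG_EQUAL note on an insn
   and use its datum if one is present.  INSN is a hypothetical local.

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       {
         rtx equal_value = XEXP (note, 0);
         ...
       }  */
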
2102 /* Return the reg-note of kind KIND in insn INSN which applies to register
2103    number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
2104    the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2105    it might be the case that the note overlaps REGNO.  */
2106 
2107 rtx
2108 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2109 {
2110   rtx link;
2111 
2112   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2113   if (! INSN_P (insn))
2114     return 0;
2115 
2116   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2117     if (REG_NOTE_KIND (link) == kind
2118 	/* Verify that it is a register, so that scratch and MEM won't cause a
2119 	   problem here.  */
2120 	&& REG_P (XEXP (link, 0))
2121 	&& REGNO (XEXP (link, 0)) <= regno
2122 	&& END_REGNO (XEXP (link, 0)) > regno)
2123       return link;
2124   return 0;
2125 }
2126 
2127 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2128    has such a note.  */
2129 
2130 rtx
2131 find_reg_equal_equiv_note (const_rtx insn)
2132 {
2133   rtx link;
2134 
2135   if (!INSN_P (insn))
2136     return 0;
2137 
2138   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2139     if (REG_NOTE_KIND (link) == REG_EQUAL
2140 	|| REG_NOTE_KIND (link) == REG_EQUIV)
2141       {
2142 	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2143 	   insns that have multiple sets.  Checking single_set to
2144 	   make sure of this is not the proper check, as explained
2145 	   in the comment in set_unique_reg_note.
2146 
2147 	   This should be changed into an assert.  */
2148 	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2149 	  return 0;
2150 	return link;
2151       }
2152   return NULL;
2153 }
2154 
2155 /* Check whether INSN is a single_set whose source is known to be
2156    equivalent to a constant.  Return that constant if so, otherwise
2157    return null.  */
2158 
2159 rtx
2160 find_constant_src (const rtx_insn *insn)
2161 {
2162   rtx note, set, x;
2163 
2164   set = single_set (insn);
2165   if (set)
2166     {
2167       x = avoid_constant_pool_reference (SET_SRC (set));
2168       if (CONSTANT_P (x))
2169 	return x;
2170     }
2171 
2172   note = find_reg_equal_equiv_note (insn);
2173   if (note && CONSTANT_P (XEXP (note, 0)))
2174     return XEXP (note, 0);
2175 
2176   return NULL_RTX;
2177 }
2178 
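/* Usage sketch (illustrative only): propagate a constant when an insn is
   known to compute one.  INSN is a hypothetical local.

     rtx cst = find_constant_src (insn);
     if (cst != NULL_RTX)
       ...  the destination of INSN is known to equal CST here ...  */
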
2179 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2180    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2181 
2182 int
2183 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2184 {
2185   /* If it's not a CALL_INSN, it can't possibly have a
2186      CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
2187   if (!CALL_P (insn))
2188     return 0;
2189 
2190   gcc_assert (datum);
2191 
2192   if (!REG_P (datum))
2193     {
2194       rtx link;
2195 
2196       for (link = CALL_INSN_FUNCTION_USAGE (insn);
2197 	   link;
2198 	   link = XEXP (link, 1))
2199 	if (GET_CODE (XEXP (link, 0)) == code
2200 	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2201 	  return 1;
2202     }
2203   else
2204     {
2205       unsigned int regno = REGNO (datum);
2206 
2207       /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2208 	 to pseudo registers, so don't bother checking.  */
2209 
2210       if (regno < FIRST_PSEUDO_REGISTER)
2211 	{
2212 	  unsigned int end_regno = END_REGNO (datum);
2213 	  unsigned int i;
2214 
2215 	  for (i = regno; i < end_regno; i++)
2216 	    if (find_regno_fusage (insn, code, i))
2217 	      return 1;
2218 	}
2219     }
2220 
2221   return 0;
2222 }
2223 
2224 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2225    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2226 
2227 int
2228 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2229 {
2230   rtx link;
2231 
2232   /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2233      to pseudo registers, so don't bother checking.  */
2234 
2235   if (regno >= FIRST_PSEUDO_REGISTER
2236       || !CALL_P (insn) )
2237     return 0;
2238 
2239   for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2240     {
2241       rtx op, reg;
2242 
2243       if (GET_CODE (op = XEXP (link, 0)) == code
2244 	  && REG_P (reg = XEXP (op, 0))
2245 	  && REGNO (reg) <= regno
2246 	  && END_REGNO (reg) > regno)
2247 	return 1;
2248     }
2249 
2250   return 0;
2251 }
2252 
2253 
2254 /* Return true if KIND is an integer REG_NOTE.  */
2255 
2256 static bool
2257 int_reg_note_p (enum reg_note kind)
2258 {
2259   return kind == REG_BR_PROB;
2260 }
2261 
2262 /* Allocate a register note with kind KIND and datum DATUM.  LIST is
2263    stored as the pointer to the next register note.  */
2264 
2265 rtx
2266 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2267 {
2268   rtx note;
2269 
2270   gcc_checking_assert (!int_reg_note_p (kind));
2271   switch (kind)
2272     {
2273     case REG_CC_SETTER:
2274     case REG_CC_USER:
2275     case REG_LABEL_TARGET:
2276     case REG_LABEL_OPERAND:
2277     case REG_TM:
2278       /* These types of register notes use an INSN_LIST rather than an
2279 	 EXPR_LIST, so that copying is done right and dumps look
2280 	 better.  */
2281       note = alloc_INSN_LIST (datum, list);
2282       PUT_REG_NOTE_KIND (note, kind);
2283       break;
2284 
2285     default:
2286       note = alloc_EXPR_LIST (kind, datum, list);
2287       break;
2288     }
2289 
2290   return note;
2291 }
2292 
2293 /* Add register note with kind KIND and datum DATUM to INSN.  */
2294 
2295 void
2296 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2297 {
2298   REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2299 }
2300 
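/* Usage sketch (illustrative only): record that register REG dies in INSN
   by attaching a REG_DEAD note.  INSN and REG are hypothetical locals.

     add_reg_note (insn, REG_DEAD, reg);  */
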
2301 /* Add an integer register note with kind KIND and datum DATUM to INSN.  */
2302 
2303 void
2304 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2305 {
2306   gcc_checking_assert (int_reg_note_p (kind));
2307   REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2308 				       datum, REG_NOTES (insn));
2309 }
2310 
2311 /* Add a register note like NOTE to INSN.  */
2312 
2313 void
2314 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2315 {
2316   if (GET_CODE (note) == INT_LIST)
2317     add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2318   else
2319     add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2320 }
2321 
2322 /* Duplicate NOTE and return the copy.  */
2323 rtx
2324 duplicate_reg_note (rtx note)
2325 {
2326   reg_note kind = REG_NOTE_KIND (note);
2327 
2328   if (GET_CODE (note) == INT_LIST)
2329     return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2330   else if (GET_CODE (note) == EXPR_LIST)
2331     return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2332   else
2333     return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2334 }
2335 
2336 /* Remove register note NOTE from the REG_NOTES of INSN.  */
2337 
2338 void
2339 remove_note (rtx_insn *insn, const_rtx note)
2340 {
2341   rtx link;
2342 
2343   if (note == NULL_RTX)
2344     return;
2345 
2346   if (REG_NOTES (insn) == note)
2347     REG_NOTES (insn) = XEXP (note, 1);
2348   else
2349     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2350       if (XEXP (link, 1) == note)
2351 	{
2352 	  XEXP (link, 1) = XEXP (note, 1);
2353 	  break;
2354 	}
2355 
2356   switch (REG_NOTE_KIND (note))
2357     {
2358     case REG_EQUAL:
2359     case REG_EQUIV:
2360       df_notes_rescan (insn);
2361       break;
2362     default:
2363       break;
2364     }
2365 }
2366 
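/* Usage sketch (illustrative only): drop a REG_EQUAL note that is no longer
   valid after the insn has been changed.  INSN is a hypothetical local;
   remove_note also triggers the dataflow rescan for REG_EQUAL/REG_EQUIV.

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       remove_note (insn, note);  */
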
2367 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2368    Return true if any note has been removed.  */
2369 
2370 bool
2371 remove_reg_equal_equiv_notes (rtx_insn *insn)
2372 {
2373   rtx *loc;
2374   bool ret = false;
2375 
2376   loc = &REG_NOTES (insn);
2377   while (*loc)
2378     {
2379       enum reg_note kind = REG_NOTE_KIND (*loc);
2380       if (kind == REG_EQUAL || kind == REG_EQUIV)
2381 	{
2382 	  *loc = XEXP (*loc, 1);
2383 	  ret = true;
2384 	}
2385       else
2386 	loc = &XEXP (*loc, 1);
2387     }
2388   return ret;
2389 }
2390 
2391 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */
2392 
2393 void
2394 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2395 {
2396   df_ref eq_use;
2397 
2398   if (!df)
2399     return;
2400 
2401   /* This loop is a little tricky.  We cannot just go down the chain because
2402      it is being modified by some actions in the loop.  So we just iterate
2403      over the head.  We plan to drain the list anyway.  */
2404   while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2405     {
2406       rtx_insn *insn = DF_REF_INSN (eq_use);
2407       rtx note = find_reg_equal_equiv_note (insn);
2408 
2409       /* This assert is generally triggered when someone deletes a REG_EQUAL
2410 	 or REG_EQUIV note by hacking the list manually rather than calling
2411 	 remove_note.  */
2412       gcc_assert (note);
2413 
2414       remove_note (insn, note);
2415     }
2416 }
2417 
2418 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2419    return true if it is found.  A simple equality test is used to determine
2420    if NODE matches.  */
2421 
2422 bool
2423 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2424 {
2425   const_rtx x;
2426 
2427   for (x = listp; x; x = XEXP (x, 1))
2428     if (node == XEXP (x, 0))
2429       return true;
2430 
2431   return false;
2432 }
2433 
2434 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2435    remove that entry from the list if it is found.
2436 
2437    A simple equality test is used to determine if NODE matches.  */
2438 
2439 void
2440 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2441 {
2442   rtx_expr_list *temp = *listp;
2443   rtx_expr_list *prev = NULL;
2444 
2445   while (temp)
2446     {
2447       if (node == temp->element ())
2448 	{
2449 	  /* Splice the node out of the list.  */
2450 	  if (prev)
2451 	    XEXP (prev, 1) = temp->next ();
2452 	  else
2453 	    *listp = temp->next ();
2454 
2455 	  return;
2456 	}
2457 
2458       prev = temp;
2459       temp = temp->next ();
2460     }
2461 }
2462 
2463 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2464    remove that entry from the list if it is found.
2465 
2466    A simple equality test is used to determine if NODE matches.  */
2467 
2468 void
2469 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2470 {
2471   rtx_insn_list *temp = *listp;
2472   rtx_insn_list *prev = NULL;
2473 
2474   while (temp)
2475     {
2476       if (node == temp->insn ())
2477 	{
2478 	  /* Splice the node out of the list.  */
2479 	  if (prev)
2480 	    XEXP (prev, 1) = temp->next ();
2481 	  else
2482 	    *listp = temp->next ();
2483 
2484 	  return;
2485 	}
2486 
2487       prev = temp;
2488       temp = temp->next ();
2489     }
2490 }
2491 
2492 /* Nonzero if X contains any volatile instructions.  These are instructions
2493    which may cause unpredictable machine state, and thus no
2494    instructions or register uses should be moved or combined across them.
2495    This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2496 
2497 int
2498 volatile_insn_p (const_rtx x)
2499 {
2500   const RTX_CODE code = GET_CODE (x);
2501   switch (code)
2502     {
2503     case LABEL_REF:
2504     case SYMBOL_REF:
2505     case CONST:
2506     CASE_CONST_ANY:
2507     case CC0:
2508     case PC:
2509     case REG:
2510     case SCRATCH:
2511     case CLOBBER:
2512     case ADDR_VEC:
2513     case ADDR_DIFF_VEC:
2514     case CALL:
2515     case MEM:
2516       return 0;
2517 
2518     case UNSPEC_VOLATILE:
2519       return 1;
2520 
2521     case ASM_INPUT:
2522     case ASM_OPERANDS:
2523       if (MEM_VOLATILE_P (x))
2524 	return 1;
2525 
2526     default:
2527       break;
2528     }
2529 
2530   /* Recursively scan the operands of this expression.  */
2531 
2532   {
2533     const char *const fmt = GET_RTX_FORMAT (code);
2534     int i;
2535 
2536     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2537       {
2538 	if (fmt[i] == 'e')
2539 	  {
2540 	    if (volatile_insn_p (XEXP (x, i)))
2541 	      return 1;
2542 	  }
2543 	else if (fmt[i] == 'E')
2544 	  {
2545 	    int j;
2546 	    for (j = 0; j < XVECLEN (x, i); j++)
2547 	      if (volatile_insn_p (XVECEXP (x, i, j)))
2548 		return 1;
2549 	  }
2550       }
2551   }
2552   return 0;
2553 }
2554 
2555 /* Nonzero if X contains any volatile memory references,
2556    UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
2557 
2558 int
2559 volatile_refs_p (const_rtx x)
2560 {
2561   const RTX_CODE code = GET_CODE (x);
2562   switch (code)
2563     {
2564     case LABEL_REF:
2565     case SYMBOL_REF:
2566     case CONST:
2567     CASE_CONST_ANY:
2568     case CC0:
2569     case PC:
2570     case REG:
2571     case SCRATCH:
2572     case CLOBBER:
2573     case ADDR_VEC:
2574     case ADDR_DIFF_VEC:
2575       return 0;
2576 
2577     case UNSPEC_VOLATILE:
2578       return 1;
2579 
2580     case MEM:
2581     case ASM_INPUT:
2582     case ASM_OPERANDS:
2583       if (MEM_VOLATILE_P (x))
2584 	return 1;
2585 
2586     default:
2587       break;
2588     }
2589 
2590   /* Recursively scan the operands of this expression.  */
2591 
2592   {
2593     const char *const fmt = GET_RTX_FORMAT (code);
2594     int i;
2595 
2596     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2597       {
2598 	if (fmt[i] == 'e')
2599 	  {
2600 	    if (volatile_refs_p (XEXP (x, i)))
2601 	      return 1;
2602 	  }
2603 	else if (fmt[i] == 'E')
2604 	  {
2605 	    int j;
2606 	    for (j = 0; j < XVECLEN (x, i); j++)
2607 	      if (volatile_refs_p (XVECEXP (x, i, j)))
2608 		return 1;
2609 	  }
2610       }
2611   }
2612   return 0;
2613 }
2614 
2615 /* Similar to above, except that it also rejects register pre- and post-
2616    incrementing.  */
2617 
2618 int
2619 side_effects_p (const_rtx x)
2620 {
2621   const RTX_CODE code = GET_CODE (x);
2622   switch (code)
2623     {
2624     case LABEL_REF:
2625     case SYMBOL_REF:
2626     case CONST:
2627     CASE_CONST_ANY:
2628     case CC0:
2629     case PC:
2630     case REG:
2631     case SCRATCH:
2632     case ADDR_VEC:
2633     case ADDR_DIFF_VEC:
2634     case VAR_LOCATION:
2635       return 0;
2636 
2637     case CLOBBER:
2638       /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
2639 	 when some combination can't be done.  If we see one, don't think
2640 	 that we can simplify the expression.  */
2641       return (GET_MODE (x) != VOIDmode);
2642 
2643     case PRE_INC:
2644     case PRE_DEC:
2645     case POST_INC:
2646     case POST_DEC:
2647     case PRE_MODIFY:
2648     case POST_MODIFY:
2649     case CALL:
2650     case UNSPEC_VOLATILE:
2651       return 1;
2652 
2653     case MEM:
2654     case ASM_INPUT:
2655     case ASM_OPERANDS:
2656       if (MEM_VOLATILE_P (x))
2657 	return 1;
2658 
2659     default:
2660       break;
2661     }
2662 
2663   /* Recursively scan the operands of this expression.  */
2664 
2665   {
2666     const char *fmt = GET_RTX_FORMAT (code);
2667     int i;
2668 
2669     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2670       {
2671 	if (fmt[i] == 'e')
2672 	  {
2673 	    if (side_effects_p (XEXP (x, i)))
2674 	      return 1;
2675 	  }
2676 	else if (fmt[i] == 'E')
2677 	  {
2678 	    int j;
2679 	    for (j = 0; j < XVECLEN (x, i); j++)
2680 	      if (side_effects_p (XVECEXP (x, i, j)))
2681 		return 1;
2682 	  }
2683       }
2684   }
2685   return 0;
2686 }
2687 
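/* Usage sketch (illustrative only): passes that delete or reorder a
   computation typically guard the transformation like this, where SRC is
   a hypothetical expression.

     if (!side_effects_p (src) && !may_trap_p (src))
       ...  SRC may safely be removed or reordered ...  */
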
2688 /* Return nonzero if evaluating rtx X might cause a trap.
2689    FLAGS controls how to consider MEMs.  A nonzero means the context
2690    of the access may have changed from the original, such that the
2691    address may have become invalid.  */
2692 
2693 int
2694 may_trap_p_1 (const_rtx x, unsigned flags)
2695 {
2696   int i;
2697   enum rtx_code code;
2698   const char *fmt;
2699 
2700   /* We make no distinction currently, but this function is part of
2701      the internal target-hooks ABI so we keep the parameter as
2702      "unsigned flags".  */
2703   bool code_changed = flags != 0;
2704 
2705   if (x == 0)
2706     return 0;
2707   code = GET_CODE (x);
2708   switch (code)
2709     {
2710       /* Handle these cases quickly.  */
2711     CASE_CONST_ANY:
2712     case SYMBOL_REF:
2713     case LABEL_REF:
2714     case CONST:
2715     case PC:
2716     case CC0:
2717     case REG:
2718     case SCRATCH:
2719       return 0;
2720 
2721     case UNSPEC:
2722       return targetm.unspec_may_trap_p (x, flags);
2723 
2724     case UNSPEC_VOLATILE:
2725     case ASM_INPUT:
2726     case TRAP_IF:
2727       return 1;
2728 
2729     case ASM_OPERANDS:
2730       return MEM_VOLATILE_P (x);
2731 
2732       /* Memory ref can trap unless it's a static var or a stack slot.  */
2733     case MEM:
2734       /* Recognize specific pattern of stack checking probes.  */
2735       if (flag_stack_check
2736 	  && MEM_VOLATILE_P (x)
2737 	  && XEXP (x, 0) == stack_pointer_rtx)
2738 	return 1;
2739       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2740 	     reference; moving it out of context, such as when moving code
2741 	     during optimization, might cause its address to become invalid.  */
2742 	  code_changed
2743 	  || !MEM_NOTRAP_P (x))
2744 	{
2745 	  HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2746 	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2747 					GET_MODE (x), code_changed);
2748 	}
2749 
2750       return 0;
2751 
2752       /* Division by a non-constant might trap.  */
2753     case DIV:
2754     case MOD:
2755     case UDIV:
2756     case UMOD:
2757       if (HONOR_SNANS (x))
2758 	return 1;
2759       if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2760 	return flag_trapping_math;
2761       if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2762 	return 1;
2763       break;
2764 
2765     case EXPR_LIST:
2766       /* An EXPR_LIST is used to represent a function call.  This
2767 	 certainly may trap.  */
2768       return 1;
2769 
2770     case GE:
2771     case GT:
2772     case LE:
2773     case LT:
2774     case LTGT:
2775     case COMPARE:
2776       /* Some floating point comparisons may trap.  */
2777       if (!flag_trapping_math)
2778 	break;
2779       /* ??? There is no machine independent way to check for tests that trap
2780 	 when COMPARE is used, though many targets do make this distinction.
2781 	 For instance, sparc uses CCFPE for compares which generate exceptions
2782 	 and CCFP for compares which do not generate exceptions.  */
2783       if (HONOR_NANS (x))
2784 	return 1;
2785       /* But often the compare has some CC mode, so check operand
2786 	 modes as well.  */
2787       if (HONOR_NANS (XEXP (x, 0))
2788 	  || HONOR_NANS (XEXP (x, 1)))
2789 	return 1;
2790       break;
2791 
2792     case EQ:
2793     case NE:
2794       if (HONOR_SNANS (x))
2795 	return 1;
2796       /* Often comparison is CC mode, so check operand modes.  */
2797       if (HONOR_SNANS (XEXP (x, 0))
2798 	  || HONOR_SNANS (XEXP (x, 1)))
2799 	return 1;
2800       break;
2801 
2802     case FIX:
2803       /* Conversion of floating point might trap.  */
2804       if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2805 	return 1;
2806       break;
2807 
2808     case NEG:
2809     case ABS:
2810     case SUBREG:
2811       /* These operations don't trap even with floating point.  */
2812       break;
2813 
2814     default:
2815       /* Any floating arithmetic may trap.  */
2816       if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2817 	return 1;
2818     }
2819 
2820   fmt = GET_RTX_FORMAT (code);
2821   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2822     {
2823       if (fmt[i] == 'e')
2824 	{
2825 	  if (may_trap_p_1 (XEXP (x, i), flags))
2826 	    return 1;
2827 	}
2828       else if (fmt[i] == 'E')
2829 	{
2830 	  int j;
2831 	  for (j = 0; j < XVECLEN (x, i); j++)
2832 	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2833 	      return 1;
2834 	}
2835     }
2836   return 0;
2837 }
2838 
2839 /* Return nonzero if evaluating rtx X might cause a trap.  */
2840 
2841 int
2842 may_trap_p (const_rtx x)
2843 {
2844   return may_trap_p_1 (x, 0);
2845 }
2846 
2847 /* Same as above, but additionally return nonzero if evaluating rtx X might
2848    cause a fault.  We define a fault for the purpose of this function as a
2849    erroneous execution condition that cannot be encountered during the normal
2850    execution of a valid program; the typical example is an unaligned memory
2851    access on a strict alignment machine.  The compiler guarantees that it
2852    doesn't generate code that will fault from a valid program, but this
2853    guarantee doesn't mean anything for individual instructions.  Consider
2854    the following example:
2855 
2856       struct S { int d; union { char *cp; int *ip; }; };
2857 
2858       int foo(struct S *s)
2859       {
2860 	if (s->d == 1)
2861 	  return *s->ip;
2862 	else
2863 	  return *s->cp;
2864       }
2865 
2866    on a strict alignment machine.  In a valid program, foo will never be
2867    invoked on a structure for which d is equal to 1 and the underlying
2868    unique field of the union not aligned on a 4-byte boundary, but the
2869    expression *s->ip might cause a fault if considered individually.
2870 
2871    At the RTL level, potentially problematic expressions will almost always
2872    verify may_trap_p; for example, the above dereference can be emitted as
2873    (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2874    However, suppose that foo is inlined in a caller that causes s->cp to
2875    point to a local character variable and guarantees that s->d is not set
2876    to 1; foo may have been effectively translated into pseudo-RTL as:
2877 
2878       if ((reg:SI) == 1)
2879 	(set (reg:SI) (mem:SI (%fp - 7)))
2880       else
2881 	(set (reg:QI) (mem:QI (%fp - 7)))
2882 
2883    Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2884    memory reference to a stack slot, but it will certainly cause a fault
2885    on a strict alignment machine.  */
2886 
2887 int
2888 may_trap_or_fault_p (const_rtx x)
2889 {
2890   return may_trap_p_1 (x, 1);
2891 }
2892 
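/* Usage sketch (illustrative only): speculative execution, where an
   expression is evaluated before its guarding condition is known, needs
   the stronger test.  X is a hypothetical expression.

     if (!may_trap_or_fault_p (x))
       ...  X may be evaluated speculatively ...  */
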
2893 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2894    i.e., an inequality.  */
2895 
2896 int
2897 inequality_comparisons_p (const_rtx x)
2898 {
2899   const char *fmt;
2900   int len, i;
2901   const enum rtx_code code = GET_CODE (x);
2902 
2903   switch (code)
2904     {
2905     case REG:
2906     case SCRATCH:
2907     case PC:
2908     case CC0:
2909     CASE_CONST_ANY:
2910     case CONST:
2911     case LABEL_REF:
2912     case SYMBOL_REF:
2913       return 0;
2914 
2915     case LT:
2916     case LTU:
2917     case GT:
2918     case GTU:
2919     case LE:
2920     case LEU:
2921     case GE:
2922     case GEU:
2923       return 1;
2924 
2925     default:
2926       break;
2927     }
2928 
2929   len = GET_RTX_LENGTH (code);
2930   fmt = GET_RTX_FORMAT (code);
2931 
2932   for (i = 0; i < len; i++)
2933     {
2934       if (fmt[i] == 'e')
2935 	{
2936 	  if (inequality_comparisons_p (XEXP (x, i)))
2937 	    return 1;
2938 	}
2939       else if (fmt[i] == 'E')
2940 	{
2941 	  int j;
2942 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2943 	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
2944 	      return 1;
2945 	}
2946     }
2947 
2948   return 0;
2949 }
2950 
2951 /* Replace any occurrence of FROM in X with TO.  The function does
2952    not enter into CONST_DOUBLE for the replace.
2953 
2954    Note that copying is not done so X must not be shared unless all copies
2955    are to be modified.
2956 
2957    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
2958    those pointer-equal ones.  */
2959 
2960 rtx
2961 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
2962 {
2963   int i, j;
2964   const char *fmt;
2965 
2966   if (x == from)
2967     return to;
2968 
2969   /* Allow this function to make replacements in EXPR_LISTs.  */
2970   if (x == 0)
2971     return 0;
2972 
2973   if (all_regs
2974       && REG_P (x)
2975       && REG_P (from)
2976       && REGNO (x) == REGNO (from))
2977     {
2978       gcc_assert (GET_MODE (x) == GET_MODE (from));
2979       return to;
2980     }
2981   else if (GET_CODE (x) == SUBREG)
2982     {
2983       rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
2984 
2985       if (CONST_INT_P (new_rtx))
2986 	{
2987 	  x = simplify_subreg (GET_MODE (x), new_rtx,
2988 			       GET_MODE (SUBREG_REG (x)),
2989 			       SUBREG_BYTE (x));
2990 	  gcc_assert (x);
2991 	}
2992       else
2993 	SUBREG_REG (x) = new_rtx;
2994 
2995       return x;
2996     }
2997   else if (GET_CODE (x) == ZERO_EXTEND)
2998     {
2999       rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3000 
3001       if (CONST_INT_P (new_rtx))
3002 	{
3003 	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3004 					new_rtx, GET_MODE (XEXP (x, 0)));
3005 	  gcc_assert (x);
3006 	}
3007       else
3008 	XEXP (x, 0) = new_rtx;
3009 
3010       return x;
3011     }
3012 
3013   fmt = GET_RTX_FORMAT (GET_CODE (x));
3014   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3015     {
3016       if (fmt[i] == 'e')
3017 	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3018       else if (fmt[i] == 'E')
3019 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3020 	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3021 					   from, to, all_regs);
3022     }
3023 
3024   return x;
3025 }
3026 
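/* Usage sketch (illustrative only): substitute one pseudo register for
   another throughout a copied pattern.  OLD_REG and NEW_REG are
   hypothetical locals; the pattern is copied first because replace_rtx
   modifies its argument in place.

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, old_reg, new_reg, false);  */
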
3027 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
3028    the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */
3029 
3030 void
3031 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3032 {
3033   /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
3034   rtx x = *loc;
3035   if (JUMP_TABLE_DATA_P (x))
3036     {
3037       x = PATTERN (x);
3038       rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3039       int len = GET_NUM_ELEM (vec);
3040       for (int i = 0; i < len; ++i)
3041 	{
3042 	  rtx ref = RTVEC_ELT (vec, i);
3043 	  if (XEXP (ref, 0) == old_label)
3044 	    {
3045 	      XEXP (ref, 0) = new_label;
3046 	      if (update_label_nuses)
3047 		{
3048 		  ++LABEL_NUSES (new_label);
3049 		  --LABEL_NUSES (old_label);
3050 		}
3051 	    }
3052 	}
3053       return;
3054     }
3055 
3056   /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3057      field.  This is not handled by the iterator because it doesn't
3058      handle unprinted ('0') fields.  */
3059   if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3060     JUMP_LABEL (x) = new_label;
3061 
3062   subrtx_ptr_iterator::array_type array;
3063   FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3064     {
3065       rtx *loc = *iter;
3066       if (rtx x = *loc)
3067 	{
3068 	  if (GET_CODE (x) == SYMBOL_REF
3069 	      && CONSTANT_POOL_ADDRESS_P (x))
3070 	    {
3071 	      rtx c = get_pool_constant (x);
3072 	      if (rtx_referenced_p (old_label, c))
3073 		{
3074 		  /* Create a copy of constant C; replace the label inside
3075 		     but do not update LABEL_NUSES because uses in the constant
3076 		     pool are not counted.  */
3077 		  rtx new_c = copy_rtx (c);
3078 		  replace_label (&new_c, old_label, new_label, false);
3079 
3080 		  /* Add the new constant NEW_C to constant pool and replace
3081 		     the old reference to constant by new reference.  */
3082 		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3083 		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3084 		}
3085 	    }
3086 
3087 	  if ((GET_CODE (x) == LABEL_REF
3088 	       || GET_CODE (x) == INSN_LIST)
3089 	      && XEXP (x, 0) == old_label)
3090 	    {
3091 	      XEXP (x, 0) = new_label;
3092 	      if (update_label_nuses)
3093 		{
3094 		  ++LABEL_NUSES (new_label);
3095 		  --LABEL_NUSES (old_label);
3096 		}
3097 	    }
3098 	}
3099     }
3100 }
3101 
3102 void
3103 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3104 		       rtx_insn *new_label, bool update_label_nuses)
3105 {
3106   rtx insn_as_rtx = insn;
3107   replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3108   gcc_checking_assert (insn_as_rtx == insn);
3109 }
3110 
3111 /* Return true if X is referenced in BODY.  */
3112 
3113 bool
3114 rtx_referenced_p (const_rtx x, const_rtx body)
3115 {
3116   subrtx_iterator::array_type array;
3117   FOR_EACH_SUBRTX (iter, array, body, ALL)
3118     if (const_rtx y = *iter)
3119       {
3120 	/* Check if a label_ref Y refers to label X.  */
3121 	if (GET_CODE (y) == LABEL_REF
3122 	    && LABEL_P (x)
3123 	    && label_ref_label (y) == x)
3124 	  return true;
3125 
3126 	if (rtx_equal_p (x, y))
3127 	  return true;
3128 
3129 	/* If Y is a reference to a pool constant, traverse the constant.  */
3130 	if (GET_CODE (y) == SYMBOL_REF
3131 	    && CONSTANT_POOL_ADDRESS_P (y))
3132 	  iter.substitute (get_pool_constant (y));
3133       }
3134   return false;
3135 }
3136 
3137 /* If INSN is a tablejump, return true and store the label (which precedes
3138    the jump table) in *LABELP and the table in *TABLEP.  Both may be NULL.  */
3139 
3140 bool
3141 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3142 	     rtx_jump_table_data **tablep)
3143 {
3144   if (!JUMP_P (insn))
3145     return false;
3146 
3147   rtx target = JUMP_LABEL (insn);
3148   if (target == NULL_RTX || ANY_RETURN_P (target))
3149     return false;
3150 
3151   rtx_insn *label = as_a<rtx_insn *> (target);
3152   rtx_insn *table = next_insn (label);
3153   if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3154     return false;
3155 
3156   if (labelp)
3157     *labelp = label;
3158   if (tablep)
3159     *tablep = as_a <rtx_jump_table_data *> (table);
3160   return true;
3161 }
3162 
3163 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3164    constant that is not in the constant pool and not in the condition
3165    of an IF_THEN_ELSE.  */
3166 
3167 static int
3168 computed_jump_p_1 (const_rtx x)
3169 {
3170   const enum rtx_code code = GET_CODE (x);
3171   int i, j;
3172   const char *fmt;
3173 
3174   switch (code)
3175     {
3176     case LABEL_REF:
3177     case PC:
3178       return 0;
3179 
3180     case CONST:
3181     CASE_CONST_ANY:
3182     case SYMBOL_REF:
3183     case REG:
3184       return 1;
3185 
3186     case MEM:
3187       return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3188 		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3189 
3190     case IF_THEN_ELSE:
3191       return (computed_jump_p_1 (XEXP (x, 1))
3192 	      || computed_jump_p_1 (XEXP (x, 2)));
3193 
3194     default:
3195       break;
3196     }
3197 
3198   fmt = GET_RTX_FORMAT (code);
3199   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3200     {
3201       if (fmt[i] == 'e'
3202 	  && computed_jump_p_1 (XEXP (x, i)))
3203 	return 1;
3204 
3205       else if (fmt[i] == 'E')
3206 	for (j = 0; j < XVECLEN (x, i); j++)
3207 	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
3208 	    return 1;
3209     }
3210 
3211   return 0;
3212 }
3213 
3214 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3215 
3216    Tablejumps and casesi insns are not considered indirect jumps;
3217    we can recognize them by a (use (label_ref)).  */
3218 
3219 int
3220 computed_jump_p (const rtx_insn *insn)
3221 {
3222   int i;
3223   if (JUMP_P (insn))
3224     {
3225       rtx pat = PATTERN (insn);
3226 
3227       /* If we have a JUMP_LABEL set, we're not a computed jump.  */
3228       if (JUMP_LABEL (insn) != NULL)
3229 	return 0;
3230 
3231       if (GET_CODE (pat) == PARALLEL)
3232 	{
3233 	  int len = XVECLEN (pat, 0);
3234 	  int has_use_labelref = 0;
3235 
3236 	  for (i = len - 1; i >= 0; i--)
3237 	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3238 		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3239 		    == LABEL_REF))
3240 	      {
3241 	        has_use_labelref = 1;
3242 	        break;
3243 	      }
3244 
3245 	  if (! has_use_labelref)
3246 	    for (i = len - 1; i >= 0; i--)
3247 	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3248 		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3249 		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3250 		return 1;
3251 	}
3252       else if (GET_CODE (pat) == SET
3253 	       && SET_DEST (pat) == pc_rtx
3254 	       && computed_jump_p_1 (SET_SRC (pat)))
3255 	return 1;
3256     }
3257   return 0;
3258 }
3259 
3260 
3261 
3262 /* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
3263    the equivalent add insn and pass the result to FN, using DATA as the
3264    final argument.  */
3265 
3266 static int
3267 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3268 {
3269   rtx x = XEXP (mem, 0);
3270   switch (GET_CODE (x))
3271     {
3272     case PRE_INC:
3273     case POST_INC:
3274       {
3275 	int size = GET_MODE_SIZE (GET_MODE (mem));
3276 	rtx r1 = XEXP (x, 0);
3277 	rtx c = gen_int_mode (size, GET_MODE (r1));
3278 	return fn (mem, x, r1, r1, c, data);
3279       }
3280 
3281     case PRE_DEC:
3282     case POST_DEC:
3283       {
3284 	int size = GET_MODE_SIZE (GET_MODE (mem));
3285 	rtx r1 = XEXP (x, 0);
3286 	rtx c = gen_int_mode (-size, GET_MODE (r1));
3287 	return fn (mem, x, r1, r1, c, data);
3288       }
3289 
3290     case PRE_MODIFY:
3291     case POST_MODIFY:
3292       {
3293 	rtx r1 = XEXP (x, 0);
3294 	rtx add = XEXP (x, 1);
3295 	return fn (mem, x, r1, add, NULL, data);
3296       }
3297 
3298     default:
3299       gcc_unreachable ();
3300     }
3301 }
3302 
3303 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3304    For each such autoinc operation found, call FN, passing it
3305    the innermost enclosing MEM, the operation itself, the RTX modified
3306    by the operation, two RTXs (the second may be NULL) that, once
3307    added, represent the value to be held by the modified RTX
3308    afterwards, and DATA.  FN is to return 0 to continue the
3309    traversal or any other value to have it returned to the caller of
3310    for_each_inc_dec.  */
3311 
3312 int
3313 for_each_inc_dec (rtx x,
3314 		  for_each_inc_dec_fn fn,
3315 		  void *data)
3316 {
3317   subrtx_var_iterator::array_type array;
3318   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3319     {
3320       rtx mem = *iter;
3321       if (mem
3322 	  && MEM_P (mem)
3323 	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3324 	{
3325 	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3326 	  if (res != 0)
3327 	    return res;
3328 	  iter.skip_subrtxes ();
3329 	}
3330     }
3331   return 0;
3332 }
3333 
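/* Usage sketch (illustrative only): count the auto-increment addresses in
   an insn's pattern.  The callback and variable names are hypothetical;
   returning 0 from the callback continues the traversal.

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
       return 0;
     }

     int n_autoinc = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &n_autoinc);  */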
3334 
3335 /* Searches X for any reference to REGNO, returning the rtx of the
3336    reference found if any.  Otherwise, returns NULL_RTX.  */
3337 
3338 rtx
3339 regno_use_in (unsigned int regno, rtx x)
3340 {
3341   const char *fmt;
3342   int i, j;
3343   rtx tem;
3344 
3345   if (REG_P (x) && REGNO (x) == regno)
3346     return x;
3347 
3348   fmt = GET_RTX_FORMAT (GET_CODE (x));
3349   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3350     {
3351       if (fmt[i] == 'e')
3352 	{
3353 	  if ((tem = regno_use_in (regno, XEXP (x, i))))
3354 	    return tem;
3355 	}
3356       else if (fmt[i] == 'E')
3357 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3358 	  if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3359 	    return tem;
3360     }
3361 
3362   return NULL_RTX;
3363 }
3364 
3365 /* Return a value indicating whether OP, an operand of a commutative
3366    operation, is preferred as the first or second operand.  The more
3367    positive the value, the stronger the preference for being the first
3368    operand.  */
3369 
3370 int
3371 commutative_operand_precedence (rtx op)
3372 {
3373   enum rtx_code code = GET_CODE (op);
3374 
3375   /* Constants always become the second operand.  Prefer "nice" constants.  */
3376   if (code == CONST_INT)
3377     return -8;
3378   if (code == CONST_WIDE_INT)
3379     return -7;
3380   if (code == CONST_DOUBLE)
3381     return -7;
3382   if (code == CONST_FIXED)
3383     return -7;
3384   op = avoid_constant_pool_reference (op);
3385   code = GET_CODE (op);
3386 
3387   switch (GET_RTX_CLASS (code))
3388     {
3389     case RTX_CONST_OBJ:
3390       if (code == CONST_INT)
3391         return -6;
3392       if (code == CONST_WIDE_INT)
3393         return -6;
3394       if (code == CONST_DOUBLE)
3395         return -5;
3396       if (code == CONST_FIXED)
3397         return -5;
3398       return -4;
3399 
3400     case RTX_EXTRA:
3401       /* SUBREGs of objects should come second.  */
3402       if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3403         return -3;
3404       return 0;
3405 
3406     case RTX_OBJ:
3407       /* Complex expressions should come first, so decrease the priority
3408          of objects.  Prefer pointer objects over non-pointer objects.  */
3409       if ((REG_P (op) && REG_POINTER (op))
3410 	  || (MEM_P (op) && MEM_POINTER (op)))
3411 	return -1;
3412       return -2;
3413 
3414     case RTX_COMM_ARITH:
3415       /* Prefer operands that are themselves commutative to be first.
3416          This helps to make things linear.  In particular,
3417          (and (and (reg) (reg)) (not (reg))) is canonical.  */
3418       return 4;
3419 
3420     case RTX_BIN_ARITH:
3421       /* If only one operand is a binary expression, it will be the first
3422          operand.  In particular,  (plus (minus (reg) (reg)) (neg (reg)))
3423          is canonical, although it will usually be further simplified.  */
3424       return 2;
3425 
3426     case RTX_UNARY:
3427       /* Then prefer NEG and NOT.  */
3428       if (code == NEG || code == NOT)
3429         return 1;
3430       /* FALLTHRU */
3431 
3432     default:
3433       return 0;
3434     }
3435 }
3436 
3437 /* Return 1 iff it is necessary to swap the operands of a commutative
3438    operation in order to canonicalize the expression.  */
3439 
3440 bool
3441 swap_commutative_operands_p (rtx x, rtx y)
3442 {
3443   return (commutative_operand_precedence (x)
3444 	  < commutative_operand_precedence (y));
3445 }
3446 
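/* Usage sketch (illustrative only): canonicalize the operand order of a
   commutative operation before constructing it.  OP0 and OP1 are
   hypothetical locals.

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);  */
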
3447 /* Return 1 if X is an autoincrement side effect and the register is
3448    not the stack pointer.  */
3449 int
3450 auto_inc_p (const_rtx x)
3451 {
3452   switch (GET_CODE (x))
3453     {
3454     case PRE_INC:
3455     case POST_INC:
3456     case PRE_DEC:
3457     case POST_DEC:
3458     case PRE_MODIFY:
3459     case POST_MODIFY:
3460       /* There are no REG_INC notes for SP.  */
3461       if (XEXP (x, 0) != stack_pointer_rtx)
3462 	return 1;
3463     default:
3464       break;
3465     }
3466   return 0;
3467 }
3468 
3469 /* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
3470 int
3471 loc_mentioned_in_p (rtx *loc, const_rtx in)
3472 {
3473   enum rtx_code code;
3474   const char *fmt;
3475   int i, j;
3476 
3477   if (!in)
3478     return 0;
3479 
3480   code = GET_CODE (in);
3481   fmt = GET_RTX_FORMAT (code);
3482   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3483     {
3484       if (fmt[i] == 'e')
3485 	{
3486 	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3487 	    return 1;
3488 	}
3489       else if (fmt[i] == 'E')
3490 	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3491 	  if (loc == &XVECEXP (in, i, j)
3492 	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3493 	    return 1;
3494     }
3495   return 0;
3496 }
3497 
3498 /* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
3499    and SUBREG_BYTE, return the bit offset where the subreg begins
3500    (counting from the least significant bit of the operand).  */
3501 
3502 unsigned int
3503 subreg_lsb_1 (machine_mode outer_mode,
3504 	      machine_mode inner_mode,
3505 	      unsigned int subreg_byte)
3506 {
3507   unsigned int bitpos;
3508   unsigned int byte;
3509   unsigned int word;
3510 
3511   /* A paradoxical subreg begins at bit position 0.  */
3512   if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3513     return 0;
3514 
3515   if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3516     /* If the subreg crosses a word boundary ensure that
3517        it also begins and ends on a word boundary.  */
3518     gcc_assert (!((subreg_byte % UNITS_PER_WORD
3519 		  + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3520 		  && (subreg_byte % UNITS_PER_WORD
3521 		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3522 
3523   if (WORDS_BIG_ENDIAN)
3524     word = (GET_MODE_SIZE (inner_mode)
3525 	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3526   else
3527     word = subreg_byte / UNITS_PER_WORD;
3528   bitpos = word * BITS_PER_WORD;
3529 
3530   if (BYTES_BIG_ENDIAN)
3531     byte = (GET_MODE_SIZE (inner_mode)
3532 	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3533   else
3534     byte = subreg_byte % UNITS_PER_WORD;
3535   bitpos += byte * BITS_PER_UNIT;
3536 
3537   return bitpos;
3538 }
3539 
3540 /* Given a subreg X, return the bit offset where the subreg begins
3541    (counting from the least significant bit of the reg).  */
3542 
3543 unsigned int
3544 subreg_lsb (const_rtx x)
3545 {
3546   return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3547 		       SUBREG_BYTE (x));
3548 }
3549 
3550 /* Return the subreg byte offset for a subreg whose outer value has
3551    OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3552    there are LSB_SHIFT *bits* between the lsb of the outer value and the
3553    lsb of the inner value.  This is the inverse of the calculation
3554    performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */
3555 
3556 unsigned int
3557 subreg_size_offset_from_lsb (unsigned int outer_bytes,
3558 			     unsigned int inner_bytes,
3559 			     unsigned int lsb_shift)
3560 {
3561   /* A paradoxical subreg begins at bit position 0.  */
3562   if (outer_bytes > inner_bytes)
3563     {
3564       gcc_checking_assert (lsb_shift == 0);
3565       return 0;
3566     }
3567 
3568   gcc_assert (lsb_shift % BITS_PER_UNIT == 0);
3569   unsigned int lower_bytes = lsb_shift / BITS_PER_UNIT;
3570   unsigned int upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3571   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3572     return upper_bytes;
3573   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3574     return lower_bytes;
3575   else
3576     {
3577       unsigned int lower_word_part = lower_bytes & -UNITS_PER_WORD;
3578       unsigned int upper_word_part = upper_bytes & -UNITS_PER_WORD;
3579       if (WORDS_BIG_ENDIAN)
3580 	return upper_word_part + (lower_bytes - lower_word_part);
3581       else
3582 	return lower_word_part + (upper_bytes - upper_word_part);
3583     }
3584 }
3585 
3586 /* Fill in information about a subreg of a hard register.
3587    xregno - A regno of an inner hard subreg_reg (or what will become one).
3588    xmode  - The mode of xregno.
3589    offset - The byte offset.
3590    ymode  - The mode of a top level SUBREG (or what may become one).
3591    info   - Pointer to structure to fill in.
3592 
3593    Rather than considering one particular inner register (and thus one
3594    particular "outer" register) in isolation, this function really uses
3595    XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
3596    function does not check whether adding INFO->offset to XREGNO gives
3597    a valid hard register; even if INFO->offset + XREGNO is out of range,
3598    there might be another register of the same type that is in range.
3599    Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3600    register, since that can depend on things like whether the final
3601    register number is even or odd.  Callers that want to check whether
3602    this particular subreg can be replaced by a simple (reg ...) should
3603    use simplify_subreg_regno.  */
3604 
3605 void
3606 subreg_get_info (unsigned int xregno, machine_mode xmode,
3607 		 unsigned int offset, machine_mode ymode,
3608 		 struct subreg_info *info)
3609 {
3610   unsigned int nregs_xmode, nregs_ymode;
3611 
3612   gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3613 
3614   unsigned int xsize = GET_MODE_SIZE (xmode);
3615   unsigned int ysize = GET_MODE_SIZE (ymode);
3616   bool rknown = false;
3617 
3618   /* If the register representation of a non-scalar mode has holes in it,
3619      we expect the scalar units to be concatenated together, with the holes
3620      distributed evenly among the scalar units.  Each scalar unit must occupy
3621      at least one register.  */
3622   if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3623     {
3624       nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3625       unsigned int nunits = GET_MODE_NUNITS (xmode);
3626       machine_mode xmode_unit = GET_MODE_INNER (xmode);
3627       gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3628       gcc_assert (nregs_xmode
3629 		  == (nunits
3630 		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3631       gcc_assert (hard_regno_nregs[xregno][xmode]
3632 		  == hard_regno_nregs[xregno][xmode_unit] * nunits);
3633 
3634       /* You can only ask for a SUBREG of a value with holes in the middle
3635 	 if you don't cross the holes.  (Such a SUBREG should be done by
3636 	 picking a different register class, or doing it in memory if
3637 	 necessary.)  An example of a value with holes is XCmode on 32-bit
3638 	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3639 	 3 for each part, but in memory it's two 128-bit parts.
3640 	 Padding is assumed to be at the end (not necessarily the 'high part')
3641 	 of each unit.  */
3642       if ((offset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3643 	  && (offset / GET_MODE_SIZE (xmode_unit)
3644 	      != ((offset + ysize - 1) / GET_MODE_SIZE (xmode_unit))))
3645 	{
3646 	  info->representable_p = false;
3647 	  rknown = true;
3648 	}
3649     }
3650   else
3651     nregs_xmode = hard_regno_nregs[xregno][xmode];
3652 
3653   nregs_ymode = hard_regno_nregs[xregno][ymode];
3654 
3655   /* Paradoxical subregs are otherwise valid.  */
3656   if (!rknown && offset == 0 && ysize > xsize)
3657     {
3658       info->representable_p = true;
3659       /* If this is a big endian paradoxical subreg, which uses more
3660 	 actual hard registers than the original register, we must
3661 	 return a negative offset so that we find the proper highpart
3662 	 of the register.
3663 
3664 	 We assume that the ordering of registers within a multi-register
3665 	 value has a consistent endianness: if bytes and register words
3666 	 have different endianness, the hard registers that make up a
3667 	 multi-register value must be at least word-sized.  */
3668       if (REG_WORDS_BIG_ENDIAN)
3669 	info->offset = (int) nregs_xmode - (int) nregs_ymode;
3670       else
3671 	info->offset = 0;
3672       info->nregs = nregs_ymode;
3673       return;
3674     }
3675 
3676   /* If registers store different numbers of bits in the different
3677      modes, we cannot generally form this subreg.  */
3678   if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3679       && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3680       && (xsize % nregs_xmode) == 0
3681       && (ysize % nregs_ymode) == 0)
3682     {
3683       int regsize_xmode = xsize / nregs_xmode;
3684       int regsize_ymode = ysize / nregs_ymode;
3685       if (!rknown
3686 	  && ((nregs_ymode > 1 && regsize_xmode > regsize_ymode)
3687 	      || (nregs_xmode > 1 && regsize_ymode > regsize_xmode)))
3688 	{
3689 	  info->representable_p = false;
3690 	  info->nregs = CEIL (ysize, regsize_xmode);
3691 	  info->offset = offset / regsize_xmode;
3692 	  return;
3693 	}
3694       /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3695 	 would go outside of XMODE.  */
3696       if (!rknown && ysize + offset > xsize)
3697 	{
3698 	  info->representable_p = false;
3699 	  info->nregs = nregs_ymode;
3700 	  info->offset = offset / regsize_xmode;
3701 	  return;
3702 	}
3703       /* Quick exit for the simple and common case of extracting whole
3704 	 subregisters from a multiregister value.  */
3705       /* ??? It would be better to integrate this into the code below,
3706 	 if we can generalize the concept enough and figure out how
3707 	 odd-sized modes can coexist with the other weird cases we support.  */
3708       if (!rknown
3709 	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3710 	  && regsize_xmode == regsize_ymode
3711 	  && (offset % regsize_ymode) == 0)
3712 	{
3713 	  info->representable_p = true;
3714 	  info->nregs = nregs_ymode;
3715 	  info->offset = offset / regsize_ymode;
3716 	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3717 	  return;
3718 	}
3719     }
3720 
3721   /* Lowpart subregs are otherwise valid.  */
3722   if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3723     {
3724       info->representable_p = true;
3725       rknown = true;
3726 
3727       if (offset == 0 || nregs_xmode == nregs_ymode)
3728 	{
3729 	  info->offset = 0;
3730 	  info->nregs = nregs_ymode;
3731 	  return;
3732 	}
3733     }
3734 
3735   /* Set NUM_BLOCKS to the number of independently-representable YMODE
3736      values there are in (reg:XMODE XREGNO).  We can view the register
3737      as consisting of this number of independent "blocks", where each
3738      block occupies NREGS_YMODE registers and contains exactly one
3739      representable YMODE value.  */
3740   gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3741   unsigned int num_blocks = nregs_xmode / nregs_ymode;
3742 
3743   /* Calculate the number of bytes in each block.  This must always
3744      be exact, otherwise we don't know how to verify the constraint.
3745      These conditions may be relaxed but subreg_regno_offset would
3746      need to be redesigned.  */
3747   gcc_assert ((xsize % num_blocks) == 0);
3748   unsigned int bytes_per_block = xsize / num_blocks;
3749 
3750   /* Get the number of the first block that contains the subreg and the byte
3751      offset of the subreg from the start of that block.  */
3752   unsigned int block_number = offset / bytes_per_block;
3753   unsigned int subblock_offset = offset % bytes_per_block;
3754 
3755   if (!rknown)
3756     {
3757       /* Only the lowpart of each block is representable.  */
3758       info->representable_p
3759 	= (subblock_offset
3760 	   == subreg_size_lowpart_offset (ysize, bytes_per_block));
3761       rknown = true;
3762     }
3763 
3764   /* We assume that the ordering of registers within a multi-register
3765      value has a consistent endianness: if bytes and register words
3766      have different endianness, the hard registers that make up a
3767      multi-register value must be at least word-sized.  */
3768   if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3769     /* The block number we calculated above followed memory endianness.
3770        Convert it to register endianness by counting back from the end.
3771        (Note that, because of the assumption above, each block must be
3772        at least word-sized.)  */
3773     info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3774   else
3775     info->offset = block_number * nregs_ymode;
3776   info->nregs = nregs_ymode;
3777 }
3778 
3779 /* This function returns the regno offset of a subreg expression.
3780    xregno - A regno of an inner hard subreg_reg (or what will become one).
3781    xmode  - The mode of xregno.
3782    offset - The byte offset.
3783    ymode  - The mode of a top level SUBREG (or what may become one).
3784    RETURN - The regno offset which would be used.  */
3785 unsigned int
3786 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3787 		     unsigned int offset, machine_mode ymode)
3788 {
3789   struct subreg_info info;
3790   subreg_get_info (xregno, xmode, offset, ymode, &info);
3791   return info.offset;
3792 }
3793 
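/* Illustrative example (not part of the original source): on a 32-bit
   target where (reg:DI 0) occupies hard registers 0 and 1, and where byte
   and register-word endianness agree,

     subreg_regno_offset (0, DImode, 0, SImode) == 0
     subreg_regno_offset (0, DImode, 4, SImode) == 1

   so (subreg:SI (reg:DI 0) 4) refers to hard register 1.  When
   WORDS_BIG_ENDIAN and REG_WORDS_BIG_ENDIAN differ, the block number is
   counted back from the other end, as described in subreg_get_info above.  */
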
3794 /* This function returns true when the offset is representable via
3795    subreg_offset in the given regno.
3796    xregno - A regno of an inner hard subreg_reg (or what will become one).
3797    xmode  - The mode of xregno.
3798    offset - The byte offset.
3799    ymode  - The mode of a top level SUBREG (or what may become one).
3800    RETURN - Whether the offset is representable.  */
3801 bool
3802 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3803 			       unsigned int offset, machine_mode ymode)
3804 {
3805   struct subreg_info info;
3806   subreg_get_info (xregno, xmode, offset, ymode, &info);
3807   return info.representable_p;
3808 }
3809 
3810 /* Return the number of a YMODE register to which
3811 
3812        (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3813 
3814    can be simplified.  Return -1 if the subreg can't be simplified.
3815 
3816    XREGNO is a hard register number.  */
3817 
3818 int
3819 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3820 		       unsigned int offset, machine_mode ymode)
3821 {
3822   struct subreg_info info;
3823   unsigned int yregno;
3824 
3825 #ifdef CANNOT_CHANGE_MODE_CLASS
3826   /* Give the backend a chance to disallow the mode change.  */
3827   if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3828       && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3829       && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3830       /* We can use mode change in LRA for some transformations.  */
3831       && ! lra_in_progress)
3832     return -1;
3833 #endif
3834 
3835   /* We shouldn't simplify stack-related registers.  */
3836   if ((!reload_completed || frame_pointer_needed)
3837       && xregno == FRAME_POINTER_REGNUM)
3838     return -1;
3839 
3840   if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3841       && xregno == ARG_POINTER_REGNUM)
3842     return -1;
3843 
3844   if (xregno == STACK_POINTER_REGNUM
3845       /* We should convert the hard stack register in LRA if it is
3846 	 possible.  */
3847       && ! lra_in_progress)
3848     return -1;
3849 
3850   /* Try to get the register offset.  */
3851   subreg_get_info (xregno, xmode, offset, ymode, &info);
3852   if (!info.representable_p)
3853     return -1;
3854 
3855   /* Make sure that the offsetted register value is in range.  */
3856   yregno = xregno + info.offset;
3857   if (!HARD_REGISTER_NUM_P (yregno))
3858     return -1;
3859 
3860   /* See whether (reg:YMODE YREGNO) is valid.
3861 
3862      ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3863      This is a kludge to work around how complex FP arguments are passed
3864      on IA-64 and should be fixed.  See PR target/49226.  */
3865   if (!HARD_REGNO_MODE_OK (yregno, ymode)
3866       && HARD_REGNO_MODE_OK (xregno, xmode))
3867     return -1;
3868 
3869   return (int) yregno;
3870 }
3871 
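/* Usage sketch (illustrative, not from the original source): a caller that
   wants to fold (subreg:YMODE (reg:XMODE XREGNO) OFFSET) into a plain hard
   register can do

     int yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
     if (yregno >= 0)
       result = gen_rtx_REG (ymode, yregno);

   where gen_rtx_REG is the usual RTL register constructor; a negative
   return value means the subreg cannot be simplified this way.  */
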
3872 /* Return the final regno that a subreg expression refers to.  */
3873 unsigned int
3874 subreg_regno (const_rtx x)
3875 {
3876   unsigned int ret;
3877   rtx subreg = SUBREG_REG (x);
3878   int regno = REGNO (subreg);
3879 
3880   ret = regno + subreg_regno_offset (regno,
3881 				     GET_MODE (subreg),
3882 				     SUBREG_BYTE (x),
3883 				     GET_MODE (x));
3884   return ret;
3885 
3886 }
3887 
3888 /* Return the number of registers that a subreg expression refers
3889    to.  */
3890 unsigned int
3891 subreg_nregs (const_rtx x)
3892 {
3893   return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3894 }
3895 
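/* Illustrative example (not from the original source): on a 32-bit target,
   subreg_nregs of (subreg:SI (reg:DI 0) 0) is 1, even though (reg:DI 0)
   itself spans two hard registers; the count refers to the outer (SUBREG)
   mode.  */
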
3896 /* Return the number of registers that a subreg expression with register
3897    number REGNO refers to.  This is a copy of rtlanal.c:subreg_nregs,
3898    changed so that the regno can be passed in.  */
3899 
3900 unsigned int
3901 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3902 {
3903   struct subreg_info info;
3904   rtx subreg = SUBREG_REG (x);
3905 
3906   subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3907 		   &info);
3908   return info.nregs;
3909 }
3910 
3911 struct parms_set_data
3912 {
3913   int nregs;
3914   HARD_REG_SET regs;
3915 };
3916 
3917 /* Helper function for noticing stores to parameter registers.  */
3918 static void
3919 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3920 {
3921   struct parms_set_data *const d = (struct parms_set_data *) data;
3922   if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3923       && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3924     {
3925       CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3926       d->nregs--;
3927     }
3928 }
3929 
3930 /* Look backward for first parameter to be loaded.
3931    Note that loads of all parameters will not necessarily be
3932    found if CSE has eliminated some of them (e.g., an argument
3933    to the outer function is passed down as a parameter).
3934    Do not skip BOUNDARY.  */
3935 rtx_insn *
3936 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3937 {
3938   struct parms_set_data parm;
3939   rtx p;
3940   rtx_insn *before, *first_set;
3941 
3942   /* Since different machines initialize their parameter registers
3943      in different orders, assume nothing.  Collect the set of all
3944      parameter registers.  */
3945   CLEAR_HARD_REG_SET (parm.regs);
3946   parm.nregs = 0;
3947   for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3948     if (GET_CODE (XEXP (p, 0)) == USE
3949 	&& REG_P (XEXP (XEXP (p, 0), 0))
3950 	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
3951       {
3952 	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3953 
3954 	/* We only care about registers which can hold function
3955 	   arguments.  */
3956 	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3957 	  continue;
3958 
3959 	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3960 	parm.nregs++;
3961       }
3962   before = call_insn;
3963   first_set = call_insn;
3964 
3965   /* Search backward for the first set of a register in this set.  */
3966   while (parm.nregs && before != boundary)
3967     {
3968       before = PREV_INSN (before);
3969 
3970       /* It is possible that some loads got CSEed from one call to
3971          another.  Stop in that case.  */
3972       if (CALL_P (before))
3973 	break;
3974 
3975       /* Our caller must either ensure that we will find all sets
3976          (in case the code has not been optimized yet), or take care of
3977          possible labels by setting BOUNDARY to the preceding
3978          CODE_LABEL.  */
3979       if (LABEL_P (before))
3980 	{
3981 	  gcc_assert (before == boundary);
3982 	  break;
3983 	}
3984 
3985       if (INSN_P (before))
3986 	{
3987 	  int nregs_old = parm.nregs;
3988 	  note_stores (PATTERN (before), parms_set, &parm);
3989 	  /* If we found something that did not set a parameter reg,
3990 	     we're done.  Do not keep going, as that might result
3991 	     in hoisting an insn before the setting of a pseudo
3992 	     that is used by the hoisted insn. */
3993 	  if (nregs_old != parm.nregs)
3994 	    first_set = before;
3995 	  else
3996 	    break;
3997 	}
3998     }
3999   return first_set;
4000 }
4001 
4002 /* Return true if we should avoid inserting code between INSN and preceding
4003    call instruction.  */
4004 
4005 bool
4006 keep_with_call_p (const rtx_insn *insn)
4007 {
4008   rtx set;
4009 
4010   if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4011     {
4012       if (REG_P (SET_DEST (set))
4013 	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4014 	  && fixed_regs[REGNO (SET_DEST (set))]
4015 	  && general_operand (SET_SRC (set), VOIDmode))
4016 	return true;
4017       if (REG_P (SET_SRC (set))
4018 	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4019 	  && REG_P (SET_DEST (set))
4020 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4021 	return true;
4022       /* There may be a stack pop just after the call and before the store
4023 	 of the return register.  Search for the actual store when deciding
4024 	 if we can break or not.  */
4025       if (SET_DEST (set) == stack_pointer_rtx)
4026 	{
4027 	  /* This CONST_CAST is okay because next_nonnote_insn just
4028 	     returns its argument and we assign it to a const_rtx
4029 	     variable.  */
4030 	  const rtx_insn *i2
4031 	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4032 	  if (i2 && keep_with_call_p (i2))
4033 	    return true;
4034 	}
4035     }
4036   return false;
4037 }
4038 
4039 /* Return true if LABEL is a target of JUMP_INSN.  This applies only
4040    to non-complex jumps.  That is, direct unconditional, conditional,
4041    and tablejumps, but not computed jumps or returns.  It also does
4042    not apply to the fallthru case of a conditional jump.  */
4043 
4044 bool
4045 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4046 {
4047   rtx tmp = JUMP_LABEL (jump_insn);
4048   rtx_jump_table_data *table;
4049 
4050   if (label == tmp)
4051     return true;
4052 
4053   if (tablejump_p (jump_insn, NULL, &table))
4054     {
4055       rtvec vec = table->get_labels ();
4056       int i, veclen = GET_NUM_ELEM (vec);
4057 
4058       for (i = 0; i < veclen; ++i)
4059 	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4060 	  return true;
4061     }
4062 
4063   if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4064     return true;
4065 
4066   return false;
4067 }
4068 
4069 
4070 /* Return an estimate of the cost of computing rtx X.
4071    One use is in cse, to decide which expression to keep in the hash table.
4072    Another is in rtl generation, to pick the cheapest way to multiply.
4073    Other uses like the latter are expected in the future.
4074 
4075    X appears as operand OPNO in an expression with code OUTER_CODE.
4076    SPEED specifies whether costs optimized for speed or size should
4077    be returned.  */
4078 
4079 int
4080 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4081 	  int opno, bool speed)
4082 {
4083   int i, j;
4084   enum rtx_code code;
4085   const char *fmt;
4086   int total;
4087   int factor;
4088 
4089   if (x == 0)
4090     return 0;
4091 
4092   if (GET_MODE (x) != VOIDmode)
4093     mode = GET_MODE (x);
4094 
4095   /* A size N times larger than UNITS_PER_WORD likely needs N times as
4096      many insns, taking N times as long.  */
4097   factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4098   if (factor == 0)
4099     factor = 1;
4100 
4101   /* Compute the default costs of certain things.
4102      Note that targetm.rtx_costs can override the defaults.  */
4103 
4104   code = GET_CODE (x);
4105   switch (code)
4106     {
4107     case MULT:
4108       /* Multiplication has time-complexity O(N*N), where N is the
4109 	 number of units (translated from digits) when using
4110 	 schoolbook long multiplication.  */
4111       total = factor * factor * COSTS_N_INSNS (5);
4112       break;
4113     case DIV:
4114     case UDIV:
4115     case MOD:
4116     case UMOD:
4117       /* Similarly, complexity for schoolbook long division.  */
4118       total = factor * factor * COSTS_N_INSNS (7);
4119       break;
4120     case USE:
4121       /* Used in combine.c as a marker.  */
4122       total = 0;
4123       break;
4124     case SET:
4125       /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4126 	 the mode for the factor.  */
4127       mode = GET_MODE (SET_DEST (x));
4128       factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4129       if (factor == 0)
4130 	factor = 1;
4131       /* FALLTHRU */
4132     default:
4133       total = factor * COSTS_N_INSNS (1);
4134     }
4135 
4136   switch (code)
4137     {
4138     case REG:
4139       return 0;
4140 
4141     case SUBREG:
4142       total = 0;
4143       /* If we can't tie these modes, make this expensive.  The larger
4144 	 the mode, the more expensive it is.  */
4145       if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
4146 	return COSTS_N_INSNS (2 + factor);
4147       break;
4148 
4149     default:
4150       if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4151 	return total;
4152       break;
4153     }
4154 
4155   /* Sum the costs of the sub-rtx's, plus cost of this operation,
4156      which is already in total.  */
4157 
4158   fmt = GET_RTX_FORMAT (code);
4159   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4160     if (fmt[i] == 'e')
4161       total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4162     else if (fmt[i] == 'E')
4163       for (j = 0; j < XVECLEN (x, i); j++)
4164 	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4165 
4166   return total;
4167 }
4168 
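/* Worked example of the default costs above (illustrative; a real target's
   targetm.rtx_costs hook normally takes precedence): with 4-byte words, a
   DImode value gives factor = 8 / 4 = 2, so before the operand costs are
   added in, a DImode MULT is costed at 2 * 2 * COSTS_N_INSNS (5) while a
   DImode PLUS falls through to the default 2 * COSTS_N_INSNS (1).  */
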
4169 /* Fill in the structure C with information about both speed and size rtx
4170    costs for X, which is operand OPNO in an expression with code OUTER.  */
4171 
4172 void
4173 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4174 		   struct full_rtx_costs *c)
4175 {
4176   c->speed = rtx_cost (x, mode, outer, opno, true);
4177   c->size = rtx_cost (x, mode, outer, opno, false);
4178 }
4179 
4180 
4181 /* Return the cost of address expression X.
4182    Expect that X is a properly formed address reference.
4183 
4184    The SPEED parameter specifies whether costs optimized for speed or size
4185    should be returned.  */
4186 
4187 int
4188 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4189 {
4190   /* We may be asked for the cost of various unusual addresses, such as the
4191      operands of a push instruction.  It is not worthwhile to complicate the
4192      target hook with such cases.  */
4193 
4194   if (!memory_address_addr_space_p (mode, x, as))
4195     return 1000;
4196 
4197   return targetm.address_cost (x, mode, as, speed);
4198 }
4199 
4200 /* If the target doesn't override, compute the cost as with arithmetic.  */
4201 
4202 int
4203 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4204 {
4205   return rtx_cost (x, Pmode, MEM, 0, speed);
4206 }
4207 
4208 
4209 unsigned HOST_WIDE_INT
4210 nonzero_bits (const_rtx x, machine_mode mode)
4211 {
4212   return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4213 }
4214 
4215 unsigned int
4216 num_sign_bit_copies (const_rtx x, machine_mode mode)
4217 {
4218   return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
4219 }
4220 
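/* Illustrative examples (not from the original source), for SImode:

     nonzero_bits ((and:SI (reg:SI r) (const_int 255)), SImode) == 0xff
     num_sign_bit_copies ((sign_extend:SI (reg:QI r)), SImode)  == 25

   assuming nothing further is known about the registers: the AND clears
   every bit above bit 7, and sign-extending an 8-bit value to 32 bits
   leaves the top 32 - 8 + 1 = 25 bits all equal to the sign bit.  */
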
4221 /* Return true if nonzero_bits1 might recurse into both operands
4222    of X.  */
4223 
4224 static inline bool
4225 nonzero_bits_binary_arith_p (const_rtx x)
4226 {
4227   if (!ARITHMETIC_P (x))
4228     return false;
4229   switch (GET_CODE (x))
4230     {
4231     case AND:
4232     case XOR:
4233     case IOR:
4234     case UMIN:
4235     case UMAX:
4236     case SMIN:
4237     case SMAX:
4238     case PLUS:
4239     case MINUS:
4240     case MULT:
4241     case DIV:
4242     case UDIV:
4243     case MOD:
4244     case UMOD:
4245       return true;
4246     default:
4247       return false;
4248     }
4249 }
4250 
4251 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4252    It avoids exponential behavior in nonzero_bits1 when X has
4253    identical subexpressions on the first or the second level.  */
4254 
4255 static unsigned HOST_WIDE_INT
4256 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
4257 		     machine_mode known_mode,
4258 		     unsigned HOST_WIDE_INT known_ret)
4259 {
4260   if (x == known_x && mode == known_mode)
4261     return known_ret;
4262 
4263   /* Try to find identical subexpressions.  If found call
4264      nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4265      precomputed value for the subexpression as KNOWN_RET.  */
4266 
4267   if (nonzero_bits_binary_arith_p (x))
4268     {
4269       rtx x0 = XEXP (x, 0);
4270       rtx x1 = XEXP (x, 1);
4271 
4272       /* Check the first level.  */
4273       if (x0 == x1)
4274 	return nonzero_bits1 (x, mode, x0, mode,
4275 			      cached_nonzero_bits (x0, mode, known_x,
4276 						   known_mode, known_ret));
4277 
4278       /* Check the second level.  */
4279       if (nonzero_bits_binary_arith_p (x0)
4280 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4281 	return nonzero_bits1 (x, mode, x1, mode,
4282 			      cached_nonzero_bits (x1, mode, known_x,
4283 						   known_mode, known_ret));
4284 
4285       if (nonzero_bits_binary_arith_p (x1)
4286 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4287 	return nonzero_bits1 (x, mode, x0, mode,
4288 			      cached_nonzero_bits (x0, mode, known_x,
4289 						   known_mode, known_ret));
4290     }
4291 
4292   return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4293 }
4294 
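/* Example of the sharing this exploits (illustrative): in an expression such
   as (plus:SI (mult:SI (reg:SI r) (reg:SI r)) (reg:SI r)), where the three
   occurrences of (reg:SI r) are the same rtx object, the register's nonzero
   bits are computed once and then passed down as KNOWN_X/KNOWN_RET, instead
   of being recomputed in every branch of nonzero_bits1's recursion.  */
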
4295 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4296    We don't let nonzero_bits recur into num_sign_bit_copies, because that
4297    is less useful.  We can't allow both, because that results in exponential
4298    run time recursion.  There is a nullstone testcase that triggered
4299    this.  This macro avoids accidental uses of num_sign_bit_copies.  */
4300 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4301 
4302 /* Given an expression, X, compute which bits in X can be nonzero.
4303    We don't care about bits outside of those defined in MODE.
4304 
4305    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4306    an arithmetic operation, we can do better.  */
4307 
4308 static unsigned HOST_WIDE_INT
4309 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4310 	       machine_mode known_mode,
4311 	       unsigned HOST_WIDE_INT known_ret)
4312 {
4313   unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4314   unsigned HOST_WIDE_INT inner_nz;
4315   enum rtx_code code = GET_CODE (x);
4316   machine_mode inner_mode;
4317   unsigned int mode_width = GET_MODE_PRECISION (mode);
4318 
4319   /* For floating-point and vector values, assume all bits are needed.  */
4320   if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4321       || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4322     return nonzero;
4323 
4324   /* If X is wider than MODE, use its mode instead.  */
4325   if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4326     {
4327       mode = GET_MODE (x);
4328       nonzero = GET_MODE_MASK (mode);
4329       mode_width = GET_MODE_PRECISION (mode);
4330     }
4331 
4332   if (mode_width > HOST_BITS_PER_WIDE_INT)
4333     /* Our only callers in this case look for single bit values.  So
4334        just return the mode mask.  Those tests will then be false.  */
4335     return nonzero;
4336 
4337   /* If MODE is wider than X, but both are a single word for both the host
4338      and target machines, we can compute this from which bits of the object
4339      might be nonzero in its own mode, taking into account the fact that, on
4340      CISC machines, accessing an object in a wider mode generally causes the
4341      high-order bits to become undefined, so they are not known to be zero.
4342      We extend this reasoning to RISC machines for operations that might not
4343      operate on the full registers.  */
4344   if (GET_MODE (x) != VOIDmode
4345       && GET_MODE (x) != mode
4346       && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4347       && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4348       && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x))
4349       && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4350     {
4351       nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4352 				      known_x, known_mode, known_ret);
4353       nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4354       return nonzero;
4355     }
4356 
4357   /* Please keep nonzero_bits_binary_arith_p above in sync with
4358      the code in the switch below.  */
4359   switch (code)
4360     {
4361     case REG:
4362 #if defined(POINTERS_EXTEND_UNSIGNED)
4363       /* If pointers extend unsigned and this is a pointer in Pmode, say that
4364 	 all the bits above ptr_mode are known to be zero.  */
4365       /* As we do not know which address space the pointer is referring to,
4366 	 we can do this only if the target does not support different pointer
4367 	 or address modes depending on the address space.  */
4368       if (target_default_pointer_address_modes_p ()
4369 	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4370 	  && REG_POINTER (x)
4371 	  && !targetm.have_ptr_extend ())
4372 	nonzero &= GET_MODE_MASK (ptr_mode);
4373 #endif
4374 
4375       /* Include declared information about alignment of pointers.  */
4376       /* ??? We don't properly preserve REG_POINTER changes across
4377 	 pointer-to-integer casts, so we can't trust it except for
4378 	 things that we know must be pointers.  See execute/960116-1.c.  */
4379       if ((x == stack_pointer_rtx
4380 	   || x == frame_pointer_rtx
4381 	   || x == arg_pointer_rtx)
4382 	  && REGNO_POINTER_ALIGN (REGNO (x)))
4383 	{
4384 	  unsigned HOST_WIDE_INT alignment
4385 	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4386 
4387 #ifdef PUSH_ROUNDING
4388 	  /* If PUSH_ROUNDING is defined, it is possible for the
4389 	     stack to be momentarily aligned only to that amount,
4390 	     so we pick the least alignment.  */
4391 	  if (x == stack_pointer_rtx && PUSH_ARGS)
4392 	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4393 			     alignment);
4394 #endif
4395 
4396 	  nonzero &= ~(alignment - 1);
4397 	}
4398 
4399       {
4400 	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4401 	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4402 					      known_mode, known_ret,
4403 					      &nonzero_for_hook);
4404 
4405 	if (new_rtx)
4406 	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4407 						   known_mode, known_ret);
4408 
4409 	return nonzero_for_hook;
4410       }
4411 
4412     case CONST_INT:
4413       /* If X is negative in MODE, sign-extend the value.  */
4414       if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
4415 	  && mode_width < BITS_PER_WORD
4416 	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
4417 	     != 0)
4418 	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4419 
4420       return UINTVAL (x);
4421 
4422     case MEM:
4423       /* In many, if not most, RISC machines, reading a byte from memory
4424 	 zeros the rest of the register.  Noticing that fact saves a lot
4425 	 of extra zero-extends.  */
4426       if (load_extend_op (GET_MODE (x)) == ZERO_EXTEND)
4427 	nonzero &= GET_MODE_MASK (GET_MODE (x));
4428       break;
4429 
4430     case EQ:  case NE:
4431     case UNEQ:  case LTGT:
4432     case GT:  case GTU:  case UNGT:
4433     case LT:  case LTU:  case UNLT:
4434     case GE:  case GEU:  case UNGE:
4435     case LE:  case LEU:  case UNLE:
4436     case UNORDERED: case ORDERED:
4437       /* If this produces an integer result, we know which bits are set.
4438 	 Code here used to clear bits outside the mode of X, but that is
4439 	 now done above.  */
4440       /* Mind that MODE is the mode the caller wants to look at this
4441 	 operation in, and not the actual operation mode.  We can wind
4442 	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4443 	 that describes the results of a vector compare.  */
4444       if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4445 	  && mode_width <= HOST_BITS_PER_WIDE_INT)
4446 	nonzero = STORE_FLAG_VALUE;
4447       break;
4448 
4449     case NEG:
4450 #if 0
4451       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4452 	 and num_sign_bit_copies.  */
4453       if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4454 	  == GET_MODE_PRECISION (GET_MODE (x)))
4455 	nonzero = 1;
4456 #endif
4457 
4458       if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4459 	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4460       break;
4461 
4462     case ABS:
4463 #if 0
4464       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4465 	 and num_sign_bit_copies.  */
4466       if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4467 	  == GET_MODE_PRECISION (GET_MODE (x)))
4468 	nonzero = 1;
4469 #endif
4470       break;
4471 
4472     case TRUNCATE:
4473       nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4474 				       known_x, known_mode, known_ret)
4475 		  & GET_MODE_MASK (mode));
4476       break;
4477 
4478     case ZERO_EXTEND:
4479       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4480 				      known_x, known_mode, known_ret);
4481       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4482 	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4483       break;
4484 
4485     case SIGN_EXTEND:
4486       /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4487 	 Otherwise, show all the bits in the outer mode but not the inner
4488 	 may be nonzero.  */
4489       inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4490 				      known_x, known_mode, known_ret);
4491       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4492 	{
4493 	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4494 	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4495 	    inner_nz |= (GET_MODE_MASK (mode)
4496 			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4497 	}
4498 
4499       nonzero &= inner_nz;
4500       break;
4501 
4502     case AND:
4503       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4504 				       known_x, known_mode, known_ret)
4505       		 & cached_nonzero_bits (XEXP (x, 1), mode,
4506 					known_x, known_mode, known_ret);
4507       break;
4508 
4509     case XOR:   case IOR:
4510     case UMIN:  case UMAX:  case SMIN:  case SMAX:
4511       {
4512 	unsigned HOST_WIDE_INT nonzero0
4513 	   = cached_nonzero_bits (XEXP (x, 0), mode,
4514 				  known_x, known_mode, known_ret);
4515 
4516 	/* Don't call nonzero_bits for the second time if it cannot change
4517 	   anything.  */
4518 	if ((nonzero & nonzero0) != nonzero)
4519 	  nonzero &= nonzero0
4520       		     | cached_nonzero_bits (XEXP (x, 1), mode,
4521 					    known_x, known_mode, known_ret);
4522       }
4523       break;
4524 
4525     case PLUS:  case MINUS:
4526     case MULT:
4527     case DIV:   case UDIV:
4528     case MOD:   case UMOD:
4529       /* We can apply the rules of arithmetic to compute the number of
4530 	 high- and low-order zero bits of these operations.  We start by
4531 	 computing the width (position of the highest-order nonzero bit)
4532 	 and the number of low-order zero bits for each value.  */
4533       {
4534 	unsigned HOST_WIDE_INT nz0
4535 	  = cached_nonzero_bits (XEXP (x, 0), mode,
4536 				 known_x, known_mode, known_ret);
4537 	unsigned HOST_WIDE_INT nz1
4538 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4539 				 known_x, known_mode, known_ret);
4540 	int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4541 	int width0 = floor_log2 (nz0) + 1;
4542 	int width1 = floor_log2 (nz1) + 1;
4543 	int low0 = ctz_or_zero (nz0);
4544 	int low1 = ctz_or_zero (nz1);
4545 	unsigned HOST_WIDE_INT op0_maybe_minusp
4546 	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
4547 	unsigned HOST_WIDE_INT op1_maybe_minusp
4548 	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
4549 	unsigned int result_width = mode_width;
4550 	int result_low = 0;
4551 
4552 	switch (code)
4553 	  {
4554 	  case PLUS:
4555 	    result_width = MAX (width0, width1) + 1;
4556 	    result_low = MIN (low0, low1);
4557 	    break;
4558 	  case MINUS:
4559 	    result_low = MIN (low0, low1);
4560 	    break;
4561 	  case MULT:
4562 	    result_width = width0 + width1;
4563 	    result_low = low0 + low1;
4564 	    break;
4565 	  case DIV:
4566 	    if (width1 == 0)
4567 	      break;
4568 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4569 	      result_width = width0;
4570 	    break;
4571 	  case UDIV:
4572 	    if (width1 == 0)
4573 	      break;
4574 	    result_width = width0;
4575 	    break;
4576 	  case MOD:
4577 	    if (width1 == 0)
4578 	      break;
4579 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4580 	      result_width = MIN (width0, width1);
4581 	    result_low = MIN (low0, low1);
4582 	    break;
4583 	  case UMOD:
4584 	    if (width1 == 0)
4585 	      break;
4586 	    result_width = MIN (width0, width1);
4587 	    result_low = MIN (low0, low1);
4588 	    break;
4589 	  default:
4590 	    gcc_unreachable ();
4591 	  }
4592 
4593 	if (result_width < mode_width)
4594 	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4595 
4596 	if (result_low > 0)
4597 	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4598       }
4599       break;
4600 
4601     case ZERO_EXTRACT:
4602       if (CONST_INT_P (XEXP (x, 1))
4603 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4604 	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4605       break;
4606 
4607     case SUBREG:
4608       /* If this is a SUBREG formed for a promoted variable that has
4609 	 been zero-extended, we know that at least the high-order bits
4610 	 are zero, though others might be too.  */
4611       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4612 	nonzero = GET_MODE_MASK (GET_MODE (x))
4613 		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4614 					 known_x, known_mode, known_ret);
4615 
4616       /* If the inner mode is a single word for both the host and target
4617 	 machines, we can compute this from which bits of the inner
4618 	 object might be nonzero.  */
4619       inner_mode = GET_MODE (SUBREG_REG (x));
4620       if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4621 	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)
4622 	{
4623 	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4624 					  known_x, known_mode, known_ret);
4625 
4626           /* On a typical CISC machine, accessing an object in a wider mode
4627 	     causes the high-order bits to become undefined.  So they are
4628 	     not known to be zero.
4629 
4630 	     On a typical RISC machine, we only have to worry about the way
4631 	     loads are extended.  Otherwise, if we get a reload for the inner
4632 	     part, it may be loaded from the stack, and then we may lose all
4633 	     the zero bits that existed before the store to the stack.  */
4634 	  rtx_code extend_op;
4635 	  if ((!WORD_REGISTER_OPERATIONS
4636 	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4637 		   ? val_signbit_known_set_p (inner_mode, nonzero)
4638 		   : extend_op != ZERO_EXTEND)
4639 	       || !MEM_P (SUBREG_REG (x)))
4640 	      && GET_MODE_PRECISION (GET_MODE (x))
4641 		  > GET_MODE_PRECISION (inner_mode))
4642 	    nonzero
4643 	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4644 	}
4645       break;
4646 
4647     case ASHIFTRT:
4648     case LSHIFTRT:
4649     case ASHIFT:
4650     case ROTATE:
4651       /* The nonzero bits are in two classes: any bits within MODE
4652 	 that aren't in GET_MODE (x) are always significant.  The rest of the
4653 	 nonzero bits are those that are significant in the operand of
4654 	 the shift when shifted the appropriate number of bits.  This
4655 	 shows that high-order bits are cleared by the right shift and
4656 	 low-order bits by left shifts.  */
4657       if (CONST_INT_P (XEXP (x, 1))
4658 	  && INTVAL (XEXP (x, 1)) >= 0
4659 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4660 	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4661 	{
4662 	  machine_mode inner_mode = GET_MODE (x);
4663 	  unsigned int width = GET_MODE_PRECISION (inner_mode);
4664 	  int count = INTVAL (XEXP (x, 1));
4665 	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4666 	  unsigned HOST_WIDE_INT op_nonzero
4667 	    = cached_nonzero_bits (XEXP (x, 0), mode,
4668 				   known_x, known_mode, known_ret);
4669 	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4670 	  unsigned HOST_WIDE_INT outer = 0;
4671 
4672 	  if (mode_width > width)
4673 	    outer = (op_nonzero & nonzero & ~mode_mask);
4674 
4675 	  if (code == LSHIFTRT)
4676 	    inner >>= count;
4677 	  else if (code == ASHIFTRT)
4678 	    {
4679 	      inner >>= count;
4680 
4681 	      /* If the sign bit may have been nonzero before the shift, we
4682 		 need to mark all the places it could have been copied to
4683 		 by the shift as possibly nonzero.  */
4684 	      if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
4685 		inner |= ((HOST_WIDE_INT_1U << count) - 1)
4686 			   << (width - count);
4687 	    }
4688 	  else if (code == ASHIFT)
4689 	    inner <<= count;
4690 	  else
4691 	    inner = ((inner << (count % width)
4692 		      | (inner >> (width - (count % width)))) & mode_mask);
4693 
4694 	  nonzero &= (outer | inner);
4695 	}
4696       break;
4697 
4698     case FFS:
4699     case POPCOUNT:
4700       /* This is at most the number of bits in the mode.  */
4701       nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4702       break;
4703 
4704     case CLZ:
4705       /* If CLZ has a known value at zero, then the nonzero bits are
4706 	 that value, plus the number of bits in the mode minus one.  */
4707       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4708 	nonzero
4709 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4710       else
4711 	nonzero = -1;
4712       break;
4713 
4714     case CTZ:
4715       /* If CTZ has a known value at zero, then the nonzero bits are
4716 	 that value, plus the number of bits in the mode minus one.  */
4717       if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4718 	nonzero
4719 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4720       else
4721 	nonzero = -1;
4722       break;
4723 
4724     case CLRSB:
4725       /* This is at most the number of bits in the mode minus 1.  */
4726       nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4727       break;
4728 
4729     case PARITY:
4730       nonzero = 1;
4731       break;
4732 
4733     case IF_THEN_ELSE:
4734       {
4735 	unsigned HOST_WIDE_INT nonzero_true
4736 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4737 				 known_x, known_mode, known_ret);
4738 
4739 	/* Don't call nonzero_bits for the second time if it cannot change
4740 	   anything.  */
4741 	if ((nonzero & nonzero_true) != nonzero)
4742 	  nonzero &= nonzero_true
4743       		     | cached_nonzero_bits (XEXP (x, 2), mode,
4744 					    known_x, known_mode, known_ret);
4745       }
4746       break;
4747 
4748     default:
4749       break;
4750     }
4751 
4752   return nonzero;
4753 }
4754 
4755 /* See the macro definition above.  */
4756 #undef cached_num_sign_bit_copies
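
/* Worked example of the PLUS rule in nonzero_bits1 above (illustrative):
   if the two operands are known to fit in 5 and 3 bits respectively, with
   no known low-order zero bits, then width0 = 5, width1 = 3 and the sum
   fits in MAX (5, 3) + 1 = 6 bits, so nonzero_bits reports at most 0x3f
   for the result.  */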
4757 
4758 
4759 /* Return true if num_sign_bit_copies1 might recurse into both operands
4760    of X.  */
4761 
4762 static inline bool
4763 num_sign_bit_copies_binary_arith_p (const_rtx x)
4764 {
4765   if (!ARITHMETIC_P (x))
4766     return false;
4767   switch (GET_CODE (x))
4768     {
4769     case IOR:
4770     case AND:
4771     case XOR:
4772     case SMIN:
4773     case SMAX:
4774     case UMIN:
4775     case UMAX:
4776     case PLUS:
4777     case MINUS:
4778     case MULT:
4779       return true;
4780     default:
4781       return false;
4782     }
4783 }
4784 
4785 /* The function cached_num_sign_bit_copies is a wrapper around
4786    num_sign_bit_copies1.  It avoids exponential behavior in
4787    num_sign_bit_copies1 when X has identical subexpressions on the
4788    first or the second level.  */
4789 
4790 static unsigned int
4791 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4792 			    machine_mode known_mode,
4793 			    unsigned int known_ret)
4794 {
4795   if (x == known_x && mode == known_mode)
4796     return known_ret;
4797 
4798   /* Try to find identical subexpressions.  If found call
4799      num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4800      the precomputed value for the subexpression as KNOWN_RET.  */
4801 
4802   if (num_sign_bit_copies_binary_arith_p (x))
4803     {
4804       rtx x0 = XEXP (x, 0);
4805       rtx x1 = XEXP (x, 1);
4806 
4807       /* Check the first level.  */
4808       if (x0 == x1)
4809 	return
4810 	  num_sign_bit_copies1 (x, mode, x0, mode,
4811 				cached_num_sign_bit_copies (x0, mode, known_x,
4812 							    known_mode,
4813 							    known_ret));
4814 
4815       /* Check the second level.  */
4816       if (num_sign_bit_copies_binary_arith_p (x0)
4817 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4818 	return
4819 	  num_sign_bit_copies1 (x, mode, x1, mode,
4820 				cached_num_sign_bit_copies (x1, mode, known_x,
4821 							    known_mode,
4822 							    known_ret));
4823 
4824       if (num_sign_bit_copies_binary_arith_p (x1)
4825 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4826 	return
4827 	  num_sign_bit_copies1 (x, mode, x0, mode,
4828 				cached_num_sign_bit_copies (x0, mode, known_x,
4829 							    known_mode,
4830 							    known_ret));
4831     }
4832 
4833   return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4834 }
4835 
4836 /* Return the number of bits at the high-order end of X that are known to
4837    be equal to the sign bit.  X will be used in mode MODE; if MODE is
4838    VOIDmode, X will be used in its own mode.  The returned value  will always
4839    be between 1 and the number of bits in MODE.  */
4840 
4841 static unsigned int
4842 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4843 		      machine_mode known_mode,
4844 		      unsigned int known_ret)
4845 {
4846   enum rtx_code code = GET_CODE (x);
4847   machine_mode inner_mode;
4848   int num0, num1, result;
4849   unsigned HOST_WIDE_INT nonzero;
4850 
4851   /* If we weren't given a mode, use the mode of X.  If the mode is still
4852      VOIDmode, we don't know anything.  Likewise if one of the modes is
4853      floating-point.  */
4854 
4855   if (mode == VOIDmode)
4856     mode = GET_MODE (x);
4857 
4858   gcc_checking_assert (mode != BLKmode);
4859 
4860   if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4861       || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4862     return 1;
4863 
4864   /* For a smaller mode, just ignore the high bits.  */
4865   unsigned int bitwidth = GET_MODE_PRECISION (mode);
4866   if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4867     {
4868       num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4869 					 known_x, known_mode, known_ret);
4870       return MAX (1,
4871 		  num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4872     }
4873 
4874   if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4875     {
4876       /* If this machine does not do all register operations on the entire
4877 	 register and MODE is wider than the mode of X, we can say nothing
4878 	 at all about the high-order bits.  We extend this reasoning to RISC
4879 	 machines for operations that might not operate on full registers.  */
4880       if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4881 	return 1;
4882 
4883       /* Likewise on machines that do, if the mode of the object is smaller
4884 	 than a word and loads of that size don't sign extend, we can say
4885 	 nothing about the high order bits.  */
4886       if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4887 	  && load_extend_op (GET_MODE (x)) != SIGN_EXTEND)
4888 	return 1;
4889     }
4890 
4891   /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
4892      the code in the switch below.  */
4893   switch (code)
4894     {
4895     case REG:
4896 
4897 #if defined(POINTERS_EXTEND_UNSIGNED)
4898       /* If pointers extend signed and this is a pointer in Pmode, say that
4899 	 all the bits above ptr_mode are known to be sign bit copies.  */
4900       /* As we do not know which address space the pointer is referring to,
4901 	 we can do this only if the target does not support different pointer
4902 	 or address modes depending on the address space.  */
4903       if (target_default_pointer_address_modes_p ()
4904 	  && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4905 	  && mode == Pmode && REG_POINTER (x)
4906 	  && !targetm.have_ptr_extend ())
4907 	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4908 #endif
4909 
4910       {
4911 	unsigned int copies_for_hook = 1, copies = 1;
4912 	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4913 						     known_mode, known_ret,
4914 						     &copies_for_hook);
4915 
4916 	if (new_rtx)
4917 	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4918 					       known_mode, known_ret);
4919 
4920 	if (copies > 1 || copies_for_hook > 1)
4921 	  return MAX (copies, copies_for_hook);
4922 
4923 	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
4924       }
4925       break;
4926 
4927     case MEM:
4928       /* Some RISC machines sign-extend all loads of smaller than a word.  */
4929       if (load_extend_op (GET_MODE (x)) == SIGN_EXTEND)
4930 	return MAX (1, ((int) bitwidth
4931 			- (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4932       break;
4933 
4934     case CONST_INT:
4935       /* If the constant is negative, take its 1's complement and remask.
4936 	 Then see how many zero bits we have.  */
4937       nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4938       if (bitwidth <= HOST_BITS_PER_WIDE_INT
4939 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
4940 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
4941 
4942       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4943 
4944     case SUBREG:
4945       /* If this is a SUBREG for a promoted object that is sign-extended
4946 	 and we are looking at it in a wider mode, we know that at least the
4947 	 high-order bits are known to be sign bit copies.  */
4948 
4949       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4950 	{
4951 	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4952 					     known_x, known_mode, known_ret);
4953 	  return MAX ((int) bitwidth
4954 		      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4955 		      num0);
4956 	}
4957 
4958       /* For a smaller object, just ignore the high bits.  */
4959       inner_mode = GET_MODE (SUBREG_REG (x));
4960       if (bitwidth <= GET_MODE_PRECISION (inner_mode))
4961 	{
4962 	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4963 					     known_x, known_mode, known_ret);
4964 	  return
4965 	    MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode) - bitwidth));
4966 	}
4967 
4968       /* For paradoxical SUBREGs on machines where all register operations
4969 	 affect the entire register, just look inside.  Note that we are
4970 	 passing MODE to the recursive call, so the number of sign bit
4971 	 copies will remain relative to that mode, not the inner mode.
4972 
4973          This works only if loads sign extend.  Otherwise, if we get a
4974 	 reload for the inner part, it may be loaded from the stack, and
4975 	 then we lose all sign bit copies that existed before the store
4976 	 to the stack.  */
4977 
4978       if (WORD_REGISTER_OPERATIONS
4979 	  && load_extend_op (inner_mode) == SIGN_EXTEND
4980 	  && paradoxical_subreg_p (x)
4981 	  && MEM_P (SUBREG_REG (x)))
4982 	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4983 					   known_x, known_mode, known_ret);
4984       break;
4985 
4986     case SIGN_EXTRACT:
4987       if (CONST_INT_P (XEXP (x, 1)))
4988 	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4989       break;
4990 
4991     case SIGN_EXTEND:
4992       return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4993 	      + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4994 					    known_x, known_mode, known_ret));
4995 
4996     case TRUNCATE:
4997       /* For a smaller object, just ignore the high bits.  */
4998       num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4999 					 known_x, known_mode, known_ret);
5000       return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
5001 				    - bitwidth)));
5002 
5003     case NOT:
5004       return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5005 					 known_x, known_mode, known_ret);
5006 
5007     case ROTATE:       case ROTATERT:
5008       /* If we are rotating left by a number of bits less than the number
5009 	 of sign bit copies, we can just subtract that amount from the
5010 	 number.  */
5011       if (CONST_INT_P (XEXP (x, 1))
5012 	  && INTVAL (XEXP (x, 1)) >= 0
5013 	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5014 	{
5015 	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5016 					     known_x, known_mode, known_ret);
5017 	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5018 				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5019 	}
5020       break;
5021 
5022     case NEG:
5023       /* In general, this subtracts one sign bit copy.  But if the value
5024 	 is known to be positive, the number of sign bit copies is the
5025 	 same as that of the input.  Finally, if the input has just one bit
5026 	 that might be nonzero, all the bits are copies of the sign bit.  */
5027       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5028 					 known_x, known_mode, known_ret);
5029       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5030 	return num0 > 1 ? num0 - 1 : 1;
5031 
5032       nonzero = nonzero_bits (XEXP (x, 0), mode);
5033       if (nonzero == 1)
5034 	return bitwidth;
5035 
5036       if (num0 > 1
5037 	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5038 	num0--;
5039 
5040       return num0;
5041 
5042     case IOR:   case AND:   case XOR:
5043     case SMIN:  case SMAX:  case UMIN:  case UMAX:
5044       /* Logical operations will preserve the number of sign-bit copies.
5045 	 MIN and MAX operations always return one of the operands.  */
5046       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5047 					 known_x, known_mode, known_ret);
5048       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5049 					 known_x, known_mode, known_ret);
5050 
5051       /* If num1 is clearing some of the top bits then regardless of
5052 	 the other term, we are guaranteed to have at least that many
5053 	 high-order zero bits.  */
5054       if (code == AND
5055 	  && num1 > 1
5056 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5057 	  && CONST_INT_P (XEXP (x, 1))
5058 	  && (UINTVAL (XEXP (x, 1))
5059 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5060 	return num1;
5061 
5062       /* Similarly for IOR when setting high-order bits.  */
5063       if (code == IOR
5064 	  && num1 > 1
5065 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5066 	  && CONST_INT_P (XEXP (x, 1))
5067 	  && (UINTVAL (XEXP (x, 1))
5068 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5069 	return num1;
5070 
5071       return MIN (num0, num1);
5072 
5073     case PLUS:  case MINUS:
5074       /* For addition and subtraction, we can have a 1-bit carry.  However,
5075 	 if we are subtracting 1 from a positive number, there will not
5076 	 be such a carry.  Furthermore, if the positive number is known to
5077 	 be 0 or 1, we know the result is either -1 or 0.  */
5078 
5079       if (code == PLUS && XEXP (x, 1) == constm1_rtx
5080 	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
5081 	{
5082 	  nonzero = nonzero_bits (XEXP (x, 0), mode);
5083 	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5084 	    return (nonzero == 1 || nonzero == 0 ? bitwidth
5085 		    : bitwidth - floor_log2 (nonzero) - 1);
5086 	}
5087 
5088       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5089 					 known_x, known_mode, known_ret);
5090       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5091 					 known_x, known_mode, known_ret);
5092       result = MAX (1, MIN (num0, num1) - 1);
5093 
5094       return result;
5095 
5096     case MULT:
5097       /* The number of bits of the product is the sum of the number of
5098 	 bits of both terms.  However, unless one of the terms is known
5099 	 to be positive, we must allow for an additional bit since negating
5100 	 a negative number can remove one sign bit copy.  */
5101 
5102       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5103 					 known_x, known_mode, known_ret);
5104       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5105 					 known_x, known_mode, known_ret);
5106 
5107       result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5108       if (result > 0
5109 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5110 	      || (((nonzero_bits (XEXP (x, 0), mode)
5111 		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5112 		  && ((nonzero_bits (XEXP (x, 1), mode)
5113 		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5114 		      != 0))))
5115 	result--;
5116 
5117       return MAX (1, result);
5118 
5119     case UDIV:
5120       /* The result must be <= the first operand.  If the first operand
5121 	 has the high bit set, we know nothing about the number of sign
5122 	 bit copies.  */
5123       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5124 	return 1;
5125       else if ((nonzero_bits (XEXP (x, 0), mode)
5126 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5127 	return 1;
5128       else
5129 	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5130 					   known_x, known_mode, known_ret);
5131 
5132     case UMOD:
5133       /* The result must be <= the second operand.  If the second operand
5134 	 has (or just might have) the high bit set, we know nothing about
5135 	 the number of sign bit copies.  */
5136       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5137 	return 1;
5138       else if ((nonzero_bits (XEXP (x, 1), mode)
5139 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5140 	return 1;
5141       else
5142 	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5143 					   known_x, known_mode, known_ret);
5144 
5145     case DIV:
5146       /* Similar to unsigned division, except that we have to worry about
5147 	 the case where the divisor is negative, in which case we have
5148 	 to add 1.  */
5149       result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5150 					   known_x, known_mode, known_ret);
5151       if (result > 1
5152 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5153 	      || (nonzero_bits (XEXP (x, 1), mode)
5154 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5155 	result--;
5156 
5157       return result;
5158 
5159     case MOD:
5160       result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5161 					   known_x, known_mode, known_ret);
5162       if (result > 1
5163 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5164 	      || (nonzero_bits (XEXP (x, 1), mode)
5165 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5166 	result--;
5167 
5168       return result;
5169 
5170     case ASHIFTRT:
5171       /* Shifts by a constant add to the number of bits equal to the
5172 	 sign bit.  */
5173       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5174 					 known_x, known_mode, known_ret);
5175       if (CONST_INT_P (XEXP (x, 1))
5176 	  && INTVAL (XEXP (x, 1)) > 0
5177 	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
5178 	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5179 
5180       return num0;
5181 
5182     case ASHIFT:
5183       /* Left shifts destroy copies.  */
5184       if (!CONST_INT_P (XEXP (x, 1))
5185 	  || INTVAL (XEXP (x, 1)) < 0
5186 	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5187 	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
5188 	return 1;
5189 
5190       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5191 					 known_x, known_mode, known_ret);
5192       return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5193 
5194     case IF_THEN_ELSE:
5195       num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5196 					 known_x, known_mode, known_ret);
5197       num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5198 					 known_x, known_mode, known_ret);
5199       return MIN (num0, num1);
5200 
5201     case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
5202     case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
5203     case GEU: case GTU: case LEU: case LTU:
5204     case UNORDERED: case ORDERED:
5205       /* Comparisons yield STORE_FLAG_VALUE; if it is negative, take its
5206 	 1's complement and remask.  Then see how many zero bits we have.  */
5207       nonzero = STORE_FLAG_VALUE;
5208       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5209 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5210 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5211 
5212       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5213 
5214     default:
5215       break;
5216     }
5217 
5218   /* If we haven't been able to figure it out by one of the above rules,
5219      see if some of the high-order bits are known to be zero.  If so,
5220      count those bits and return one less than that amount.  If we can't
5221      safely compute the mask for this mode, always return BITWIDTH.  */
5222 
5223   bitwidth = GET_MODE_PRECISION (mode);
5224   if (bitwidth > HOST_BITS_PER_WIDE_INT)
5225     return 1;
5226 
5227   nonzero = nonzero_bits (x, mode);
5228   return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5229 	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5230 }
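
/* Editor's sketch (not part of rtlanal.c): the ASHIFTRT/ASHIFT rules above
   shown on a plain 32-bit two's-complement int, with the bit tests done on
   an unsigned copy so no signed shifts are needed.  The helper mirrors what
   num_sign_bit_copies reports for a known value: how many high-order bits,
   including the sign bit itself, equal the sign bit.  */

static unsigned
count_sign_copies32 (int x)
{
  unsigned u = (unsigned) x;
  unsigned sign = (u >> 31) & 1;
  unsigned n = 0;
  for (int bit = 31; bit >= 0; bit--)
    {
      if (((u >> bit) & 1) != sign)
	break;
      n++;
    }
  return n;
}

/* For example, -6 has 29 sign-bit copies.  Its arithmetic right shift by 2
   is -2 with 31 copies (29 + 2, capped at the bit width), while -6 * 4 = -24,
   the value a left shift by 2 produces, has 27 copies (29 - 2, never clamped
   below 1) -- exactly the MIN/MAX expressions in the two cases above.  */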
5231 
5232 /* Calculate the rtx_cost of a single instruction.  A return value of
5233    zero indicates an instruction pattern without a known cost.  */
5234 
5235 int
5236 insn_rtx_cost (rtx pat, bool speed)
5237 {
5238   int i, cost;
5239   rtx set;
5240 
5241   /* Extract the single set rtx from the instruction pattern.
5242      We can't use single_set since we only have the pattern.  */
5243   if (GET_CODE (pat) == SET)
5244     set = pat;
5245   else if (GET_CODE (pat) == PARALLEL)
5246     {
5247       set = NULL_RTX;
5248       for (i = 0; i < XVECLEN (pat, 0); i++)
5249 	{
5250 	  rtx x = XVECEXP (pat, 0, i);
5251 	  if (GET_CODE (x) == SET)
5252 	    {
5253 	      if (set)
5254 		return 0;
5255 	      set = x;
5256 	    }
5257 	}
5258       if (!set)
5259 	return 0;
5260     }
5261   else
5262     return 0;
5263 
5264   cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5265   return cost > 0 ? cost : COSTS_N_INSNS (1);
5266 }
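
/* Editor's sketch (not part of rtlanal.c): the "exactly one SET" scan that
   insn_rtx_cost performs on a PARALLEL, reduced to plain arrays.  The codes,
   the wanted value and the element type are illustrative stand-ins, not GCC
   names.  Returns the index of the unique matching element, or -1 when there
   are zero matches or more than one (the cases where the function above
   returns 0).  */

static int
find_single_match (const int *codes, int n, int wanted)
{
  int found = -1;
  for (int i = 0; i < n; i++)
    if (codes[i] == wanted)
      {
	if (found >= 0)
	  return -1;		/* A second match: give up.  */
	found = i;
      }
  return found;
}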
5267 
5268 /* Returns estimate on cost of computing SEQ.  */
5269 
5270 unsigned
5271 seq_cost (const rtx_insn *seq, bool speed)
5272 {
5273   unsigned cost = 0;
5274   rtx set;
5275 
5276   for (; seq; seq = NEXT_INSN (seq))
5277     {
5278       set = single_set (seq);
5279       if (set)
5280         cost += set_rtx_cost (set, speed);
5281       else
5282         cost++;
5283     }
5284 
5285   return cost;
5286 }
5287 
5288 /* Given an insn INSN and condition COND, return the condition in a
5289    canonical form to simplify testing by callers.  Specifically:
5290 
5291    (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5292    (2) Both operands will be machine operands; (cc0) will have been replaced.
5293    (3) If an operand is a constant, it will be the second operand.
5294    (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5295        for GE, GEU, and LEU.
5296 
5297    If the condition cannot be understood, or is an inequality floating-point
5298    comparison which needs to be reversed, 0 will be returned.
5299 
5300    If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5301 
5302    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5303    insn used in locating the condition was found.  If a replacement test
5304    of the condition is desired, it should be placed in front of that
5305    insn and we will be sure that the inputs are still valid.
5306 
5307    If WANT_REG is nonzero, we wish the condition to be relative to that
5308    register, if possible.  Therefore, do not canonicalize the condition
5309    further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
5310    to be a compare to a CC mode register.
5311 
5312    If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5313    and at INSN.  */
5314 
5315 rtx
5316 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5317 			rtx_insn **earliest,
5318 			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5319 {
5320   enum rtx_code code;
5321   rtx_insn *prev = insn;
5322   const_rtx set;
5323   rtx tem;
5324   rtx op0, op1;
5325   int reverse_code = 0;
5326   machine_mode mode;
5327   basic_block bb = BLOCK_FOR_INSN (insn);
5328 
5329   code = GET_CODE (cond);
5330   mode = GET_MODE (cond);
5331   op0 = XEXP (cond, 0);
5332   op1 = XEXP (cond, 1);
5333 
5334   if (reverse)
5335     code = reversed_comparison_code (cond, insn);
5336   if (code == UNKNOWN)
5337     return 0;
5338 
5339   if (earliest)
5340     *earliest = insn;
5341 
5342   /* If we are comparing a register with zero, see if the register is set
5343      in the previous insn to a COMPARE or a comparison operation.  Perform
5344      the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5345      in cse.c  */
5346 
5347   while ((GET_RTX_CLASS (code) == RTX_COMPARE
5348 	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5349 	 && op1 == CONST0_RTX (GET_MODE (op0))
5350 	 && op0 != want_reg)
5351     {
5352       /* Set nonzero when we find something of interest.  */
5353       rtx x = 0;
5354 
5355       /* If comparison with cc0, import actual comparison from compare
5356 	 insn.  */
5357       if (op0 == cc0_rtx)
5358 	{
5359 	  if ((prev = prev_nonnote_insn (prev)) == 0
5360 	      || !NONJUMP_INSN_P (prev)
5361 	      || (set = single_set (prev)) == 0
5362 	      || SET_DEST (set) != cc0_rtx)
5363 	    return 0;
5364 
5365 	  op0 = SET_SRC (set);
5366 	  op1 = CONST0_RTX (GET_MODE (op0));
5367 	  if (earliest)
5368 	    *earliest = prev;
5369 	}
5370 
5371       /* If this is a COMPARE, pick up the two things being compared.  */
5372       if (GET_CODE (op0) == COMPARE)
5373 	{
5374 	  op1 = XEXP (op0, 1);
5375 	  op0 = XEXP (op0, 0);
5376 	  continue;
5377 	}
5378       else if (!REG_P (op0))
5379 	break;
5380 
5381       /* Go back to the previous insn.  Stop if it is not an INSN.  We also
5382 	 stop if it isn't a single set or if it has a REG_INC note because
5383 	 we don't want to bother dealing with it.  */
5384 
5385       prev = prev_nonnote_nondebug_insn (prev);
5386 
5387       if (prev == 0
5388 	  || !NONJUMP_INSN_P (prev)
5389 	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
5390 	  /* In cfglayout mode, there do not have to be labels at the
5391 	     beginning of a block, or jumps at the end, so the previous
5392 	     conditions would not stop us when we reach bb boundary.  */
5393 	  || BLOCK_FOR_INSN (prev) != bb)
5394 	break;
5395 
5396       set = set_of (op0, prev);
5397 
5398       if (set
5399 	  && (GET_CODE (set) != SET
5400 	      || !rtx_equal_p (SET_DEST (set), op0)))
5401 	break;
5402 
5403       /* If this is setting OP0, get what it sets it to if it looks
5404 	 relevant.  */
5405       if (set)
5406 	{
5407 	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
5408 #ifdef FLOAT_STORE_FLAG_VALUE
5409 	  REAL_VALUE_TYPE fsfv;
5410 #endif
5411 
5412 	  /* ??? We may not combine comparisons done in a CCmode with
5413 	     comparisons not done in a CCmode.  This is to aid targets
5414 	     like Alpha that have an IEEE compliant EQ instruction, and
5415 	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
5416 	     actually artificial, simply to prevent the combination, but
5417 	     should not affect other platforms.
5418 
5419 	     However, we must allow VOIDmode comparisons to match either
5420 	     CCmode or non-CCmode comparison, because some ports have
5421 	     modeless comparisons inside branch patterns.
5422 
5423 	     ??? This mode check should perhaps look more like the mode check
5424 	     in simplify_comparison in combine.  */
5425 	  if (((GET_MODE_CLASS (mode) == MODE_CC)
5426 	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5427 	      && mode != VOIDmode
5428 	      && inner_mode != VOIDmode)
5429 	    break;
5430 	  if (GET_CODE (SET_SRC (set)) == COMPARE
5431 	      || (((code == NE
5432 		    || (code == LT
5433 			&& val_signbit_known_set_p (inner_mode,
5434 						    STORE_FLAG_VALUE))
5435 #ifdef FLOAT_STORE_FLAG_VALUE
5436 		    || (code == LT
5437 			&& SCALAR_FLOAT_MODE_P (inner_mode)
5438 			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5439 			    REAL_VALUE_NEGATIVE (fsfv)))
5440 #endif
5441 		    ))
5442 		  && COMPARISON_P (SET_SRC (set))))
5443 	    x = SET_SRC (set);
5444 	  else if (((code == EQ
5445 		     || (code == GE
5446 			 && val_signbit_known_set_p (inner_mode,
5447 						     STORE_FLAG_VALUE))
5448 #ifdef FLOAT_STORE_FLAG_VALUE
5449 		     || (code == GE
5450 			 && SCALAR_FLOAT_MODE_P (inner_mode)
5451 			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5452 			     REAL_VALUE_NEGATIVE (fsfv)))
5453 #endif
5454 		     ))
5455 		   && COMPARISON_P (SET_SRC (set)))
5456 	    {
5457 	      reverse_code = 1;
5458 	      x = SET_SRC (set);
5459 	    }
5460 	  else if ((code == EQ || code == NE)
5461 		   && GET_CODE (SET_SRC (set)) == XOR)
5462 	    /* Handle sequences like:
5463 
5464 	       (set op0 (xor X Y))
5465 	       ...(eq|ne op0 (const_int 0))...
5466 
5467 	       in which case:
5468 
5469 	       (eq op0 (const_int 0)) reduces to (eq X Y)
5470 	       (ne op0 (const_int 0)) reduces to (ne X Y)
5471 
5472 	       This is the form used by MIPS16, for example.  */
5473 	    x = SET_SRC (set);
5474 	  else
5475 	    break;
5476 	}
5477 
5478       else if (reg_set_p (op0, prev))
5479 	/* If this sets OP0, but not directly, we have to give up.  */
5480 	break;
5481 
5482       if (x)
5483 	{
5484 	  /* If the caller is expecting the condition to be valid at INSN,
5485 	     make sure X doesn't change before INSN.  */
5486 	  if (valid_at_insn_p)
5487 	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5488 	      break;
5489 	  if (COMPARISON_P (x))
5490 	    code = GET_CODE (x);
5491 	  if (reverse_code)
5492 	    {
5493 	      code = reversed_comparison_code (x, prev);
5494 	      if (code == UNKNOWN)
5495 		return 0;
5496 	      reverse_code = 0;
5497 	    }
5498 
5499 	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5500 	  if (earliest)
5501 	    *earliest = prev;
5502 	}
5503     }
5504 
5505   /* If constant is first, put it last.  */
5506   if (CONSTANT_P (op0))
5507     code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5508 
5509   /* If OP0 is the result of a comparison, we weren't able to find what
5510      was really being compared, so fail.  */
5511   if (!allow_cc_mode
5512       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5513     return 0;
5514 
5515   /* Canonicalize any ordered comparison with integers involving equality
5516      if we can do computations in the relevant mode and we do not
5517      overflow.  */
5518 
5519   if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5520       && CONST_INT_P (op1)
5521       && GET_MODE (op0) != VOIDmode
5522       && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5523     {
5524       HOST_WIDE_INT const_val = INTVAL (op1);
5525       unsigned HOST_WIDE_INT uconst_val = const_val;
5526       unsigned HOST_WIDE_INT max_val
5527 	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5528 
5529       switch (code)
5530 	{
5531 	case LE:
5532 	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5533 	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5534 	  break;
5535 
5536 	/* When cross-compiling, const_val might be sign-extended from
5537 	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
5538 	case GE:
5539 	  if ((const_val & max_val)
5540 	      != (HOST_WIDE_INT_1U
5541 		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5542 	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5543 	  break;
5544 
5545 	case LEU:
5546 	  if (uconst_val < max_val)
5547 	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5548 	  break;
5549 
5550 	case GEU:
5551 	  if (uconst_val != 0)
5552 	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5553 	  break;
5554 
5555 	default:
5556 	  break;
5557 	}
5558     }
5559 
5560   /* Never return CC0; return zero instead.  */
5561   if (CC0_P (op0))
5562     return 0;
5563 
5564   return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5565 }
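
/* Editor's sketch (not part of rtlanal.c): the constant adjustment that the
   switch above performs for the unsigned comparisons, shown on plain
   "unsigned" values.  For any c other than the maximum value,
   (x <= c) == (x < c + 1), and for any c other than 0,
   (x >= c) == (x > c - 1); the guards against max_val and 0 in the LEU/GEU
   cases exist precisely because those rewrites would otherwise wrap around.
   The function names are illustrative.  */

static int
leu_as_ltu (unsigned x, unsigned c)
{
  if (c != ~0u)
    return x < c + 1;		/* LEU c  ->  LTU (c + 1).  */
  return 1;			/* x <= UINT_MAX always holds.  */
}

static int
geu_as_gtu (unsigned x, unsigned c)
{
  if (c != 0u)
    return x > c - 1;		/* GEU c  ->  GTU (c - 1).  */
  return 1;			/* x >= 0 always holds.  */
}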
5566 
5567 /* Given a jump insn JUMP, return the condition that will cause it to branch
5568    to its JUMP_LABEL.  If the condition cannot be understood, or is an
5569    inequality floating-point comparison which needs to be reversed, 0 will
5570    be returned.
5571 
5572    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5573    insn used in locating the condition was found.  If a replacement test
5574    of the condition is desired, it should be placed in front of that
5575    insn and we will be sure that the inputs are still valid.  If EARLIEST
5576    is null, the returned condition will be valid at INSN.
5577 
5578    If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5579    compare CC mode register.
5580 
5581    VALID_AT_INSN_P is the same as for canonicalize_condition.  */
5582 
5583 rtx
5584 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5585 	       int valid_at_insn_p)
5586 {
5587   rtx cond;
5588   int reverse;
5589   rtx set;
5590 
5591   /* If this is not a standard conditional jump, we can't parse it.  */
5592   if (!JUMP_P (jump)
5593       || ! any_condjump_p (jump))
5594     return 0;
5595   set = pc_set (jump);
5596 
5597   cond = XEXP (SET_SRC (set), 0);
5598 
5599   /* If this branches to JUMP_LABEL when the condition is false, reverse
5600      the condition.  */
5601   reverse
5602     = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5603       && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5604 
5605   return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5606 				 allow_cc_mode, valid_at_insn_p);
5607 }
5608 
5609 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5610    TARGET_MODE_REP_EXTENDED.
5611 
5612    Note that we assume that the property of
5613    TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5614    narrower than mode B.  I.e., if A is a mode narrower than B then in
5615    order to be able to operate on it in mode B, mode A needs to
5616    satisfy the requirements set by the representation of mode B.  */
5617 
5618 static void
5619 init_num_sign_bit_copies_in_rep (void)
5620 {
5621   machine_mode mode, in_mode;
5622 
5623   for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5624        in_mode = GET_MODE_WIDER_MODE (mode))
5625     for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5626 	 mode = GET_MODE_WIDER_MODE (mode))
5627       {
5628 	machine_mode i;
5629 
5630 	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5631 	   extends to the next widest mode.  */
5632 	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5633 		    || GET_MODE_WIDER_MODE (mode) == in_mode);
5634 
5635 	/* We are in in_mode.  Count how many bits outside of mode
5636 	   have to be copies of the sign-bit.  */
5637 	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5638 	  {
5639 	    machine_mode wider = GET_MODE_WIDER_MODE (i);
5640 
5641 	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5642 		/* We can only check sign-bit copies starting from the
5643 		   top-bit.  In order to be able to check the bits we
5644 		   have already seen we pretend that subsequent bits
5645 		   have to be sign-bit copies too.  */
5646 		|| num_sign_bit_copies_in_rep [in_mode][mode])
5647 	      num_sign_bit_copies_in_rep [in_mode][mode]
5648 		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5649 	  }
5650       }
5651 }
5652 
5653 /* Suppose that truncation from the machine mode of X to MODE is not a
5654    no-op.  See if there is anything special about X so that we can
5655    assume it already contains a truncated value of MODE.  */
5656 
5657 bool
5658 truncated_to_mode (machine_mode mode, const_rtx x)
5659 {
5660   /* This register has already been used in MODE without explicit
5661      truncation.  */
5662   if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5663     return true;
5664 
5665   /* See if we already satisfy the requirements of MODE.  If yes we
5666      can just switch to MODE.  */
5667   if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5668       && (num_sign_bit_copies (x, GET_MODE (x))
5669 	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5670     return true;
5671 
5672   return false;
5673 }
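
/* Editor's sketch (not part of rtlanal.c): what the sign-bit-copies test
   above amounts to on a hypothetical target whose TARGET_MODE_REP_EXTENDED
   makes the 16-bit representation inside a 32-bit register sign-extended.
   The table entry for that pair would be 16, so X is already truncated to
   the 16-bit mode when it has at least 16 + 1 sign-bit copies, i.e. when it
   fits the signed 16-bit range.  */

static int
already_truncated_to_16bit (int x)
{
  return x >= -32768 && x <= 32767;	/* >= 17 sign-bit copies in 32 bits.  */
}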
5674 
5675 /* Return true if RTX code CODE has a single sequence of zero or more
5676    "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
5677    entry in that case.  */
5678 
5679 static bool
5680 setup_reg_subrtx_bounds (unsigned int code)
5681 {
5682   const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5683   unsigned int i = 0;
5684   for (; format[i] != 'e'; ++i)
5685     {
5686       if (!format[i])
5687 	/* No subrtxes.  Leave start and count as 0.  */
5688 	return true;
5689       if (format[i] == 'E' || format[i] == 'V')
5690 	return false;
5691     }
5692 
5693   /* Record the sequence of 'e's.  */
5694   rtx_all_subrtx_bounds[code].start = i;
5695   do
5696     ++i;
5697   while (format[i] == 'e');
5698   rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5699   /* rtl-iter.h relies on this.  */
5700   gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5701 
5702   for (; format[i]; ++i)
5703     if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5704       return false;
5705 
5706   return true;
5707 }
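
/* Editor's sketch (not part of rtlanal.c): setup_reg_subrtx_bounds' format
   scan on a plain string.  Returns 1 and fills *start/*count if FORMAT
   contains at most one contiguous run of 'e' operands and no 'E'/'V'
   (rtvec) operands, else returns 0.  Other format letters stand for
   non-rtx operands, as in GET_RTX_FORMAT.  */

static int
single_e_run (const char *format, unsigned *start, unsigned *count)
{
  unsigned i = 0;
  *start = *count = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	return 1;			/* No subrtxes at all.  */
      if (format[i] == 'E' || format[i] == 'V')
	return 0;
    }
  *start = i;
  do
    ++i;
  while (format[i] == 'e');
  *count = i - *start;
  for (; format[i]; ++i)
    if (format[i] == 'e' || format[i] == 'E' || format[i] == 'V')
      return 0;
  return 1;
}

/* For example, "uee" yields start 1 and count 2, while "eEe" is rejected.  */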
5708 
5709 /* Initialize rtx_all_subrtx_bounds.  */
5710 void
5711 init_rtlanal (void)
5712 {
5713   int i;
5714   for (i = 0; i < NUM_RTX_CODE; i++)
5715     {
5716       if (!setup_reg_subrtx_bounds (i))
5717 	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5718       if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5719 	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5720     }
5721 
5722   init_num_sign_bit_copies_in_rep ();
5723 }
5724 
5725 /* Check whether this is a constant pool constant.  */
5726 bool
5727 constant_pool_constant_p (rtx x)
5728 {
5729   x = avoid_constant_pool_reference (x);
5730   return CONST_DOUBLE_P (x);
5731 }
5732 
5733 /* If M is a bitmask that selects a field of low-order bits within an item but
5734    not the entire word, return the length of the field.  Return -1 otherwise.
5735    M is used in machine mode MODE.  */
5736 
5737 int
5738 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5739 {
5740   if (mode != VOIDmode)
5741     {
5742       if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5743 	return -1;
5744       m &= GET_MODE_MASK (mode);
5745     }
5746 
5747   return exact_log2 (m + 1);
5748 }
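
/* Editor's sketch (not part of rtlanal.c): why low_bitmask_len can use
   exact_log2 (m + 1).  A mask of the K low-order bits is 2^K - 1, so M + 1
   is a power of two exactly when M is such a mask, and its log gives K.
   exact_log2_ull is a plain stand-in for GCC's exact_log2, assuming
   unsigned long long is at least 64 bits.  */

static int
exact_log2_ull (unsigned long long x)
{
  if (x == 0 || (x & (x - 1)) != 0)
    return -1;			/* Zero or not a power of two.  */
  int log = 0;
  while (x > 1)
    {
      x >>= 1;
      log++;
    }
  return log;
}

/* exact_log2_ull (0xFFull + 1) == 8, so 0xFF is an 8-bit low-order mask;
   exact_log2_ull (0xF7ull + 1) == -1, since 0xF7 has a hole in it.  */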
5749 
5750 /* Return the mode of MEM's address.  */
5751 
5752 machine_mode
5753 get_address_mode (rtx mem)
5754 {
5755   machine_mode mode;
5756 
5757   gcc_assert (MEM_P (mem));
5758   mode = GET_MODE (XEXP (mem, 0));
5759   if (mode != VOIDmode)
5760     return mode;
5761   return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5762 }
5763 
5764 /* Split up a CONST_DOUBLE or integer constant rtx
5765    into two rtx's for single words,
5766    storing in *FIRST the word that comes first in memory in the target
5767    and in *SECOND the other.
5768 
5769    TODO: This function needs to be rewritten to work on any size
5770    integer.  */
5771 
5772 void
5773 split_double (rtx value, rtx *first, rtx *second)
5774 {
5775   if (CONST_INT_P (value))
5776     {
5777       if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5778 	{
5779 	  /* In this case the CONST_INT holds both target words.
5780 	     Extract the bits from it into two word-sized pieces.
5781 	     Sign extend each half to HOST_WIDE_INT.  */
5782 	  unsigned HOST_WIDE_INT low, high;
5783 	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5784 	  unsigned bits_per_word = BITS_PER_WORD;
5785 
5786 	  /* Set sign_bit to the most significant bit of a word.  */
5787 	  sign_bit = 1;
5788 	  sign_bit <<= bits_per_word - 1;
5789 
5790 	  /* Set mask so that all bits of the word are set.  We could
5791 	     have used 1 << BITS_PER_WORD instead of basing the
5792 	     calculation on sign_bit.  However, on machines where
5793 	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5794 	     compiler warning, even though the code would never be
5795 	     executed.  */
5796 	  mask = sign_bit << 1;
5797 	  mask--;
5798 
5799 	  /* Set sign_extend to the remaining high-order bits.  */
5800 	  sign_extend = ~mask;
5801 
5802 	  /* Pick the lower word and sign-extend it.  */
5803 	  low = INTVAL (value);
5804 	  low &= mask;
5805 	  if (low & sign_bit)
5806 	    low |= sign_extend;
5807 
5808 	  /* Pick the higher word, shifted to the least significant
5809 	     bits, and sign-extend it.  */
5810 	  high = INTVAL (value);
5811 	  high >>= bits_per_word - 1;
5812 	  high >>= 1;
5813 	  high &= mask;
5814 	  if (high & sign_bit)
5815 	    high |= sign_extend;
5816 
5817 	  /* Store the words in the target machine order.  */
5818 	  if (WORDS_BIG_ENDIAN)
5819 	    {
5820 	      *first = GEN_INT (high);
5821 	      *second = GEN_INT (low);
5822 	    }
5823 	  else
5824 	    {
5825 	      *first = GEN_INT (low);
5826 	      *second = GEN_INT (high);
5827 	    }
5828 	}
5829       else
5830 	{
5831 	  /* The rule for using CONST_INT for a wider mode
5832 	     is that we regard the value as signed.
5833 	     So sign-extend it.  */
5834 	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5835 	  if (WORDS_BIG_ENDIAN)
5836 	    {
5837 	      *first = high;
5838 	      *second = value;
5839 	    }
5840 	  else
5841 	    {
5842 	      *first = value;
5843 	      *second = high;
5844 	    }
5845 	}
5846     }
5847   else if (GET_CODE (value) == CONST_WIDE_INT)
5848     {
5849       /* All of this is scary code and needs to be converted to
5850 	 properly work with any size integer.  */
5851       gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5852       if (WORDS_BIG_ENDIAN)
5853 	{
5854 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5855 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5856 	}
5857       else
5858 	{
5859 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5860 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5861 	}
5862     }
5863   else if (!CONST_DOUBLE_P (value))
5864     {
5865       if (WORDS_BIG_ENDIAN)
5866 	{
5867 	  *first = const0_rtx;
5868 	  *second = value;
5869 	}
5870       else
5871 	{
5872 	  *first = value;
5873 	  *second = const0_rtx;
5874 	}
5875     }
5876   else if (GET_MODE (value) == VOIDmode
5877 	   /* This is the old way we did CONST_DOUBLE integers.  */
5878 	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5879     {
5880       /* In an integer, the words are defined as most and least significant.
5881 	 So order them by the target's convention.  */
5882       if (WORDS_BIG_ENDIAN)
5883 	{
5884 	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5885 	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
5886 	}
5887       else
5888 	{
5889 	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
5890 	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5891 	}
5892     }
5893   else
5894     {
5895       long l[2];
5896 
5897       /* Note, this converts the REAL_VALUE_TYPE to the target's
5898 	 format, splits up the floating point double and outputs
5899 	 exactly 32 bits of it into each of l[0] and l[1] --
5900 	 not necessarily BITS_PER_WORD bits.  */
5901       REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
5902 
5903       /* If 32 bits is an entire word for the target, but not for the host,
5904 	 then sign-extend on the host so that the number will look the same
5905 	 way on the host that it would on the target.  See for instance
5906 	 simplify_unary_operation.  The #if is needed to avoid compiler
5907 	 warnings.  */
5908 
5909 #if HOST_BITS_PER_LONG > 32
5910       if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5911 	{
5912 	  if (l[0] & ((long) 1 << 31))
5913 	    l[0] |= ((unsigned long) (-1) << 32);
5914 	  if (l[1] & ((long) 1 << 31))
5915 	    l[1] |= ((unsigned long) (-1) << 32);
5916 	}
5917 #endif
5918 
5919       *first = GEN_INT (l[0]);
5920       *second = GEN_INT (l[1]);
5921     }
5922 }
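
/* Editor's sketch (not part of rtlanal.c): the CONST_INT branch of
   split_double for a host with a 64-bit HOST_WIDE_INT and a 32-bit target
   word, using <stdint.h> types.  Each half is extracted with a word-sized
   mask and then sign-extended back to 64 bits, just as the mask/sign_bit/
   sign_extend arithmetic above does; the cast back to int64_t assumes the
   usual two's-complement conversion.  */

#include <stdint.h>

static void
split_u64 (int64_t value, int words_big_endian,
	   int64_t *first, int64_t *second)
{
  const uint64_t mask = 0xFFFFFFFFu;		/* The 32 low-order bits.  */
  const uint64_t sign_bit = 0x80000000u;

  uint64_t low = (uint64_t) value & mask;
  if (low & sign_bit)
    low |= ~mask;				/* Sign-extend the low word.  */

  uint64_t high = ((uint64_t) value >> 32) & mask;
  if (high & sign_bit)
    high |= ~mask;				/* Sign-extend the high word.  */

  if (words_big_endian)
    {
      *first = (int64_t) high;
      *second = (int64_t) low;
    }
  else
    {
      *first = (int64_t) low;
      *second = (int64_t) high;
    }
}

/* For value = -5 (0xFFFFFFFFFFFFFFFB) the two words are 0xFFFFFFFB and
   0xFFFFFFFF, sign-extended to -5 and -1 respectively.  */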
5923 
5924 /* Return true if X is a sign_extract or zero_extract from the least
5925    significant bit.  */
5926 
5927 static bool
5928 lsb_bitfield_op_p (rtx x)
5929 {
5930   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5931     {
5932       machine_mode mode = GET_MODE (XEXP (x, 0));
5933       HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5934       HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5935 
5936       return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5937     }
5938   return false;
5939 }
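
/* Editor's sketch (not part of rtlanal.c): the position test used by
   lsb_bitfield_op_p.  With little-endian bit numbering the least significant
   bit of a field is position 0; with big-endian numbering a LEN-bit field
   touching the LSB starts at PRECISION - LEN.  The parameter names are
   illustrative.  */

static int
field_touches_lsb (int bits_big_endian, int precision, int len, int pos)
{
  return pos == (bits_big_endian ? precision - len : 0);
}

/* E.g. an 8-bit extract from a 32-bit value touches the LSB at pos 0 under
   little-endian numbering, or pos 24 under big-endian numbering.  */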
5940 
5941 /* Strip outer address "mutations" from LOC and return a pointer to the
5942    inner value.  If OUTER_CODE is nonnull, store the code of the innermost
5943    stripped expression there.
5944 
5945    "Mutations" either convert between modes or apply some kind of
5946    extension, truncation or alignment.  */
5947 
5948 rtx *
5949 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5950 {
5951   for (;;)
5952     {
5953       enum rtx_code code = GET_CODE (*loc);
5954       if (GET_RTX_CLASS (code) == RTX_UNARY)
5955 	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5956 	   used to convert between pointer sizes.  */
5957 	loc = &XEXP (*loc, 0);
5958       else if (lsb_bitfield_op_p (*loc))
5959 	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5960 	   acts as a combined truncation and extension.  */
5961 	loc = &XEXP (*loc, 0);
5962       else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5963 	/* (and ... (const_int -X)) is used to align to X bytes.  */
5964 	loc = &XEXP (*loc, 0);
5965       else if (code == SUBREG
5966                && !OBJECT_P (SUBREG_REG (*loc))
5967                && subreg_lowpart_p (*loc))
5968 	/* A lowpart (subreg (operator ...) ...) inside an address is used
5969 	   for mode conversion too.  */
5970 	loc = &SUBREG_REG (*loc);
5971       else
5972 	return loc;
5973       if (outer_code)
5974 	*outer_code = code;
5975     }
5976 }
5977 
5978 /* Return true if CODE applies some kind of scale.  The scaled value is
5979    the first operand and the scale is the second.  */
5980 
5981 static bool
5982 binary_scale_code_p (enum rtx_code code)
5983 {
5984   return (code == MULT
5985           || code == ASHIFT
5986           /* Needed by ARM targets.  */
5987           || code == ASHIFTRT
5988           || code == LSHIFTRT
5989           || code == ROTATE
5990           || code == ROTATERT);
5991 }
5992 
5993 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5994    (see address_info).  Return null otherwise.  */
5995 
5996 static rtx *
5997 get_base_term (rtx *inner)
5998 {
5999   if (GET_CODE (*inner) == LO_SUM)
6000     inner = strip_address_mutations (&XEXP (*inner, 0));
6001   if (REG_P (*inner)
6002       || MEM_P (*inner)
6003       || GET_CODE (*inner) == SUBREG
6004       || GET_CODE (*inner) == SCRATCH)
6005     return inner;
6006   return 0;
6007 }
6008 
6009 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6010    (see address_info).  Return null otherwise.  */
6011 
6012 static rtx *
6013 get_index_term (rtx *inner)
6014 {
6015   /* At present, only constant scales are allowed.  */
6016   if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6017     inner = strip_address_mutations (&XEXP (*inner, 0));
6018   if (REG_P (*inner)
6019       || MEM_P (*inner)
6020       || GET_CODE (*inner) == SUBREG
6021       || GET_CODE (*inner) == SCRATCH)
6022     return inner;
6023   return 0;
6024 }
6025 
6026 /* Set the segment part of address INFO to LOC, given that INNER is the
6027    unmutated value.  */
6028 
6029 static void
6030 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6031 {
6032   gcc_assert (!info->segment);
6033   info->segment = loc;
6034   info->segment_term = inner;
6035 }
6036 
6037 /* Set the base part of address INFO to LOC, given that INNER is the
6038    unmutated value.  */
6039 
6040 static void
6041 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6042 {
6043   gcc_assert (!info->base);
6044   info->base = loc;
6045   info->base_term = inner;
6046 }
6047 
6048 /* Set the index part of address INFO to LOC, given that INNER is the
6049    unmutated value.  */
6050 
6051 static void
6052 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6053 {
6054   gcc_assert (!info->index);
6055   info->index = loc;
6056   info->index_term = inner;
6057 }
6058 
6059 /* Set the displacement part of address INFO to LOC, given that INNER
6060    is the constant term.  */
6061 
6062 static void
6063 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6064 {
6065   gcc_assert (!info->disp);
6066   info->disp = loc;
6067   info->disp_term = inner;
6068 }
6069 
6070 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
6071    rest of INFO accordingly.  */
6072 
6073 static void
6074 decompose_incdec_address (struct address_info *info)
6075 {
6076   info->autoinc_p = true;
6077 
6078   rtx *base = &XEXP (*info->inner, 0);
6079   set_address_base (info, base, base);
6080   gcc_checking_assert (info->base == info->base_term);
6081 
6082   /* These addresses are only valid when the size of the addressed
6083      value is known.  */
6084   gcc_checking_assert (info->mode != VOIDmode);
6085 }
6086 
6087 /* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
6088    of INFO accordingly.  */
6089 
6090 static void
6091 decompose_automod_address (struct address_info *info)
6092 {
6093   info->autoinc_p = true;
6094 
6095   rtx *base = &XEXP (*info->inner, 0);
6096   set_address_base (info, base, base);
6097   gcc_checking_assert (info->base == info->base_term);
6098 
6099   rtx plus = XEXP (*info->inner, 1);
6100   gcc_assert (GET_CODE (plus) == PLUS);
6101 
6102   info->base_term2 = &XEXP (plus, 0);
6103   gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6104 
6105   rtx *step = &XEXP (plus, 1);
6106   rtx *inner_step = strip_address_mutations (step);
6107   if (CONSTANT_P (*inner_step))
6108     set_address_disp (info, step, inner_step);
6109   else
6110     set_address_index (info, step, inner_step);
6111 }
6112 
6113 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6114    values in [PTR, END).  Return a pointer to the end of the used array.  */
6115 
6116 static rtx **
6117 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6118 {
6119   rtx x = *loc;
6120   if (GET_CODE (x) == PLUS)
6121     {
6122       ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6123       ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6124     }
6125   else
6126     {
6127       gcc_assert (ptr != end);
6128       *ptr++ = loc;
6129     }
6130   return ptr;
6131 }
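
/* Editor's sketch (not part of rtlanal.c): extract_plus_operands' recursive
   flattening on a toy binary-sum tree.  "struct node" is an illustrative
   stand-in for rtx; a sum node has two kids, a leaf has none.  Pointers to
   the leaves are written into [ptr, end) in left-to-right order and the new
   end of the used array is returned, or a null pointer if more than
   END - PTR leaves are found (where the code above asserts instead, having
   sized its array for the maximum of four terms).  */

struct node
{
  int is_plus;
  struct node *kid[2];		/* Valid only when is_plus.  */
};

static struct node **
flatten_sum (struct node *x, struct node **ptr, struct node **end)
{
  if (x->is_plus)
    {
      ptr = flatten_sum (x->kid[0], ptr, end);
      if (ptr)
	ptr = flatten_sum (x->kid[1], ptr, end);
      return ptr;
    }
  if (ptr == end)
    return 0;			/* Too many terms for the caller's array.  */
  *ptr++ = x;
  return ptr;
}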
6132 
6133 /* Evaluate the likelihood of X being a base or index value, returning
6134    positive if it is likely to be a base, negative if it is likely to be
6135    an index, and 0 if we can't tell.  Make the magnitude of the return
6136    value reflect the amount of confidence we have in the answer.
6137 
6138    MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */
6139 
6140 static int
6141 baseness (rtx x, machine_mode mode, addr_space_t as,
6142 	  enum rtx_code outer_code, enum rtx_code index_code)
6143 {
6144   /* Believe *_POINTER unless the address shape requires otherwise.  */
6145   if (REG_P (x) && REG_POINTER (x))
6146     return 2;
6147   if (MEM_P (x) && MEM_POINTER (x))
6148     return 2;
6149 
6150   if (REG_P (x) && HARD_REGISTER_P (x))
6151     {
6152       /* X is a hard register.  If it only fits one of the base
6153 	 or index classes, choose that interpretation.  */
6154       int regno = REGNO (x);
6155       bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6156       bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6157       if (base_p != index_p)
6158 	return base_p ? 1 : -1;
6159     }
6160   return 0;
6161 }
6162 
6163 /* INFO->INNER describes a normal, non-automodified address.
6164    Fill in the rest of INFO accordingly.  */
6165 
6166 static void
6167 decompose_normal_address (struct address_info *info)
6168 {
6169   /* Treat the address as the sum of up to four values.  */
6170   rtx *ops[4];
6171   size_t n_ops = extract_plus_operands (info->inner, ops,
6172 					ops + ARRAY_SIZE (ops)) - ops;
6173 
6174   /* If there is more than one component, any base component is in a PLUS.  */
6175   if (n_ops > 1)
6176     info->base_outer_code = PLUS;
6177 
6178   /* Try to classify each sum operand now.  Leave those that could be
6179      either a base or an index in OPS.  */
6180   rtx *inner_ops[4];
6181   size_t out = 0;
6182   for (size_t in = 0; in < n_ops; ++in)
6183     {
6184       rtx *loc = ops[in];
6185       rtx *inner = strip_address_mutations (loc);
6186       if (CONSTANT_P (*inner))
6187 	set_address_disp (info, loc, inner);
6188       else if (GET_CODE (*inner) == UNSPEC)
6189 	set_address_segment (info, loc, inner);
6190       else
6191 	{
6192 	  /* The only other possibilities are a base or an index.  */
6193 	  rtx *base_term = get_base_term (inner);
6194 	  rtx *index_term = get_index_term (inner);
6195 	  gcc_assert (base_term || index_term);
6196 	  if (!base_term)
6197 	    set_address_index (info, loc, index_term);
6198 	  else if (!index_term)
6199 	    set_address_base (info, loc, base_term);
6200 	  else
6201 	    {
6202 	      gcc_assert (base_term == index_term);
6203 	      ops[out] = loc;
6204 	      inner_ops[out] = base_term;
6205 	      ++out;
6206 	    }
6207 	}
6208     }
6209 
6210   /* Classify the remaining OPS members as bases and indexes.  */
6211   if (out == 1)
6212     {
6213       /* If we haven't seen a base or an index yet, assume that this is
6214 	 the base.  If we were confident that another term was the base
6215 	 or index, treat the remaining operand as the other kind.  */
6216       if (!info->base)
6217 	set_address_base (info, ops[0], inner_ops[0]);
6218       else
6219 	set_address_index (info, ops[0], inner_ops[0]);
6220     }
6221   else if (out == 2)
6222     {
6223       /* In the event of a tie, assume the base comes first.  */
6224       if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6225 		    GET_CODE (*ops[1]))
6226 	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6227 		       GET_CODE (*ops[0])))
6228 	{
6229 	  set_address_base (info, ops[0], inner_ops[0]);
6230 	  set_address_index (info, ops[1], inner_ops[1]);
6231 	}
6232       else
6233 	{
6234 	  set_address_base (info, ops[1], inner_ops[1]);
6235 	  set_address_index (info, ops[0], inner_ops[0]);
6236 	}
6237     }
6238   else
6239     gcc_assert (out == 0);
6240 }
6241 
6242 /* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
6243    or VOIDmode if not known.  AS is the address space associated with LOC.
6244    OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */
6245 
6246 void
6247 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6248 		   addr_space_t as, enum rtx_code outer_code)
6249 {
6250   memset (info, 0, sizeof (*info));
6251   info->mode = mode;
6252   info->as = as;
6253   info->addr_outer_code = outer_code;
6254   info->outer = loc;
6255   info->inner = strip_address_mutations (loc, &outer_code);
6256   info->base_outer_code = outer_code;
6257   switch (GET_CODE (*info->inner))
6258     {
6259     case PRE_DEC:
6260     case PRE_INC:
6261     case POST_DEC:
6262     case POST_INC:
6263       decompose_incdec_address (info);
6264       break;
6265 
6266     case PRE_MODIFY:
6267     case POST_MODIFY:
6268       decompose_automod_address (info);
6269       break;
6270 
6271     default:
6272       decompose_normal_address (info);
6273       break;
6274     }
6275 }
6276 
6277 /* Describe address operand LOC in INFO.  */
6278 
6279 void
6280 decompose_lea_address (struct address_info *info, rtx *loc)
6281 {
6282   decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6283 }
6284 
6285 /* Describe the address of MEM X in INFO.  */
6286 
6287 void
6288 decompose_mem_address (struct address_info *info, rtx x)
6289 {
6290   gcc_assert (MEM_P (x));
6291   decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6292 		     MEM_ADDR_SPACE (x), MEM);
6293 }
6294 
6295 /* Update INFO after a change to the address it describes.  */
6296 
6297 void
6298 update_address (struct address_info *info)
6299 {
6300   decompose_address (info, info->outer, info->mode, info->as,
6301 		     info->addr_outer_code);
6302 }
6303 
6304 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6305    more complicated than that.  */
6306 
6307 HOST_WIDE_INT
6308 get_index_scale (const struct address_info *info)
6309 {
6310   rtx index = *info->index;
6311   if (GET_CODE (index) == MULT
6312       && CONST_INT_P (XEXP (index, 1))
6313       && info->index_term == &XEXP (index, 0))
6314     return INTVAL (XEXP (index, 1));
6315 
6316   if (GET_CODE (index) == ASHIFT
6317       && CONST_INT_P (XEXP (index, 1))
6318       && info->index_term == &XEXP (index, 0))
6319     return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6320 
6321   if (info->index == info->index_term)
6322     return 1;
6323 
6324   return 0;
6325 }
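
/* Editor's sketch (not part of rtlanal.c): the scale values get_index_scale
   reports, on plain integers.  A (mult index 8) and an (ashift index 3)
   describe the same scaled index, so a shift count is converted with
   1 << count (valid for counts below the word size); a bare index term has
   scale 1.  The enum and names are illustrative.  */

enum toy_index_form { TOY_MULT, TOY_ASHIFT, TOY_BARE };

static long long
toy_index_scale (enum toy_index_form form, long long operand)
{
  switch (form)
    {
    case TOY_MULT:
      return operand;			/* (mult index N) => scale N.  */
    case TOY_ASHIFT:
      return 1LL << operand;		/* (ashift index N) => scale 2^N.  */
    default:
      return 1;				/* Bare index register.  */
    }
}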
6326 
6327 /* Return the "index code" of INFO, in the form required by
6328    ok_for_base_p_1.  */
6329 
6330 enum rtx_code
6331 get_index_code (const struct address_info *info)
6332 {
6333   if (info->index)
6334     return GET_CODE (*info->index);
6335 
6336   if (info->disp)
6337     return GET_CODE (*info->disp);
6338 
6339   return SCRATCH;
6340 }
6341 
6342 /* Return true if RTL X contains a SYMBOL_REF.  */
6343 
6344 bool
6345 contains_symbol_ref_p (const_rtx x)
6346 {
6347   subrtx_iterator::array_type array;
6348   FOR_EACH_SUBRTX (iter, array, x, ALL)
6349     if (SYMBOL_REF_P (*iter))
6350       return true;
6351 
6352   return false;
6353 }
6354 
6355 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */
6356 
6357 bool
6358 contains_symbolic_reference_p (const_rtx x)
6359 {
6360   subrtx_iterator::array_type array;
6361   FOR_EACH_SUBRTX (iter, array, x, ALL)
6362     if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6363       return true;
6364 
6365   return false;
6366 }
6367 
6368 /* Return true if X contains a thread-local symbol.  */
6369 
6370 bool
6371 tls_referenced_p (const_rtx x)
6372 {
6373   if (!targetm.have_tls)
6374     return false;
6375 
6376   subrtx_iterator::array_type array;
6377   FOR_EACH_SUBRTX (iter, array, x, ALL)
6378     if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6379       return true;
6380   return false;
6381 }
6382