1 /* Analyze RTL for GNU compiler.
2    Copyright (C) 1987-2020 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "insn-config.h"
33 #include "regs.h"
34 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
35 #include "recog.h"
36 #include "addresses.h"
37 #include "rtl-iter.h"
38 #include "hard-reg-set.h"
39 #include "function-abi.h"
40 
41 /* Forward declarations */
42 static void set_of_1 (rtx, const_rtx, void *);
43 static bool covers_regno_p (const_rtx, unsigned int);
44 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
45 static int computed_jump_p_1 (const_rtx);
46 static void parms_set (rtx, const_rtx, void *);
47 
48 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
49                                                    const_rtx, machine_mode,
50                                                    unsigned HOST_WIDE_INT);
51 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
52 					     const_rtx, machine_mode,
53                                              unsigned HOST_WIDE_INT);
54 static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
55 						const_rtx, machine_mode,
56                                                 unsigned int);
57 static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
58 					  const_rtx, machine_mode,
59 					  unsigned int);
60 
61 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
62 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
63 
64 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
65    If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
66    SIGN_EXTEND then while narrowing we also have to enforce the
67    representation and sign-extend the value to mode DESTINATION_REP.
68 
69    If the value is already sign-extended to DESTINATION_REP mode we
70    can just switch to DESTINATION mode on it.  For each pair of
71    integral modes SOURCE and DESTINATION, when truncating from SOURCE
72    to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
73    contains the number of high-order bits in SOURCE that have to be
74    copies of the sign-bit so that we can do this mode-switch to
75    DESTINATION.  */
76 
77 static unsigned int
78 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
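/* For example, on a target whose TARGET_MODE_REP_EXTENDED hook returns
   SIGN_EXTEND for each step from QImode up to SImode, all 24 bits of an
   SImode value outside QImode must be copies of the QImode sign bit, so
   NUM_SIGN_BIT_COPIES_IN_REP[SImode][QImode] accumulates
   (16 - 8) + (32 - 16) = 24 across the intermediate modes.  */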
79 
80 /* Store X into index I of ARRAY.  ARRAY is known to have at least I
81    elements.  Return the new base of ARRAY.  */
82 
83 template <typename T>
84 typename T::value_type *
85 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
86 						  value_type *base,
87 						  size_t i, value_type x)
88 {
89   if (base == array.stack)
90     {
91       if (i < LOCAL_ELEMS)
92 	{
93 	  base[i] = x;
94 	  return base;
95 	}
96       gcc_checking_assert (i == LOCAL_ELEMS);
97       /* A previous iteration might also have moved from the stack to the
98 	 heap, in which case the heap array will already be big enough.  */
99       if (vec_safe_length (array.heap) <= i)
100 	vec_safe_grow (array.heap, i + 1);
101       base = array.heap->address ();
102       memcpy (base, array.stack, sizeof (array.stack));
103       base[LOCAL_ELEMS] = x;
104       return base;
105     }
106   unsigned int length = array.heap->length ();
107   if (length > i)
108     {
109       gcc_checking_assert (base == array.heap->address ());
110       base[i] = x;
111       return base;
112     }
113   else
114     {
115       gcc_checking_assert (i == length);
116       vec_safe_push (array.heap, x);
117       return array.heap->address ();
118     }
119 }
120 
121 /* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
122    number of elements added to the worklist.  */
123 
124 template <typename T>
125 size_t
126 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
127 						    value_type *base,
128 						    size_t end, rtx_type x)
129 {
130   enum rtx_code code = GET_CODE (x);
131   const char *format = GET_RTX_FORMAT (code);
132   size_t orig_end = end;
133   if (__builtin_expect (INSN_P (x), false))
134     {
135       /* Put the pattern at the top of the queue, since that's what
136 	 we're likely to want most.  It also allows for the SEQUENCE
137 	 code below.  */
138       for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
139 	if (format[i] == 'e')
140 	  {
141 	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
142 	    if (__builtin_expect (end < LOCAL_ELEMS, true))
143 	      base[end++] = subx;
144 	    else
145 	      base = add_single_to_queue (array, base, end++, subx);
146 	  }
147     }
148   else
149     for (int i = 0; format[i]; ++i)
150       if (format[i] == 'e')
151 	{
152 	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
153 	  if (__builtin_expect (end < LOCAL_ELEMS, true))
154 	    base[end++] = subx;
155 	  else
156 	    base = add_single_to_queue (array, base, end++, subx);
157 	}
158       else if (format[i] == 'E')
159 	{
160 	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
161 	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
162 	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
163 	    for (unsigned int j = 0; j < length; j++)
164 	      base[end++] = T::get_value (vec[j]);
165 	  else
166 	    for (unsigned int j = 0; j < length; j++)
167 	      base = add_single_to_queue (array, base, end++,
168 					  T::get_value (vec[j]));
169 	  if (code == SEQUENCE && end == length)
170 	    /* If the subrtxes of the sequence fill the entire array then
171 	       we know that no other parts of a containing insn are queued.
172 	       The caller is therefore iterating over the sequence as a
173 	       PATTERN (...), so we also want the patterns of the
174 	       subinstructions.  */
175 	    for (unsigned int j = 0; j < length; j++)
176 	      {
177 		typename T::rtx_type x = T::get_rtx (base[j]);
178 		if (INSN_P (x))
179 		  base[j] = T::get_value (PATTERN (x));
180 	      }
181 	}
182   return end - orig_end;
183 }
184 
185 template <typename T>
186 void
187 generic_subrtx_iterator <T>::free_array (array_type &array)
188 {
189   vec_free (array.heap);
190 }
191 
192 template <typename T>
193 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
194 
195 template class generic_subrtx_iterator <const_rtx_accessor>;
196 template class generic_subrtx_iterator <rtx_var_accessor>;
197 template class generic_subrtx_iterator <rtx_ptr_accessor>;
198 
199 /* Return 1 if the value of X is unstable
200    (would be different at a different point in the program).
201    The frame pointer, arg pointer, etc. are considered stable
202    (within one function) and so is anything marked `unchanging'.  */
203 
204 int
205 rtx_unstable_p (const_rtx x)
206 {
207   const RTX_CODE code = GET_CODE (x);
208   int i;
209   const char *fmt;
210 
211   switch (code)
212     {
213     case MEM:
214       return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
215 
216     case CONST:
217     CASE_CONST_ANY:
218     case SYMBOL_REF:
219     case LABEL_REF:
220       return 0;
221 
222     case REG:
223       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
224       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
225 	  /* The arg pointer varies if it is not a fixed register.  */
226 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
227 	return 0;
228       /* ??? When call-clobbered, the value is stable modulo the restore
229 	 that must happen after a call.  This currently screws up local-alloc
230 	 into believing that the restore is not needed.  */
231       if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
232 	return 0;
233       return 1;
234 
235     case ASM_OPERANDS:
236       if (MEM_VOLATILE_P (x))
237 	return 1;
238 
239       /* Fall through.  */
240 
241     default:
242       break;
243     }
244 
245   fmt = GET_RTX_FORMAT (code);
246   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
247     if (fmt[i] == 'e')
248       {
249 	if (rtx_unstable_p (XEXP (x, i)))
250 	  return 1;
251       }
252     else if (fmt[i] == 'E')
253       {
254 	int j;
255 	for (j = 0; j < XVECLEN (x, i); j++)
256 	  if (rtx_unstable_p (XVECEXP (x, i, j)))
257 	    return 1;
258       }
259 
260   return 0;
261 }
262 
263 /* Return 1 if X has a value that can vary even between two
264    executions of the program.  0 means X can be compared reliably
265    against certain constants or near-constants.
266    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
267    zero, we are slightly more conservative.
268    The frame pointer and the arg pointer are considered constant.  */
269 
270 bool
271 rtx_varies_p (const_rtx x, bool for_alias)
272 {
273   RTX_CODE code;
274   int i;
275   const char *fmt;
276 
277   if (!x)
278     return 0;
279 
280   code = GET_CODE (x);
281   switch (code)
282     {
283     case MEM:
284       return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
285 
286     case CONST:
287     CASE_CONST_ANY:
288     case SYMBOL_REF:
289     case LABEL_REF:
290       return 0;
291 
292     case REG:
293       /* Note that we have to test for the actual rtx used for the frame
294 	 and arg pointers and not just the register number in case we have
295 	 eliminated the frame and/or arg pointer and are using it
296 	 for pseudos.  */
297       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
298 	  /* The arg pointer varies if it is not a fixed register.  */
299 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
300 	return 0;
301       if (x == pic_offset_table_rtx
302 	  /* ??? When call-clobbered, the value is stable modulo the restore
303 	     that must happen after a call.  This currently screws up
304 	     local-alloc into believing that the restore is not needed, so we
305 	     must return 0 only if we are called from alias analysis.  */
306 	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
307 	return 0;
308       return 1;
309 
310     case LO_SUM:
311       /* The operand 0 of a LO_SUM is considered constant
312 	 (in fact it is related specifically to operand 1)
313 	 during alias analysis.  */
314       return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
315 	     || rtx_varies_p (XEXP (x, 1), for_alias);
316 
317     case ASM_OPERANDS:
318       if (MEM_VOLATILE_P (x))
319 	return 1;
320 
321       /* Fall through.  */
322 
323     default:
324       break;
325     }
326 
327   fmt = GET_RTX_FORMAT (code);
328   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
329     if (fmt[i] == 'e')
330       {
331 	if (rtx_varies_p (XEXP (x, i), for_alias))
332 	  return 1;
333       }
334     else if (fmt[i] == 'E')
335       {
336 	int j;
337 	for (j = 0; j < XVECLEN (x, i); j++)
338 	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
339 	    return 1;
340       }
341 
342   return 0;
343 }
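/* For example, a pseudo such as (reg:SI 100) varies, while
   (symbol_ref ("x")) and the frame pointer do not; a MEM varies unless
   it is MEM_READONLY_P with a non-varying address.  rtx_unstable_p
   above answers the related intra-run question: whether the value can
   differ between two points within a single execution.  */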
344 
345 /* Compute an approximation for the offset between the register
346    FROM and TO for the current function, as it was at the start
347    of the routine.  */
348 
349 static poly_int64
350 get_initial_register_offset (int from, int to)
351 {
352   static const struct elim_table_t
353   {
354     const int from;
355     const int to;
356   } table[] = ELIMINABLE_REGS;
357   poly_int64 offset1, offset2;
358   unsigned int i, j;
359 
360   if (to == from)
361     return 0;
362 
363   /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
364      is completed, but we need to give at least an estimate for the stack
365      pointer based on the frame size.  */
366   if (!epilogue_completed)
367     {
368       offset1 = crtl->outgoing_args_size + get_frame_size ();
369 #if !STACK_GROWS_DOWNWARD
370       offset1 = - offset1;
371 #endif
372       if (to == STACK_POINTER_REGNUM)
373 	return offset1;
374       else if (from == STACK_POINTER_REGNUM)
375 	return - offset1;
376       else
377 	return 0;
378      }
379 
380   for (i = 0; i < ARRAY_SIZE (table); i++)
381       if (table[i].from == from)
382 	{
383 	  if (table[i].to == to)
384 	    {
385 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
386 					  offset1);
387 	      return offset1;
388 	    }
389 	  for (j = 0; j < ARRAY_SIZE (table); j++)
390 	    {
391 	      if (table[j].to == to
392 		  && table[j].from == table[i].to)
393 		{
394 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
395 					      offset1);
396 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
397 					      offset2);
398 		  return offset1 + offset2;
399 		}
400 	      if (table[j].from == to
401 		  && table[j].to == table[i].to)
402 		{
403 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
404 					      offset1);
405 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
406 					      offset2);
407 		  return offset1 - offset2;
408 		}
409 	    }
410 	}
411       else if (table[i].to == from)
412 	{
413 	  if (table[i].from == to)
414 	    {
415 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
416 					  offset1);
417 	      return - offset1;
418 	    }
419 	  for (j = 0; j < ARRAY_SIZE (table); j++)
420 	    {
421 	      if (table[j].to == to
422 		  && table[j].from == table[i].from)
423 		{
424 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
425 					      offset1);
426 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
427 					      offset2);
428 		  return - offset1 + offset2;
429 		}
430 	      if (table[j].from == to
431 		  && table[j].to == table[i].from)
432 		{
433 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
434 					      offset1);
435 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
436 					      offset2);
437 		  return - offset1 - offset2;
438 		}
439 	    }
440 	}
441 
442   /* If the requested register combination was not found,
443      try a simpler combination.  */
444   if (from == ARG_POINTER_REGNUM)
445     return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
446   else if (to == ARG_POINTER_REGNUM)
447     return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
448   else if (from == HARD_FRAME_POINTER_REGNUM)
449     return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
450   else if (to == HARD_FRAME_POINTER_REGNUM)
451     return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
452   else
453     return 0;
454 }
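/* An illustrative case: if ELIMINABLE_REGS contains the entries
   AP -> SP and FP -> SP, the offset from AP to FP is not in the table
   directly, but the loops above derive it as
   offset (AP -> SP) - offset (FP -> SP), composing two
   INITIAL_ELIMINATION_OFFSET results through the shared register SP.  */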
455 
456 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
457    bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
458    UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
459    references on strict alignment machines.  */
460 
461 static int
462 rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
463 		       machine_mode mode, bool unaligned_mems)
464 {
465   enum rtx_code code = GET_CODE (x);
466   gcc_checking_assert (mode == BLKmode
467 		       || mode == VOIDmode
468 		       || known_size_p (size));
469   poly_int64 const_x1;
470 
471   /* The offset must be a multiple of the mode size if we are considering
472      unaligned memory references on strict alignment machines.  */
473   if (STRICT_ALIGNMENT
474       && unaligned_mems
475       && mode != BLKmode
476       && mode != VOIDmode)
477     {
478       poly_int64 actual_offset = offset;
479 
480 #ifdef SPARC_STACK_BOUNDARY_HACK
481       /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
482 	     the real alignment of %sp.  However, when it does this, the
483 	     alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
484       if (SPARC_STACK_BOUNDARY_HACK
485 	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
486 	actual_offset -= STACK_POINTER_OFFSET;
487 #endif
488 
489       if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
490 	return 1;
491     }
492 
493   switch (code)
494     {
495     case SYMBOL_REF:
496       if (SYMBOL_REF_WEAK (x))
497 	return 1;
498       if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
499 	{
500 	  tree decl;
501 	  poly_int64 decl_size;
502 
503 	  if (maybe_lt (offset, 0))
504 	    return 1;
505 	  if (!known_size_p (size))
506 	    return maybe_ne (offset, 0);
507 
508 	  /* If the size of the access or of the symbol is unknown,
509 	     assume the worst.  */
510 	  decl = SYMBOL_REF_DECL (x);
511 
512 	  /* Else check that the access is in bounds.  TODO: restructure
513 	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
514 	  if (!decl)
515 	    decl_size = -1;
516 	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
517 	    {
518 	      if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
519 		decl_size = -1;
520 	    }
521 	  else if (TREE_CODE (decl) == STRING_CST)
522 	    decl_size = TREE_STRING_LENGTH (decl);
523 	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
524 	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
525 	  else
526 	    decl_size = -1;
527 
528 	  return (!known_size_p (decl_size) || known_eq (decl_size, 0)
529 		  ? maybe_ne (offset, 0)
530 		  : !known_subrange_p (offset, size, 0, decl_size));
531         }
532 
533       return 0;
534 
535     case LABEL_REF:
536       return 0;
537 
538     case REG:
539       /* Stack references are assumed not to trap, but we need to deal with
540 	 nonsensical offsets.  */
541       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
542 	 || x == stack_pointer_rtx
543 	 /* The arg pointer varies if it is not a fixed register.  */
544 	 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
545 	{
546 #ifdef RED_ZONE_SIZE
547 	  poly_int64 red_zone_size = RED_ZONE_SIZE;
548 #else
549 	  poly_int64 red_zone_size = 0;
550 #endif
551 	  poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
552 	  poly_int64 low_bound, high_bound;
553 
554 	  if (!known_size_p (size))
555 	    return 1;
556 
557 	  if (x == frame_pointer_rtx)
558 	    {
559 	      if (FRAME_GROWS_DOWNWARD)
560 		{
561 		  high_bound = targetm.starting_frame_offset ();
562 		  low_bound  = high_bound - get_frame_size ();
563 		}
564 	      else
565 		{
566 		  low_bound  = targetm.starting_frame_offset ();
567 		  high_bound = low_bound + get_frame_size ();
568 		}
569 	    }
570 	  else if (x == hard_frame_pointer_rtx)
571 	    {
572 	      poly_int64 sp_offset
573 		= get_initial_register_offset (STACK_POINTER_REGNUM,
574 					       HARD_FRAME_POINTER_REGNUM);
575 	      poly_int64 ap_offset
576 		= get_initial_register_offset (ARG_POINTER_REGNUM,
577 					       HARD_FRAME_POINTER_REGNUM);
578 
579 #if STACK_GROWS_DOWNWARD
580 	      low_bound  = sp_offset - red_zone_size - stack_boundary;
581 	      high_bound = ap_offset
582 			   + FIRST_PARM_OFFSET (current_function_decl)
583 #if !ARGS_GROW_DOWNWARD
584 			   + crtl->args.size
585 #endif
586 			   + stack_boundary;
587 #else
588 	      high_bound = sp_offset + red_zone_size + stack_boundary;
589 	      low_bound  = ap_offset
590 			   + FIRST_PARM_OFFSET (current_function_decl)
591 #if ARGS_GROW_DOWNWARD
592 			   - crtl->args.size
593 #endif
594 			   - stack_boundary;
595 #endif
596 	    }
597 	  else if (x == stack_pointer_rtx)
598 	    {
599 	      poly_int64 ap_offset
600 		= get_initial_register_offset (ARG_POINTER_REGNUM,
601 					       STACK_POINTER_REGNUM);
602 
603 #if STACK_GROWS_DOWNWARD
604 	      low_bound  = - red_zone_size - stack_boundary;
605 	      high_bound = ap_offset
606 			   + FIRST_PARM_OFFSET (current_function_decl)
607 #if !ARGS_GROW_DOWNWARD
608 			   + crtl->args.size
609 #endif
610 			   + stack_boundary;
611 #else
612 	      high_bound = red_zone_size + stack_boundary;
613 	      low_bound  = ap_offset
614 			   + FIRST_PARM_OFFSET (current_function_decl)
615 #if ARGS_GROW_DOWNWARD
616 			   - crtl->args.size
617 #endif
618 			   - stack_boundary;
619 #endif
620 	    }
621 	  else
622 	    {
623 	      /* We assume that accesses are safe to at least the
624 		 next stack boundary.
625 		 Examples are varargs and __builtin_return_address.  */
626 #if ARGS_GROW_DOWNWARD
627 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
628 			   + stack_boundary;
629 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
630 			   - crtl->args.size - stack_boundary;
631 #else
632 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
633 			   - stack_boundary;
634 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
635 			   + crtl->args.size + stack_boundary;
636 #endif
637 	    }
638 
639 	  if (known_ge (offset, low_bound)
640 	      && known_le (offset, high_bound - size))
641 	    return 0;
642 	  return 1;
643 	}
644       /* All of the virtual frame registers are stack references.  */
645       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
646 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
647 	return 0;
648       return 1;
649 
650     case CONST:
651       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
652 				    mode, unaligned_mems);
653 
654     case PLUS:
655       /* An address is assumed not to trap if:
656 	 - it is the pic register plus a const unspec without offset.  */
657       if (XEXP (x, 0) == pic_offset_table_rtx
658 	  && GET_CODE (XEXP (x, 1)) == CONST
659 	  && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
660 	  && known_eq (offset, 0))
661 	return 0;
662 
663       /* - or it is an address that can't trap plus a constant integer.  */
664       if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
665 	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
666 				     size, mode, unaligned_mems))
667 	return 0;
668 
669       return 1;
670 
671     case LO_SUM:
672     case PRE_MODIFY:
673       return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
674 				    mode, unaligned_mems);
675 
676     case PRE_DEC:
677     case PRE_INC:
678     case POST_DEC:
679     case POST_INC:
680     case POST_MODIFY:
681       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
682 				    mode, unaligned_mems);
683 
684     default:
685       break;
686     }
687 
688   /* If it isn't one of the cases above, it can cause a trap.  */
689   return 1;
690 }
691 
692 /* Return nonzero if the use of X as an address in a MEM can cause a trap.  */
693 
694 int
695 rtx_addr_can_trap_p (const_rtx x)
696 {
697   return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
698 }
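/* For example, an address based on a weak SYMBOL_REF can trap, since a
   weak symbol may resolve to address zero, while a frame-pointer
   address with an in-bounds offset cannot.  The -1 passed for SIZE
   means the extent of the access is unknown, so only the address
   itself is checked.  */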
699 
700 /* Return true if X contains a MEM subrtx.  */
701 
702 bool
703 contains_mem_rtx_p (rtx x)
704 {
705   subrtx_iterator::array_type array;
706   FOR_EACH_SUBRTX (iter, array, x, ALL)
707     if (MEM_P (*iter))
708       return true;
709 
710   return false;
711 }
712 
713 /* Return true if X is an address that is known to not be zero.  */
714 
715 bool
716 nonzero_address_p (const_rtx x)
717 {
718   const enum rtx_code code = GET_CODE (x);
719 
720   switch (code)
721     {
722     case SYMBOL_REF:
723       return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
724 
725     case LABEL_REF:
726       return true;
727 
728     case REG:
729       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
730       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
731 	  || x == stack_pointer_rtx
732 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
733 	return true;
734       /* All of the virtual frame registers are stack references.  */
735       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
736 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
737 	return true;
738       return false;
739 
740     case CONST:
741       return nonzero_address_p (XEXP (x, 0));
742 
743     case PLUS:
744       /* Handle PIC references.  */
745       if (XEXP (x, 0) == pic_offset_table_rtx
746 	       && CONSTANT_P (XEXP (x, 1)))
747 	return true;
748       return false;
749 
750     case PRE_MODIFY:
751       /* Similar to the above; allow positive offsets.  Further, since
752 	 auto-inc is only allowed in memories, the register must be a
753 	 pointer.  */
754       if (CONST_INT_P (XEXP (x, 1))
755 	  && INTVAL (XEXP (x, 1)) > 0)
756 	return true;
757       return nonzero_address_p (XEXP (x, 0));
758 
759     case PRE_INC:
760       /* Similarly.  Further, the offset is always positive.  */
761       return true;
762 
763     case PRE_DEC:
764     case POST_DEC:
765     case POST_INC:
766     case POST_MODIFY:
767       return nonzero_address_p (XEXP (x, 0));
768 
769     case LO_SUM:
770       return nonzero_address_p (XEXP (x, 1));
771 
772     default:
773       break;
774     }
775 
776   /* If it isn't one of the cases above, it might be zero.  */
777   return false;
778 }
779 
780 /* Return 1 if X refers to a memory location whose address
781    cannot be compared reliably with constant addresses,
782    or if X refers to a BLKmode memory object.
783    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
784    zero, we are slightly more conservative.  */
785 
786 bool
787 rtx_addr_varies_p (const_rtx x, bool for_alias)
788 {
789   enum rtx_code code;
790   int i;
791   const char *fmt;
792 
793   if (x == 0)
794     return 0;
795 
796   code = GET_CODE (x);
797   if (code == MEM)
798     return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
799 
800   fmt = GET_RTX_FORMAT (code);
801   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
802     if (fmt[i] == 'e')
803       {
804 	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
805 	  return 1;
806       }
807     else if (fmt[i] == 'E')
808       {
809 	int j;
810 	for (j = 0; j < XVECLEN (x, i); j++)
811 	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
812 	    return 1;
813       }
814   return 0;
815 }
816 
817 /* Return the CALL in INSN if there is one.  */
818 
819 rtx
820 get_call_rtx_from (const rtx_insn *insn)
821 {
822   rtx x = PATTERN (insn);
823   if (GET_CODE (x) == PARALLEL)
824     x = XVECEXP (x, 0, 0);
825   if (GET_CODE (x) == SET)
826     x = SET_SRC (x);
827   if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
828     return x;
829   return NULL_RTX;
830 }
831 
832 /* Get the declaration of the function called by INSN.  */
833 
834 tree
835 get_call_fndecl (const rtx_insn *insn)
836 {
837   rtx note, datum;
838 
839   note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
840   if (note == NULL_RTX)
841     return NULL_TREE;
842 
843   datum = XEXP (note, 0);
844   if (datum != NULL_RTX)
845     return SYMBOL_REF_DECL (datum);
846 
847   return NULL_TREE;
848 }
849 
850 /* Return the value of the integer term in X, if one is apparent;
851    otherwise return 0.
852    Only obvious integer terms are detected.
853    This is used in cse.c with the `related_value' field.  */
854 
855 HOST_WIDE_INT
856 get_integer_term (const_rtx x)
857 {
858   if (GET_CODE (x) == CONST)
859     x = XEXP (x, 0);
860 
861   if (GET_CODE (x) == MINUS
862       && CONST_INT_P (XEXP (x, 1)))
863     return - INTVAL (XEXP (x, 1));
864   if (GET_CODE (x) == PLUS
865       && CONST_INT_P (XEXP (x, 1)))
866     return INTVAL (XEXP (x, 1));
867   return 0;
868 }
869 
870 /* If X is a constant, return the value sans apparent integer term;
871    otherwise return 0.
872    Only obvious integer terms are detected.  */
873 
874 rtx
875 get_related_value (const_rtx x)
876 {
877   if (GET_CODE (x) != CONST)
878     return 0;
879   x = XEXP (x, 0);
880   if (GET_CODE (x) == PLUS
881       && CONST_INT_P (XEXP (x, 1)))
882     return XEXP (x, 0);
883   else if (GET_CODE (x) == MINUS
884 	   && CONST_INT_P (XEXP (x, 1)))
885     return XEXP (x, 0);
886   return 0;
887 }
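/* For example, given (const (plus (symbol_ref ("x")) (const_int 8))),
   get_integer_term returns 8 and get_related_value returns the
   (symbol_ref ("x")); for a MINUS the term is negated.  cse.c uses the
   pair to relate constants that differ only by such a term.  */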
888 
889 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
890    to somewhere in the same object or object_block as SYMBOL.  */
891 
892 bool
893 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
894 {
895   tree decl;
896 
897   if (GET_CODE (symbol) != SYMBOL_REF)
898     return false;
899 
900   if (offset == 0)
901     return true;
902 
903   if (offset > 0)
904     {
905       if (CONSTANT_POOL_ADDRESS_P (symbol)
906 	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
907 	return true;
908 
909       decl = SYMBOL_REF_DECL (symbol);
910       if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
911 	return true;
912     }
913 
914   if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
915       && SYMBOL_REF_BLOCK (symbol)
916       && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
917       && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
918 	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
919     return true;
920 
921   return false;
922 }
923 
924 /* Split X into a base and a constant offset, storing them in *BASE_OUT
925    and *OFFSET_OUT respectively.  */
926 
927 void
928 split_const (rtx x, rtx *base_out, rtx *offset_out)
929 {
930   if (GET_CODE (x) == CONST)
931     {
932       x = XEXP (x, 0);
933       if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
934 	{
935 	  *base_out = XEXP (x, 0);
936 	  *offset_out = XEXP (x, 1);
937 	  return;
938 	}
939     }
940   *base_out = x;
941   *offset_out = const0_rtx;
942 }
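/* For example, split_const on
   (const (plus (symbol_ref ("x")) (const_int 4))) stores
   (symbol_ref ("x")) in *BASE_OUT and (const_int 4) in *OFFSET_OUT;
   for any other X it stores X itself and const0_rtx.  */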
943 
944 /* Express integer value X as some value Y plus a polynomial offset,
945    where Y is either const0_rtx, X or something within X (as opposed
946    to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */
947 
948 rtx
949 strip_offset (rtx x, poly_int64_pod *offset_out)
950 {
951   rtx base = const0_rtx;
952   rtx test = x;
953   if (GET_CODE (test) == CONST)
954     test = XEXP (test, 0);
955   if (GET_CODE (test) == PLUS)
956     {
957       base = XEXP (test, 0);
958       test = XEXP (test, 1);
959     }
960   if (poly_int_rtx_p (test, offset_out))
961     return base;
962   *offset_out = 0;
963   return x;
964 }
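/* For example, strip_offset on
   (const (plus (symbol_ref ("x")) (const_int 4))) returns the
   (symbol_ref ("x")) with *OFFSET_OUT set to 4, while on a plain
   (const_int 12) it returns const0_rtx with *OFFSET_OUT set to 12.  */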
965 
966 /* Return the argument size in REG_ARGS_SIZE note X.  */
967 
968 poly_int64
969 get_args_size (const_rtx x)
970 {
971   gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
972   return rtx_to_poly_int64 (XEXP (x, 0));
973 }
974 
975 /* Return the number of places FIND appears within X.  If COUNT_DEST is
976    zero, we do not count occurrences inside the destination of a SET.  */
977 
978 int
979 count_occurrences (const_rtx x, const_rtx find, int count_dest)
980 {
981   int i, j;
982   enum rtx_code code;
983   const char *format_ptr;
984   int count;
985 
986   if (x == find)
987     return 1;
988 
989   code = GET_CODE (x);
990 
991   switch (code)
992     {
993     case REG:
994     CASE_CONST_ANY:
995     case SYMBOL_REF:
996     case CODE_LABEL:
997     case PC:
998     case CC0:
999       return 0;
1000 
1001     case EXPR_LIST:
1002       count = count_occurrences (XEXP (x, 0), find, count_dest);
1003       if (XEXP (x, 1))
1004 	count += count_occurrences (XEXP (x, 1), find, count_dest);
1005       return count;
1006 
1007     case MEM:
1008       if (MEM_P (find) && rtx_equal_p (x, find))
1009 	return 1;
1010       break;
1011 
1012     case SET:
1013       if (SET_DEST (x) == find && ! count_dest)
1014 	return count_occurrences (SET_SRC (x), find, count_dest);
1015       break;
1016 
1017     default:
1018       break;
1019     }
1020 
1021   format_ptr = GET_RTX_FORMAT (code);
1022   count = 0;
1023 
1024   for (i = 0; i < GET_RTX_LENGTH (code); i++)
1025     {
1026       switch (*format_ptr++)
1027 	{
1028 	case 'e':
1029 	  count += count_occurrences (XEXP (x, i), find, count_dest);
1030 	  break;
1031 
1032 	case 'E':
1033 	  for (j = 0; j < XVECLEN (x, i); j++)
1034 	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
1035 	  break;
1036 	}
1037     }
1038   return count;
1039 }
1040 
1041 
1042 /* Return TRUE if OP is a register or subreg of a register that
1043    holds an unsigned quantity.  Otherwise, return FALSE.  */
1044 
1045 bool
1046 unsigned_reg_p (rtx op)
1047 {
1048   if (REG_P (op)
1049       && REG_EXPR (op)
1050       && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1051     return true;
1052 
1053   if (GET_CODE (op) == SUBREG
1054       && SUBREG_PROMOTED_SIGN (op))
1055     return true;
1056 
1057   return false;
1058 }
1059 
1060 
1061 /* Nonzero if register REG appears somewhere within IN.
1062    Also works if REG is not a register; in this case it checks
1063    for a subexpression of IN that is Lisp "equal" to REG.  */
1064 
1065 int
1066 reg_mentioned_p (const_rtx reg, const_rtx in)
1067 {
1068   const char *fmt;
1069   int i;
1070   enum rtx_code code;
1071 
1072   if (in == 0)
1073     return 0;
1074 
1075   if (reg == in)
1076     return 1;
1077 
1078   if (GET_CODE (in) == LABEL_REF)
1079     return reg == label_ref_label (in);
1080 
1081   code = GET_CODE (in);
1082 
1083   switch (code)
1084     {
1085       /* Compare registers by number.  */
1086     case REG:
1087       return REG_P (reg) && REGNO (in) == REGNO (reg);
1088 
1089       /* These codes have no constituent expressions
1090 	 and are unique.  */
1091     case SCRATCH:
1092     case CC0:
1093     case PC:
1094       return 0;
1095 
1096     CASE_CONST_ANY:
1097       /* These are kept unique for a given value.  */
1098       return 0;
1099 
1100     default:
1101       break;
1102     }
1103 
1104   if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1105     return 1;
1106 
1107   fmt = GET_RTX_FORMAT (code);
1108 
1109   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1110     {
1111       if (fmt[i] == 'E')
1112 	{
1113 	  int j;
1114 	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1115 	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1116 	      return 1;
1117 	}
1118       else if (fmt[i] == 'e'
1119 	       && reg_mentioned_p (reg, XEXP (in, i)))
1120 	return 1;
1121     }
1122   return 0;
1123 }
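/* For example, reg_mentioned_p of (reg:SI 1) in
   (plus:SI (reg:SI 1) (const_int 4)) is 1; since REGs are compared by
   number alone, (reg:QI 1) inside IN would match as well.  */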
1124 
1125 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1126    no CODE_LABEL insn.  */
1127 
1128 int
1129 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1130 {
1131   rtx_insn *p;
1132   if (beg == end)
1133     return 0;
1134   for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1135     if (LABEL_P (p))
1136       return 0;
1137   return 1;
1138 }
1139 
1140 /* Nonzero if register REG is used in an insn between
1141    FROM_INSN and TO_INSN (exclusive of those two).  */
1142 
1143 int
1144 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1145 		    const rtx_insn *to_insn)
1146 {
1147   rtx_insn *insn;
1148 
1149   if (from_insn == to_insn)
1150     return 0;
1151 
1152   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1153     if (NONDEBUG_INSN_P (insn)
1154 	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
1155 	   || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1156       return 1;
1157   return 0;
1158 }
1159 
1160 /* Nonzero if the old value of X, a register, is referenced in BODY.  If X
1161    is entirely replaced by a new value and the only use is as a SET_DEST,
1162    we do not consider it a reference.  */
1163 
1164 int
1165 reg_referenced_p (const_rtx x, const_rtx body)
1166 {
1167   int i;
1168 
1169   switch (GET_CODE (body))
1170     {
1171     case SET:
1172       if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1173 	return 1;
1174 
1175       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1176 	 of a REG that occupies all of the REG, the insn references X if
1177 	 it is mentioned in the destination.  */
1178       if (GET_CODE (SET_DEST (body)) != CC0
1179 	  && GET_CODE (SET_DEST (body)) != PC
1180 	  && !REG_P (SET_DEST (body))
1181 	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
1182 		&& REG_P (SUBREG_REG (SET_DEST (body)))
1183 		&& !read_modify_subreg_p (SET_DEST (body)))
1184 	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
1185 	return 1;
1186       return 0;
1187 
1188     case ASM_OPERANDS:
1189       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1190 	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1191 	  return 1;
1192       return 0;
1193 
1194     case CALL:
1195     case USE:
1196     case IF_THEN_ELSE:
1197       return reg_overlap_mentioned_p (x, body);
1198 
1199     case TRAP_IF:
1200       return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1201 
1202     case PREFETCH:
1203       return reg_overlap_mentioned_p (x, XEXP (body, 0));
1204 
1205     case UNSPEC:
1206     case UNSPEC_VOLATILE:
1207       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1208 	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1209 	  return 1;
1210       return 0;
1211 
1212     case PARALLEL:
1213       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1214 	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1215 	  return 1;
1216       return 0;
1217 
1218     case CLOBBER:
1219       if (MEM_P (XEXP (body, 0)))
1220 	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1221 	  return 1;
1222       return 0;
1223 
1224     case COND_EXEC:
1225       if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1226 	return 1;
1227       return reg_referenced_p (x, COND_EXEC_CODE (body));
1228 
1229     default:
1230       return 0;
1231     }
1232 }
1233 
1234 /* Nonzero if register REG is set or clobbered in an insn between
1235    FROM_INSN and TO_INSN (exclusive of those two).  */
1236 
1237 int
1238 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1239 		   const rtx_insn *to_insn)
1240 {
1241   const rtx_insn *insn;
1242 
1243   if (from_insn == to_insn)
1244     return 0;
1245 
1246   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1247     if (INSN_P (insn) && reg_set_p (reg, insn))
1248       return 1;
1249   return 0;
1250 }
1251 
1252 /* Return true if REG is set or clobbered inside INSN.  */
1253 
1254 int
1255 reg_set_p (const_rtx reg, const_rtx insn)
1256 {
1257   /* After delay slot handling, call and branch insns might be in a
1258      sequence.  Check all the elements there.  */
1259   if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1260     {
1261       for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1262 	if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1263 	  return true;
1264 
1265       return false;
1266     }
1267 
1268   /* We can be passed an insn or part of one.  If we are passed an insn,
1269      check if a side-effect of the insn clobbers REG.  */
1270   if (INSN_P (insn)
1271       && (FIND_REG_INC_NOTE (insn, reg)
1272 	  || (CALL_P (insn)
1273 	      && ((REG_P (reg)
1274 		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
1275 		   && (insn_callee_abi (as_a<const rtx_insn *> (insn))
1276 		       .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
1277 		  || MEM_P (reg)
1278 		  || find_reg_fusage (insn, CLOBBER, reg)))))
1279     return true;
1280 
1281   /* There are no REG_INC notes for SP autoinc.  */
1282   if (reg == stack_pointer_rtx && INSN_P (insn))
1283     {
1284       subrtx_var_iterator::array_type array;
1285       FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1286 	{
1287 	  rtx mem = *iter;
1288 	  if (mem
1289 	      && MEM_P (mem)
1290 	      && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1291 	    {
1292 	      if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1293 		return true;
1294 	      iter.skip_subrtxes ();
1295 	    }
1296 	}
1297     }
1298 
1299   return set_of (reg, insn) != NULL_RTX;
1300 }
1301 
1302 /* Similar to reg_set_between_p, but check all registers in X.  Return 0
1303    only if none of them are modified between START and END.  Return 1 if
1304    X contains a MEM; this routine does use memory aliasing.  */
1305 
1306 int
1307 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1308 {
1309   const enum rtx_code code = GET_CODE (x);
1310   const char *fmt;
1311   int i, j;
1312   rtx_insn *insn;
1313 
1314   if (start == end)
1315     return 0;
1316 
1317   switch (code)
1318     {
1319     CASE_CONST_ANY:
1320     case CONST:
1321     case SYMBOL_REF:
1322     case LABEL_REF:
1323       return 0;
1324 
1325     case PC:
1326     case CC0:
1327       return 1;
1328 
1329     case MEM:
1330       if (modified_between_p (XEXP (x, 0), start, end))
1331 	return 1;
1332       if (MEM_READONLY_P (x))
1333 	return 0;
1334       for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1335 	if (memory_modified_in_insn_p (x, insn))
1336 	  return 1;
1337       return 0;
1338 
1339     case REG:
1340       return reg_set_between_p (x, start, end);
1341 
1342     default:
1343       break;
1344     }
1345 
1346   fmt = GET_RTX_FORMAT (code);
1347   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1348     {
1349       if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1350 	return 1;
1351 
1352       else if (fmt[i] == 'E')
1353 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1354 	  if (modified_between_p (XVECEXP (x, i, j), start, end))
1355 	    return 1;
1356     }
1357 
1358   return 0;
1359 }
1360 
1361 /* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
1362    of them are modified in INSN.  Return 1 if X contains a MEM; this routine
1363    does use memory aliasing.  */
1364 
1365 int
1366 modified_in_p (const_rtx x, const_rtx insn)
1367 {
1368   const enum rtx_code code = GET_CODE (x);
1369   const char *fmt;
1370   int i, j;
1371 
1372   switch (code)
1373     {
1374     CASE_CONST_ANY:
1375     case CONST:
1376     case SYMBOL_REF:
1377     case LABEL_REF:
1378       return 0;
1379 
1380     case PC:
1381     case CC0:
1382       return 1;
1383 
1384     case MEM:
1385       if (modified_in_p (XEXP (x, 0), insn))
1386 	return 1;
1387       if (MEM_READONLY_P (x))
1388 	return 0;
1389       if (memory_modified_in_insn_p (x, insn))
1390 	return 1;
1391       return 0;
1392 
1393     case REG:
1394       return reg_set_p (x, insn);
1395 
1396     default:
1397       break;
1398     }
1399 
1400   fmt = GET_RTX_FORMAT (code);
1401   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1402     {
1403       if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1404 	return 1;
1405 
1406       else if (fmt[i] == 'E')
1407 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1408 	  if (modified_in_p (XVECEXP (x, i, j), insn))
1409 	    return 1;
1410     }
1411 
1412   return 0;
1413 }
1414 
1415 /* Return true if X is a SUBREG and if storing a value to X would
1416    preserve some of its SUBREG_REG.  For example, on a normal 32-bit
1417    target, using a SUBREG to store to one half of a DImode REG would
1418    preserve the other half.  */
1419 
1420 bool
1421 read_modify_subreg_p (const_rtx x)
1422 {
1423   if (GET_CODE (x) != SUBREG)
1424     return false;
1425   poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1426   poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1427   poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1428   /* The inner and outer modes of a subreg must be ordered, so that we
1429      can tell whether they're paradoxical or partial.  */
1430   gcc_checking_assert (ordered_p (isize, osize));
1431   return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1432 }
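/* For example, on a 32-bit target where REGMODE_NATURAL_SIZE is the
   word size, storing to (subreg:SI (reg:DI 100) 4) writes only half of
   the DImode register, so read_modify_subreg_p returns true; a
   paradoxical subreg such as (subreg:DI (reg:SI 100) 0) yields
   false.  */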
1433 
1434 /* Helper function for set_of.  */
1435 struct set_of_data
1436   {
1437     const_rtx found;
1438     const_rtx pat;
1439   };
1440 
1441 static void
1442 set_of_1 (rtx x, const_rtx pat, void *data1)
1443 {
1444   struct set_of_data *const data = (struct set_of_data *) (data1);
1445   if (rtx_equal_p (x, data->pat)
1446       || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1447     data->found = pat;
1448 }
1449 
1450 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1451    (either directly or via STRICT_LOW_PART and similar modifiers).  */
1452 const_rtx
1453 set_of (const_rtx pat, const_rtx insn)
1454 {
1455   struct set_of_data data;
1456   data.found = NULL_RTX;
1457   data.pat = pat;
1458   note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1459   return data.found;
1460 }
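/* A minimal usage sketch: for an insn whose pattern is
   (set (reg:SI 1) (reg:SI 2)), set_of ((reg:SI 1), insn) returns that
   SET, while set_of ((reg:SI 2), insn) returns NULL_RTX because the
   source is only read, never stored.  */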
1461 
1462 /* Add all hard registers in X to *PSET.  */
1463 void
1464 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1465 {
1466   subrtx_iterator::array_type array;
1467   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1468     {
1469       const_rtx x = *iter;
1470       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1471 	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1472     }
1473 }
1474 
1475 /* This function, called through note_stores, collects sets and
1476    clobbers of hard registers in a HARD_REG_SET, which is pointed to
1477    by DATA.  */
1478 void
1479 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1480 {
1481   HARD_REG_SET *pset = (HARD_REG_SET *)data;
1482   if (REG_P (x) && HARD_REGISTER_P (x))
1483     add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1484 }
1485 
1486 /* Examine INSN, and compute the set of hard registers written by it.
1487    Store it in *PSET.  Should only be called after reload.
1488 
1489    IMPLICIT is true if we should include registers that are fully-clobbered
1490    by calls.  This should be used with caution, since it doesn't include
1491    partially-clobbered registers.  */
1492 void
1493 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1494 {
1495   rtx link;
1496 
1497   CLEAR_HARD_REG_SET (*pset);
1498   note_stores (insn, record_hard_reg_sets, pset);
1499   if (CALL_P (insn) && implicit)
1500     *pset |= insn_callee_abi (insn).full_reg_clobbers ();
1501   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1502     if (REG_NOTE_KIND (link) == REG_INC)
1503       record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1504 }
1505 
1506 /* Like record_hard_reg_sets, but called through note_uses.  */
1507 void
1508 record_hard_reg_uses (rtx *px, void *data)
1509 {
1510   find_all_hard_regs (*px, (HARD_REG_SET *) data);
1511 }
1512 
1513 /* Given an INSN, return a SET expression if this insn has only a single SET.
1514    It may also have CLOBBERs, USEs, or SETs whose outputs
1515    will not be used, which we ignore.  */
1516 
1517 rtx
1518 single_set_2 (const rtx_insn *insn, const_rtx pat)
1519 {
1520   rtx set = NULL;
1521   int set_verified = 1;
1522   int i;
1523 
1524   if (GET_CODE (pat) == PARALLEL)
1525     {
1526       for (i = 0; i < XVECLEN (pat, 0); i++)
1527 	{
1528 	  rtx sub = XVECEXP (pat, 0, i);
1529 	  switch (GET_CODE (sub))
1530 	    {
1531 	    case USE:
1532 	    case CLOBBER:
1533 	      break;
1534 
1535 	    case SET:
1536 	      /* We can consider insns having multiple sets, where all
1537 		 but one are dead, as single set insns.  In the common case
1538 		 only a single set is present in the pattern, so we want
1539 		 to avoid checking for REG_UNUSED notes unless necessary.
1540 
1541 		 When we reach a set for the first time, we just expect it
1542 		 is the single set we are looking for; only when more
1543 		 sets are found in the insn do we check them.  */
1544 	      if (!set_verified)
1545 		{
1546 		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1547 		      && !side_effects_p (set))
1548 		    set = NULL;
1549 		  else
1550 		    set_verified = 1;
1551 		}
1552 	      if (!set)
1553 		set = sub, set_verified = 0;
1554 	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1555 		       || side_effects_p (sub))
1556 		return NULL_RTX;
1557 	      break;
1558 
1559 	    default:
1560 	      return NULL_RTX;
1561 	    }
1562 	}
1563     }
1564   return set;
1565 }
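/* For example, a PARALLEL containing (set (reg:SI 1) (reg:SI 2))
   together with a CLOBBER of the flags register still counts as a
   single set and the SET is returned; a second live SET in the same
   PARALLEL yields NULL_RTX.  The common bare-SET pattern is handled by
   the single_set wrapper in rtl.h before this function is reached.  */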
1566 
1567 /* Given an INSN, return nonzero if it has more than one SET, else return
1568    zero.  */
1569 
1570 int
1571 multiple_sets (const_rtx insn)
1572 {
1573   int found;
1574   int i;
1575 
1576   /* INSN must be an insn.  */
1577   if (! INSN_P (insn))
1578     return 0;
1579 
1580   /* Only a PARALLEL can have multiple SETs.  */
1581   if (GET_CODE (PATTERN (insn)) == PARALLEL)
1582     {
1583       for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1584 	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1585 	  {
1586 	    /* If we have already found a SET, then return now.  */
1587 	    if (found)
1588 	      return 1;
1589 	    else
1590 	      found = 1;
1591 	  }
1592     }
1593 
1594   /* Either zero or one SET.  */
1595   return 0;
1596 }
1597 
1598 /* Return nonzero if the destination of SET equals the source
1599    and there are no side effects.  */
1600 
1601 int
1602 set_noop_p (const_rtx set)
1603 {
1604   rtx src = SET_SRC (set);
1605   rtx dst = SET_DEST (set);
1606 
1607   if (dst == pc_rtx && src == pc_rtx)
1608     return 1;
1609 
1610   if (MEM_P (dst) && MEM_P (src))
1611     return rtx_equal_p (dst, src) && !side_effects_p (dst);
1612 
1613   if (GET_CODE (dst) == ZERO_EXTRACT)
1614     return rtx_equal_p (XEXP (dst, 0), src)
1615 	   && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1616 	   && !side_effects_p (src);
1617 
1618   if (GET_CODE (dst) == STRICT_LOW_PART)
1619     dst = XEXP (dst, 0);
1620 
1621   if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1622     {
1623       if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1624 	return 0;
1625       src = SUBREG_REG (src);
1626       dst = SUBREG_REG (dst);
1627     }
1628 
1629   /* It is a no-op if the destination overlaps the selected src vector
1630      elements.  */
1631   if (GET_CODE (src) == VEC_SELECT
1632       && REG_P (XEXP (src, 0)) && REG_P (dst)
1633       && HARD_REGISTER_P (XEXP (src, 0))
1634       && HARD_REGISTER_P (dst))
1635     {
1636       int i;
1637       rtx par = XEXP (src, 1);
1638       rtx src0 = XEXP (src, 0);
1639       poly_int64 c0;
1640       if (!poly_int_rtx_p (XVECEXP (par, 0, 0), &c0))
1641 	return 0;
1642       poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1643 
1644       for (i = 1; i < XVECLEN (par, 0); i++)
1645 	{
1646 	  poly_int64 c0i;
1647 	  if (!poly_int_rtx_p (XVECEXP (par, 0, i), &c0i)
1648 	      || maybe_ne (c0i, c0 + i))
1649 	    return 0;
1650 	}
1651       return
1652 	REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
1653 	&& simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1654 				  offset, GET_MODE (dst)) == (int) REGNO (dst);
1655     }
1656 
1657   return (REG_P (src) && REG_P (dst)
1658 	  && REGNO (src) == REGNO (dst));
1659 }
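/* For example, (set (reg:SI 1) (reg:SI 1)) is a no-op, as is a SET of
   a MEM to an identical MEM without side effects, whereas
   (set (reg:SI 1) (reg:SI 2)) is not.  */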
1660 
1661 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1662    value to itself.  */
1663 
1664 int
1665 noop_move_p (const rtx_insn *insn)
1666 {
1667   rtx pat = PATTERN (insn);
1668 
1669   if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1670     return 1;
1671 
1672   /* Insns carrying these notes are useful later on.  */
1673   if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1674     return 0;
1675 
1676   /* Check the code to be executed for COND_EXEC.  */
1677   if (GET_CODE (pat) == COND_EXEC)
1678     pat = COND_EXEC_CODE (pat);
1679 
1680   if (GET_CODE (pat) == SET && set_noop_p (pat))
1681     return 1;
1682 
1683   if (GET_CODE (pat) == PARALLEL)
1684     {
1685       int i;
1686       /* If nothing but SETs of registers to themselves,
1687 	 this insn can also be deleted.  */
1688       for (i = 0; i < XVECLEN (pat, 0); i++)
1689 	{
1690 	  rtx tem = XVECEXP (pat, 0, i);
1691 
1692 	  if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
1693 	    continue;
1694 
1695 	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1696 	    return 0;
1697 	}
1698 
1699       return 1;
1700     }
1701   return 0;
1702 }
1703 
1704 
1705 /* Return nonzero if register in range [REGNO, ENDREGNO)
1706    appears either explicitly or implicitly in X
1707    other than being stored into.
1708 
1709    References contained within the substructure at LOC do not count.
1710    LOC may be zero, meaning don't ignore anything.  */
1711 
1712 bool
1713 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1714 		   rtx *loc)
1715 {
1716   int i;
1717   unsigned int x_regno;
1718   RTX_CODE code;
1719   const char *fmt;
1720 
1721  repeat:
1722   /* The contents of a REG_NONNEG note are always zero, so we must come here
1723      upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
1724   if (x == 0)
1725     return false;
1726 
1727   code = GET_CODE (x);
1728 
1729   switch (code)
1730     {
1731     case REG:
1732       x_regno = REGNO (x);
1733 
1734       /* If we are modifying the stack, frame, or argument pointer, it will
1735 	 clobber a virtual register.  In fact, we could be more precise,
1736 	 but it isn't worth it.  */
1737       if ((x_regno == STACK_POINTER_REGNUM
1738 	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1739 	       && x_regno == ARG_POINTER_REGNUM)
1740 	   || x_regno == FRAME_POINTER_REGNUM)
1741 	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1742 	return true;
1743 
1744       return endregno > x_regno && regno < END_REGNO (x);
1745 
1746     case SUBREG:
1747       /* If this is a SUBREG of a hard reg, we can see exactly which
1748 	 registers are being modified.  Otherwise, handle normally.  */
1749       if (REG_P (SUBREG_REG (x))
1750 	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1751 	{
1752 	  unsigned int inner_regno = subreg_regno (x);
1753 	  unsigned int inner_endregno
1754 	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1755 			     ? subreg_nregs (x) : 1);
1756 
1757 	  return endregno > inner_regno && regno < inner_endregno;
1758 	}
1759       break;
1760 
1761     case CLOBBER:
1762     case SET:
1763       if (&SET_DEST (x) != loc
1764 	  /* Note setting a SUBREG counts as referring to the REG it is in for
1765 	     a pseudo but not for hard registers since we can
1766 	     treat each word individually.  */
1767 	  && ((GET_CODE (SET_DEST (x)) == SUBREG
1768 	       && loc != &SUBREG_REG (SET_DEST (x))
1769 	       && REG_P (SUBREG_REG (SET_DEST (x)))
1770 	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1771 	       && refers_to_regno_p (regno, endregno,
1772 				     SUBREG_REG (SET_DEST (x)), loc))
1773 	      || (!REG_P (SET_DEST (x))
1774 		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1775 	return true;
1776 
1777       if (code == CLOBBER || loc == &SET_SRC (x))
1778 	return false;
1779       x = SET_SRC (x);
1780       goto repeat;
1781 
1782     default:
1783       break;
1784     }
1785 
1786   /* X does not match, so try its subexpressions.  */
1787 
1788   fmt = GET_RTX_FORMAT (code);
1789   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1790     {
1791       if (fmt[i] == 'e' && loc != &XEXP (x, i))
1792 	{
1793 	  if (i == 0)
1794 	    {
1795 	      x = XEXP (x, 0);
1796 	      goto repeat;
1797 	    }
1798 	  else
1799 	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1800 	      return true;
1801 	}
1802       else if (fmt[i] == 'E')
1803 	{
1804 	  int j;
1805 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1806 	    if (loc != &XVECEXP (x, i, j)
1807 		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1808 	      return true;
1809 	}
1810     }
1811   return false;
1812 }
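
/* Usage sketch (illustrative; the pattern and register numbers are
   hypothetical, and assume SImode occupies a single register):

     pat = (set (reg:SI 3) (plus:SI (reg:SI 1) (reg:SI 2)))

   refers_to_regno_p (1, 2, pat, NULL) is true because register 1 is
   used in the source, while refers_to_regno_p (3, 4, pat, NULL) is
   false because register 3 is only stored into.  */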
1813 
1814 /* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
1815    we check if any register number in X conflicts with the relevant register
1816    numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
1817    contains a MEM (we don't bother checking for memory addresses that can't
1818    conflict, because we expect this to be a rare case).  */
1819 
1820 int
1821 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1822 {
1823   unsigned int regno, endregno;
1824 
1825   /* If either argument is a constant, then modifying X cannot
1826      affect IN.  Here we look at IN; we can profitably combine
1827      CONSTANT_P (x) with the switch statement below.  */
1828   if (CONSTANT_P (in))
1829     return 0;
1830 
1831  recurse:
1832   switch (GET_CODE (x))
1833     {
1834     case CLOBBER:
1835     case STRICT_LOW_PART:
1836     case ZERO_EXTRACT:
1837     case SIGN_EXTRACT:
1838       /* Overly conservative.  */
1839       x = XEXP (x, 0);
1840       goto recurse;
1841 
1842     case SUBREG:
1843       regno = REGNO (SUBREG_REG (x));
1844       if (regno < FIRST_PSEUDO_REGISTER)
1845 	regno = subreg_regno (x);
1846       endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1847 			  ? subreg_nregs (x) : 1);
1848       goto do_reg;
1849 
1850     case REG:
1851       regno = REGNO (x);
1852       endregno = END_REGNO (x);
1853     do_reg:
1854       return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1855 
1856     case MEM:
1857       {
1858 	const char *fmt;
1859 	int i;
1860 
1861 	if (MEM_P (in))
1862 	  return 1;
1863 
1864 	fmt = GET_RTX_FORMAT (GET_CODE (in));
1865 	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1866 	  if (fmt[i] == 'e')
1867 	    {
1868 	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1869 		return 1;
1870 	    }
1871 	  else if (fmt[i] == 'E')
1872 	    {
1873 	      int j;
1874 	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1875 		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1876 		  return 1;
1877 	    }
1878 
1879 	return 0;
1880       }
1881 
1882     case SCRATCH:
1883     case PC:
1884     case CC0:
1885       return reg_mentioned_p (x, in);
1886 
1887     case PARALLEL:
1888       {
1889 	int i;
1890 
1891 	/* If any register in here refers to it, we return true.  */
1892 	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1893 	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
1894 	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1895 	    return 1;
1896 	return 0;
1897       }
1898 
1899     default:
1900       gcc_assert (CONSTANT_P (x));
1901       return 0;
1902     }
1903 }
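
/* Usage sketch (illustrative; DEST and SRC are hypothetical operands
   of two candidate insns).  A pass deciding whether two insns can be
   reordered typically asks

     if (reg_overlap_mentioned_p (dest, src))
       ...

   i.e. "could writing DEST change the value read through SRC?".  */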
1904 
1905 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1906    (X would be the pattern of an insn).  DATA is an arbitrary pointer,
1907    ignored by note_stores, but passed to FUN.
1908 
1909    FUN receives three arguments:
1910    1. the REG, MEM, CC0 or PC being stored in or clobbered,
1911    2. the SET or CLOBBER rtx that does the store,
1912    3. the pointer DATA provided to note_stores.
1913 
1914   If the item being stored in or clobbered is a SUBREG of a hard register,
1915   the SUBREG will be passed.  */
1916 
1917 void
1918 note_pattern_stores (const_rtx x,
1919 		     void (*fun) (rtx, const_rtx, void *), void *data)
1920 {
1921   int i;
1922 
1923   if (GET_CODE (x) == COND_EXEC)
1924     x = COND_EXEC_CODE (x);
1925 
1926   if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1927     {
1928       rtx dest = SET_DEST (x);
1929 
1930       while ((GET_CODE (dest) == SUBREG
1931 	      && (!REG_P (SUBREG_REG (dest))
1932 		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1933 	     || GET_CODE (dest) == ZERO_EXTRACT
1934 	     || GET_CODE (dest) == STRICT_LOW_PART)
1935 	dest = XEXP (dest, 0);
1936 
1937       /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1938 	 each of whose first operand is a register.  */
1939       if (GET_CODE (dest) == PARALLEL)
1940 	{
1941 	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1942 	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1943 	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1944 	}
1945       else
1946 	(*fun) (dest, x, data);
1947     }
1948 
1949   else if (GET_CODE (x) == PARALLEL)
1950     for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1951       note_pattern_stores (XVECEXP (x, 0, i), fun, data);
1952 }
1953 
1954 /* Same, but for an instruction.  If the instruction is a call, include
1955    any CLOBBERs in its CALL_INSN_FUNCTION_USAGE.  */
1956 
1957 void
1958 note_stores (const rtx_insn *insn,
1959 	     void (*fun) (rtx, const_rtx, void *), void *data)
1960 {
1961   if (CALL_P (insn))
1962     for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
1963 	 link; link = XEXP (link, 1))
1964       if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1965 	note_pattern_stores (XEXP (link, 0), fun, data);
1966   note_pattern_stores (PATTERN (insn), fun, data);
1967 }
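
/* Usage sketch (illustrative; count_reg_stores and the counter N are
   hypothetical):

     static void
     count_reg_stores (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
                       void *data)
     {
       if (REG_P (dest))
         ++*(int *) data;
     }

     int n = 0;
     note_stores (insn, count_reg_stores, &n);

   Afterwards N holds the number of register destinations stored into or
   clobbered by INSN, including CLOBBERs from
   CALL_INSN_FUNCTION_USAGE.  */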
1968 
1969 /* Like note_stores, but call FUN for each expression that is being
1970    referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
1971    FUN for each expression, not any interior subexpressions.  FUN receives a
1972    pointer to the expression and the DATA passed to this function.
1973 
1974    Note that this is not quite the same test as that done in reg_referenced_p
1975    since that considers something as being referenced if it is being
1976    partially set, while we do not.  */
1977 
1978 void
1979 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1980 {
1981   rtx body = *pbody;
1982   int i;
1983 
1984   switch (GET_CODE (body))
1985     {
1986     case COND_EXEC:
1987       (*fun) (&COND_EXEC_TEST (body), data);
1988       note_uses (&COND_EXEC_CODE (body), fun, data);
1989       return;
1990 
1991     case PARALLEL:
1992       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1993 	note_uses (&XVECEXP (body, 0, i), fun, data);
1994       return;
1995 
1996     case SEQUENCE:
1997       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1998 	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1999       return;
2000 
2001     case USE:
2002       (*fun) (&XEXP (body, 0), data);
2003       return;
2004 
2005     case ASM_OPERANDS:
2006       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
2007 	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
2008       return;
2009 
2010     case TRAP_IF:
2011       (*fun) (&TRAP_CONDITION (body), data);
2012       return;
2013 
2014     case PREFETCH:
2015       (*fun) (&XEXP (body, 0), data);
2016       return;
2017 
2018     case UNSPEC:
2019     case UNSPEC_VOLATILE:
2020       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2021 	(*fun) (&XVECEXP (body, 0, i), data);
2022       return;
2023 
2024     case CLOBBER:
2025       if (MEM_P (XEXP (body, 0)))
2026 	(*fun) (&XEXP (XEXP (body, 0), 0), data);
2027       return;
2028 
2029     case SET:
2030       {
2031 	rtx dest = SET_DEST (body);
2032 
2033 	/* For sets we process everything in the source plus registers in the
2034 	   memory expression of the store and the operands of a ZERO_EXTRACT.  */
2035 	(*fun) (&SET_SRC (body), data);
2036 
2037 	if (GET_CODE (dest) == ZERO_EXTRACT)
2038 	  {
2039 	    (*fun) (&XEXP (dest, 1), data);
2040 	    (*fun) (&XEXP (dest, 2), data);
2041 	  }
2042 
2043 	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2044 	  dest = XEXP (dest, 0);
2045 
2046 	if (MEM_P (dest))
2047 	  (*fun) (&XEXP (dest, 0), data);
2048       }
2049       return;
2050 
2051     default:
2052       /* All the other possibilities never store.  */
2053       (*fun) (pbody, data);
2054       return;
2055     }
2056 }
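
/* Usage sketch (illustrative; mark_used and USED_REGS are hypothetical).
   The callback receives a pointer to each used expression, so it can
   inspect or even replace it in place; remember that FUN sees whole
   expressions, not interior subexpressions:

     static void
     mark_used (rtx *loc, void *data)
     {
       if (REG_P (*loc))
         bitmap_set_bit ((bitmap) data, REGNO (*loc));
     }

     note_uses (&PATTERN (insn), mark_used, used_regs);  */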
2057 
2058 /* Return nonzero if X's old contents don't survive after INSN.
2059    This will be true if X is (cc0) or if X is a register and
2060    X dies in INSN or because INSN entirely sets X.
2061 
2062    "Entirely set" means set directly and not through a SUBREG, or
2063    ZERO_EXTRACT, so no trace of the old contents remains.
2064    Likewise, REG_INC does not count.
2065 
2066    REG may be a hard or pseudo reg.  Renumbering is not taken into account,
2067    but for this use that makes no difference, since regs don't overlap
2068    during their lifetimes.  Therefore, this function may be used
2069    at any time after deaths have been computed.
2070 
2071    If REG is a hard reg that occupies multiple machine registers, this
2072    function will only return 1 if each of those registers will be replaced
2073    by INSN.  */
2074 
2075 int
2076 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2077 {
2078   unsigned int regno, end_regno;
2079   unsigned int i;
2080 
2081   /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
2082   if (GET_CODE (x) == CC0)
2083     return 1;
2084 
2085   gcc_assert (REG_P (x));
2086 
2087   regno = REGNO (x);
2088   end_regno = END_REGNO (x);
2089   for (i = regno; i < end_regno; i++)
2090     if (! dead_or_set_regno_p (insn, i))
2091       return 0;
2092 
2093   return 1;
2094 }
2095 
2096 /* Return TRUE iff DEST is a register or subreg of a register, is a
2097    complete rather than read-modify-write destination, and contains
2098    register TEST_REGNO.  */
2099 
2100 static bool
2101 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2102 {
2103   unsigned int regno, endregno;
2104 
2105   if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2106     dest = SUBREG_REG (dest);
2107 
2108   if (!REG_P (dest))
2109     return false;
2110 
2111   regno = REGNO (dest);
2112   endregno = END_REGNO (dest);
2113   return (test_regno >= regno && test_regno < endregno);
2114 }
2115 
2116 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2117    any member matches the covers_regno_no_parallel_p criteria.  */
2118 
2119 static bool
2120 covers_regno_p (const_rtx dest, unsigned int test_regno)
2121 {
2122   if (GET_CODE (dest) == PARALLEL)
2123     {
2124       /* Some targets place small structures in registers for return
2125 	 values of functions, and those registers are wrapped in
2126 	 PARALLELs that we may see as the destination of a SET.  */
2127       int i;
2128 
2129       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2130 	{
2131 	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2132 	  if (inner != NULL_RTX
2133 	      && covers_regno_no_parallel_p (inner, test_regno))
2134 	    return true;
2135 	}
2136 
2137       return false;
2138     }
2139   else
2140     return covers_regno_no_parallel_p (dest, test_regno);
2141 }
2142 
2143 /* Utility function for dead_or_set_p to check an individual register.  */
2144 
2145 int
2146 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2147 {
2148   const_rtx pattern;
2149 
2150   /* See if there is a death note for something that includes TEST_REGNO.  */
2151   if (find_regno_note (insn, REG_DEAD, test_regno))
2152     return 1;
2153 
2154   if (CALL_P (insn)
2155       && find_regno_fusage (insn, CLOBBER, test_regno))
2156     return 1;
2157 
2158   pattern = PATTERN (insn);
2159 
2160   /* If a COND_EXEC is not executed, the value survives.  */
2161   if (GET_CODE (pattern) == COND_EXEC)
2162     return 0;
2163 
2164   if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2165     return covers_regno_p (SET_DEST (pattern), test_regno);
2166   else if (GET_CODE (pattern) == PARALLEL)
2167     {
2168       int i;
2169 
2170       for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2171 	{
2172 	  rtx body = XVECEXP (pattern, 0, i);
2173 
2174 	  if (GET_CODE (body) == COND_EXEC)
2175 	    body = COND_EXEC_CODE (body);
2176 
2177 	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2178 	      && covers_regno_p (SET_DEST (body), test_regno))
2179 	    return 1;
2180 	}
2181     }
2182 
2183   return 0;
2184 }
2185 
2186 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2187    If DATUM is nonzero, look for one whose datum is DATUM.  */
2188 
2189 rtx
2190 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2191 {
2192   rtx link;
2193 
2194   gcc_checking_assert (insn);
2195 
2196   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2197   if (! INSN_P (insn))
2198     return 0;
2199   if (datum == 0)
2200     {
2201       for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2202 	if (REG_NOTE_KIND (link) == kind)
2203 	  return link;
2204       return 0;
2205     }
2206 
2207   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2208     if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2209       return link;
2210   return 0;
2211 }
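
/* Usage sketch (illustrative): a pass that wants to know whether INSN
   has a known-equivalent expression typically writes

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note != NULL_RTX)
       ... XEXP (note, 0) is the equivalent expression ...

   passing NULL_RTX as DATUM so that any datum matches.  */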
2212 
2213 /* Return the reg-note of kind KIND in insn INSN which applies to register
2214    number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
2215    the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2216    it might be the case that the note overlaps REGNO.  */
2217 
2218 rtx
2219 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2220 {
2221   rtx link;
2222 
2223   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2224   if (! INSN_P (insn))
2225     return 0;
2226 
2227   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2228     if (REG_NOTE_KIND (link) == kind
2229 	/* Verify that it is a register, so that scratch and MEM won't cause a
2230 	   problem here.  */
2231 	&& REG_P (XEXP (link, 0))
2232 	&& REGNO (XEXP (link, 0)) <= regno
2233 	&& END_REGNO (XEXP (link, 0)) > regno)
2234       return link;
2235   return 0;
2236 }
2237 
2238 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2239    has such a note.  */
2240 
2241 rtx
2242 find_reg_equal_equiv_note (const_rtx insn)
2243 {
2244   rtx link;
2245 
2246   if (!INSN_P (insn))
2247     return 0;
2248 
2249   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2250     if (REG_NOTE_KIND (link) == REG_EQUAL
2251 	|| REG_NOTE_KIND (link) == REG_EQUIV)
2252       {
2253 	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2254 	   insns that have multiple sets.  Checking single_set to
2255 	   make sure of this is not the proper check, as explained
2256 	   in the comment in set_unique_reg_note.
2257 
2258 	   This should be changed into an assert.  */
2259 	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2260 	  return 0;
2261 	return link;
2262       }
2263   return NULL;
2264 }
2265 
2266 /* Check whether INSN is a single_set whose source is known to be
2267    equivalent to a constant.  Return that constant if so, otherwise
2268    return null.  */
2269 
2270 rtx
2271 find_constant_src (const rtx_insn *insn)
2272 {
2273   rtx note, set, x;
2274 
2275   set = single_set (insn);
2276   if (set)
2277     {
2278       x = avoid_constant_pool_reference (SET_SRC (set));
2279       if (CONSTANT_P (x))
2280 	return x;
2281     }
2282 
2283   note = find_reg_equal_equiv_note (insn);
2284   if (note && CONSTANT_P (XEXP (note, 0)))
2285     return XEXP (note, 0);
2286 
2287   return NULL_RTX;
2288 }
2289 
2290 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2291    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2292 
2293 int
2294 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2295 {
2296   /* If it's not a CALL_INSN, it can't possibly have a
2297      CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
2298   if (!CALL_P (insn))
2299     return 0;
2300 
2301   gcc_assert (datum);
2302 
2303   if (!REG_P (datum))
2304     {
2305       rtx link;
2306 
2307       for (link = CALL_INSN_FUNCTION_USAGE (insn);
2308 	   link;
2309 	   link = XEXP (link, 1))
2310 	if (GET_CODE (XEXP (link, 0)) == code
2311 	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2312 	  return 1;
2313     }
2314   else
2315     {
2316       unsigned int regno = REGNO (datum);
2317 
2318       /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2319 	 to pseudo registers, so don't bother checking.  */
2320 
2321       if (regno < FIRST_PSEUDO_REGISTER)
2322 	{
2323 	  unsigned int end_regno = END_REGNO (datum);
2324 	  unsigned int i;
2325 
2326 	  for (i = regno; i < end_regno; i++)
2327 	    if (find_regno_fusage (insn, code, i))
2328 	      return 1;
2329 	}
2330     }
2331 
2332   return 0;
2333 }
2334 
2335 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2336    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2337 
2338 int
2339 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2340 {
2341   rtx link;
2342 
2343   /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2344      to pseudo registers, so don't bother checking.  */
2345 
2346   if (regno >= FIRST_PSEUDO_REGISTER
2347       || !CALL_P (insn))
2348     return 0;
2349 
2350   for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2351     {
2352       rtx op, reg;
2353 
2354       if (GET_CODE (op = XEXP (link, 0)) == code
2355 	  && REG_P (reg = XEXP (op, 0))
2356 	  && REGNO (reg) <= regno
2357 	  && END_REGNO (reg) > regno)
2358 	return 1;
2359     }
2360 
2361   return 0;
2362 }
2363 
2364 
2365 /* Return true if KIND is an integer REG_NOTE.  */
2366 
2367 static bool
2368 int_reg_note_p (enum reg_note kind)
2369 {
2370   return kind == REG_BR_PROB;
2371 }
2372 
2373 /* Allocate a register note with kind KIND and datum DATUM.  LIST is
2374    stored as the pointer to the next register note.  */
2375 
2376 rtx
2377 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2378 {
2379   rtx note;
2380 
2381   gcc_checking_assert (!int_reg_note_p (kind));
2382   switch (kind)
2383     {
2384     case REG_CC_SETTER:
2385     case REG_CC_USER:
2386     case REG_LABEL_TARGET:
2387     case REG_LABEL_OPERAND:
2388     case REG_TM:
2389       /* These types of register notes use an INSN_LIST rather than an
2390 	 EXPR_LIST, so that copying is done right and dumps look
2391 	 better.  */
2392       note = alloc_INSN_LIST (datum, list);
2393       PUT_REG_NOTE_KIND (note, kind);
2394       break;
2395 
2396     default:
2397       note = alloc_EXPR_LIST (kind, datum, list);
2398       break;
2399     }
2400 
2401   return note;
2402 }
2403 
2404 /* Add register note with kind KIND and datum DATUM to INSN.  */
2405 
2406 void
2407 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2408 {
2409   REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2410 }
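
/* Usage sketch (illustrative; the constant is arbitrary): to record
   that INSN's single set leaves a known value in its destination, one
   might write

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));

   which prepends a new EXPR_LIST node to REG_NOTES (insn).  */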
2411 
2412 /* Add an integer register note with kind KIND and datum DATUM to INSN.  */
2413 
2414 void
2415 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2416 {
2417   gcc_checking_assert (int_reg_note_p (kind));
2418   REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2419 				       datum, REG_NOTES (insn));
2420 }
2421 
2422 /* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */
2423 
2424 void
2425 add_args_size_note (rtx_insn *insn, poly_int64 value)
2426 {
2427   gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2428   add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2429 }
2430 
2431 /* Add a register note like NOTE to INSN.  */
2432 
2433 void
2434 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2435 {
2436   if (GET_CODE (note) == INT_LIST)
2437     add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2438   else
2439     add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2440 }
2441 
2442 /* Duplicate NOTE and return the copy.  */
2443 rtx
2444 duplicate_reg_note (rtx note)
2445 {
2446   reg_note kind = REG_NOTE_KIND (note);
2447 
2448   if (GET_CODE (note) == INT_LIST)
2449     return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2450   else if (GET_CODE (note) == EXPR_LIST)
2451     return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2452   else
2453     return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2454 }
2455 
2456 /* Remove register note NOTE from the REG_NOTES of INSN.  */
2457 
2458 void
2459 remove_note (rtx_insn *insn, const_rtx note)
2460 {
2461   rtx link;
2462 
2463   if (note == NULL_RTX)
2464     return;
2465 
2466   if (REG_NOTES (insn) == note)
2467     REG_NOTES (insn) = XEXP (note, 1);
2468   else
2469     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2470       if (XEXP (link, 1) == note)
2471 	{
2472 	  XEXP (link, 1) = XEXP (note, 1);
2473 	  break;
2474 	}
2475 
2476   switch (REG_NOTE_KIND (note))
2477     {
2478     case REG_EQUAL:
2479     case REG_EQUIV:
2480       df_notes_rescan (insn);
2481       break;
2482     default:
2483       break;
2484     }
2485 }
2486 
2487 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2488    Return true if any note has been removed.  */
2489 
2490 bool
2491 remove_reg_equal_equiv_notes (rtx_insn *insn)
2492 {
2493   rtx *loc;
2494   bool ret = false;
2495 
2496   loc = &REG_NOTES (insn);
2497   while (*loc)
2498     {
2499       enum reg_note kind = REG_NOTE_KIND (*loc);
2500       if (kind == REG_EQUAL || kind == REG_EQUIV)
2501 	{
2502 	  *loc = XEXP (*loc, 1);
2503 	  ret = true;
2504 	}
2505       else
2506 	loc = &XEXP (*loc, 1);
2507     }
2508   return ret;
2509 }
2510 
2511 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */
2512 
2513 void
2514 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2515 {
2516   df_ref eq_use;
2517 
2518   if (!df)
2519     return;
2520 
2521   /* This loop is a little tricky.  We cannot just go down the chain because
2522      it is being modified by some actions in the loop.  So we just iterate
2523      over the head.  We plan to drain the list anyway.  */
2524   while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2525     {
2526       rtx_insn *insn = DF_REF_INSN (eq_use);
2527       rtx note = find_reg_equal_equiv_note (insn);
2528 
2529       /* This assert is generally triggered when someone deletes a REG_EQUAL
2530 	 or REG_EQUIV note by hacking the list manually rather than calling
2531 	 remove_note.  */
2532       gcc_assert (note);
2533 
2534       remove_note (insn, note);
2535     }
2536 }
2537 
2538 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2539    return true if it is found.  A simple equality test is used to determine if
2540    NODE matches.  */
2541 
2542 bool
2543 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2544 {
2545   const_rtx x;
2546 
2547   for (x = listp; x; x = XEXP (x, 1))
2548     if (node == XEXP (x, 0))
2549       return true;
2550 
2551   return false;
2552 }
2553 
2554 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2555    remove that entry from the list if it is found.
2556 
2557    A simple equality test is used to determine if NODE matches.  */
2558 
2559 void
2560 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2561 {
2562   rtx_expr_list *temp = *listp;
2563   rtx_expr_list *prev = NULL;
2564 
2565   while (temp)
2566     {
2567       if (node == temp->element ())
2568 	{
2569 	  /* Splice the node out of the list.  */
2570 	  if (prev)
2571 	    XEXP (prev, 1) = temp->next ();
2572 	  else
2573 	    *listp = temp->next ();
2574 
2575 	  return;
2576 	}
2577 
2578       prev = temp;
2579       temp = temp->next ();
2580     }
2581 }
2582 
2583 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2584    remove that entry from the list if it is found.
2585 
2586    A simple equality test is used to determine if NODE matches.  */
2587 
2588 void
2589 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2590 {
2591   rtx_insn_list *temp = *listp;
2592   rtx_insn_list *prev = NULL;
2593 
2594   while (temp)
2595     {
2596       if (node == temp->insn ())
2597 	{
2598 	  /* Splice the node out of the list.  */
2599 	  if (prev)
2600 	    XEXP (prev, 1) = temp->next ();
2601 	  else
2602 	    *listp = temp->next ();
2603 
2604 	  return;
2605 	}
2606 
2607       prev = temp;
2608       temp = temp->next ();
2609     }
2610 }
2611 
2612 /* Nonzero if X contains any volatile instructions.  These are instructions
2613    which may cause unpredictable machine state, and thus no
2614    instructions or register uses should be moved or combined across them.
2615    This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2616 
2617 int
2618 volatile_insn_p (const_rtx x)
2619 {
2620   const RTX_CODE code = GET_CODE (x);
2621   switch (code)
2622     {
2623     case LABEL_REF:
2624     case SYMBOL_REF:
2625     case CONST:
2626     CASE_CONST_ANY:
2627     case CC0:
2628     case PC:
2629     case REG:
2630     case SCRATCH:
2631     case CLOBBER:
2632     case ADDR_VEC:
2633     case ADDR_DIFF_VEC:
2634     case CALL:
2635     case MEM:
2636       return 0;
2637 
2638     case UNSPEC_VOLATILE:
2639       return 1;
2640 
2641     case ASM_INPUT:
2642     case ASM_OPERANDS:
2643       if (MEM_VOLATILE_P (x))
2644 	return 1;
2645 
2646     default:
2647       break;
2648     }
2649 
2650   /* Recursively scan the operands of this expression.  */
2651 
2652   {
2653     const char *const fmt = GET_RTX_FORMAT (code);
2654     int i;
2655 
2656     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2657       {
2658 	if (fmt[i] == 'e')
2659 	  {
2660 	    if (volatile_insn_p (XEXP (x, i)))
2661 	      return 1;
2662 	  }
2663 	else if (fmt[i] == 'E')
2664 	  {
2665 	    int j;
2666 	    for (j = 0; j < XVECLEN (x, i); j++)
2667 	      if (volatile_insn_p (XVECEXP (x, i, j)))
2668 		return 1;
2669 	  }
2670       }
2671   }
2672   return 0;
2673 }
2674 
2675 /* Nonzero if X contains any volatile memory references,
2676    UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
2677 
2678 int
2679 volatile_refs_p (const_rtx x)
2680 {
2681   const RTX_CODE code = GET_CODE (x);
2682   switch (code)
2683     {
2684     case LABEL_REF:
2685     case SYMBOL_REF:
2686     case CONST:
2687     CASE_CONST_ANY:
2688     case CC0:
2689     case PC:
2690     case REG:
2691     case SCRATCH:
2692     case CLOBBER:
2693     case ADDR_VEC:
2694     case ADDR_DIFF_VEC:
2695       return 0;
2696 
2697     case UNSPEC_VOLATILE:
2698       return 1;
2699 
2700     case MEM:
2701     case ASM_INPUT:
2702     case ASM_OPERANDS:
2703       if (MEM_VOLATILE_P (x))
2704 	return 1;
2705 
2706     default:
2707       break;
2708     }
2709 
2710   /* Recursively scan the operands of this expression.  */
2711 
2712   {
2713     const char *const fmt = GET_RTX_FORMAT (code);
2714     int i;
2715 
2716     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2717       {
2718 	if (fmt[i] == 'e')
2719 	  {
2720 	    if (volatile_refs_p (XEXP (x, i)))
2721 	      return 1;
2722 	  }
2723 	else if (fmt[i] == 'E')
2724 	  {
2725 	    int j;
2726 	    for (j = 0; j < XVECLEN (x, i); j++)
2727 	      if (volatile_refs_p (XVECEXP (x, i, j)))
2728 		return 1;
2729 	  }
2730       }
2731   }
2732   return 0;
2733 }
2734 
2735 /* Similar to above, except that it also rejects register pre- and post-
2736    incrementing.  */
2737 
2738 int
2739 side_effects_p (const_rtx x)
2740 {
2741   const RTX_CODE code = GET_CODE (x);
2742   switch (code)
2743     {
2744     case LABEL_REF:
2745     case SYMBOL_REF:
2746     case CONST:
2747     CASE_CONST_ANY:
2748     case CC0:
2749     case PC:
2750     case REG:
2751     case SCRATCH:
2752     case ADDR_VEC:
2753     case ADDR_DIFF_VEC:
2754     case VAR_LOCATION:
2755       return 0;
2756 
2757     case CLOBBER:
2758       /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
2759 	 when some combination can't be done.  If we see one, don't think
2760 	 that we can simplify the expression.  */
2761       return (GET_MODE (x) != VOIDmode);
2762 
2763     case PRE_INC:
2764     case PRE_DEC:
2765     case POST_INC:
2766     case POST_DEC:
2767     case PRE_MODIFY:
2768     case POST_MODIFY:
2769     case CALL:
2770     case UNSPEC_VOLATILE:
2771       return 1;
2772 
2773     case MEM:
2774     case ASM_INPUT:
2775     case ASM_OPERANDS:
2776       if (MEM_VOLATILE_P (x))
2777 	return 1;
2778 
2779     default:
2780       break;
2781     }
2782 
2783   /* Recursively scan the operands of this expression.  */
2784 
2785   {
2786     const char *fmt = GET_RTX_FORMAT (code);
2787     int i;
2788 
2789     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2790       {
2791 	if (fmt[i] == 'e')
2792 	  {
2793 	    if (side_effects_p (XEXP (x, i)))
2794 	      return 1;
2795 	  }
2796 	else if (fmt[i] == 'E')
2797 	  {
2798 	    int j;
2799 	    for (j = 0; j < XVECLEN (x, i); j++)
2800 	      if (side_effects_p (XVECEXP (x, i, j)))
2801 		return 1;
2802 	  }
2803       }
2804   }
2805   return 0;
2806 }
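
/* Illustrative examples (not part of the original source): under the
   rules above, (plus:SI (reg:SI 1) (const_int 4)) has no side effects,
   while a volatile memory reference (mem/v:SI (reg:SI 1)) and an
   autoincrement such as (post_inc:SI (reg:SI 1)) both do, so
   expressions containing them must not be deleted or duplicated.  */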
2807 
2808 /* Return nonzero if evaluating rtx X might cause a trap.
2809    FLAGS controls how to consider MEMs.  A nonzero value means the context
2810    of the access may have changed from the original, such that the
2811    address may have become invalid.  */
2812 
2813 int
2814 may_trap_p_1 (const_rtx x, unsigned flags)
2815 {
2816   int i;
2817   enum rtx_code code;
2818   const char *fmt;
2819 
2820   /* We make no distinction currently, but this function is part of
2821      the internal target-hooks ABI so we keep the parameter as
2822      "unsigned flags".  */
2823   bool code_changed = flags != 0;
2824 
2825   if (x == 0)
2826     return 0;
2827   code = GET_CODE (x);
2828   switch (code)
2829     {
2830       /* Handle these cases quickly.  */
2831     CASE_CONST_ANY:
2832     case SYMBOL_REF:
2833     case LABEL_REF:
2834     case CONST:
2835     case PC:
2836     case CC0:
2837     case REG:
2838     case SCRATCH:
2839       return 0;
2840 
2841     case UNSPEC:
2842       return targetm.unspec_may_trap_p (x, flags);
2843 
2844     case UNSPEC_VOLATILE:
2845     case ASM_INPUT:
2846     case TRAP_IF:
2847       return 1;
2848 
2849     case ASM_OPERANDS:
2850       return MEM_VOLATILE_P (x);
2851 
2852       /* Memory ref can trap unless it's a static var or a stack slot.  */
2853     case MEM:
2854       /* Recognize specific pattern of stack checking probes.  */
2855       if (flag_stack_check
2856 	  && MEM_VOLATILE_P (x)
2857 	  && XEXP (x, 0) == stack_pointer_rtx)
2858 	return 1;
2859       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2860 	     reference; moving it out of context such as when moving code
2861 	     when optimizing, might cause its address to become invalid.  */
2862 	  code_changed
2863 	  || !MEM_NOTRAP_P (x))
2864 	{
2865 	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2866 	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2867 					GET_MODE (x), code_changed);
2868 	}
2869 
2870       return 0;
2871 
2872       /* Division by a non-constant might trap.  */
2873     case DIV:
2874     case MOD:
2875     case UDIV:
2876     case UMOD:
2877       if (HONOR_SNANS (x))
2878 	return 1;
2879       if (FLOAT_MODE_P (GET_MODE (x)))
2880 	return flag_trapping_math;
2881       if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2882 	return 1;
2883       if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
2884 	{
2885 	  /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
2886 	  unsigned int n_elts;
2887 	  rtx op = XEXP (x, 1);
2888 	  if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
2889 	    {
2890 	      if (!CONST_VECTOR_DUPLICATE_P (op))
2891 		return 1;
2892 	      for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
2893 		if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
2894 		  return 1;
2895 	    }
2896 	  else
2897 	    for (unsigned i = 0; i < n_elts; i++)
2898 	      if (CONST_VECTOR_ELT (op, i) == const0_rtx)
2899 		return 1;
2900 	}
2901       break;
2902 
2903     case EXPR_LIST:
2904       /* An EXPR_LIST is used to represent a function call.  This
2905 	 certainly may trap.  */
2906       return 1;
2907 
2908     case GE:
2909     case GT:
2910     case LE:
2911     case LT:
2912     case LTGT:
2913     case COMPARE:
2914       /* Some floating point comparisons may trap.  */
2915       if (!flag_trapping_math)
2916 	break;
2917       /* ??? There is no machine independent way to check for tests that trap
2918 	 when COMPARE is used, though many targets do make this distinction.
2919 	 For instance, sparc uses CCFPE for compares which generate exceptions
2920 	 and CCFP for compares which do not generate exceptions.  */
2921       if (HONOR_NANS (x))
2922 	return 1;
2923       /* But often the compare has some CC mode, so check operand
2924 	 modes as well.  */
2925       if (HONOR_NANS (XEXP (x, 0))
2926 	  || HONOR_NANS (XEXP (x, 1)))
2927 	return 1;
2928       break;
2929 
2930     case EQ:
2931     case NE:
2932       if (HONOR_SNANS (x))
2933 	return 1;
2934       /* Often comparison is CC mode, so check operand modes.  */
2935       if (HONOR_SNANS (XEXP (x, 0))
2936 	  || HONOR_SNANS (XEXP (x, 1)))
2937 	return 1;
2938       break;
2939 
2940     case FIX:
2941       /* Conversion of floating point might trap.  */
2942       if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2943 	return 1;
2944       break;
2945 
2946     case NEG:
2947     case ABS:
2948     case SUBREG:
2949     case VEC_MERGE:
2950     case VEC_SELECT:
2951     case VEC_CONCAT:
2952     case VEC_DUPLICATE:
2953       /* These operations don't trap even with floating point.  */
2954       break;
2955 
2956     case SIGN_EXTRACT:
2957       if (targetm.have_extv ())
2958 	return targetm.bitfield_may_trap_p (x, flags);
2959       break;
2960     case ZERO_EXTRACT:
2961       if (targetm.have_extzv ())
2962 	return targetm.bitfield_may_trap_p (x, flags);
2963       break;
2964 
2965     default:
2966       /* Any floating arithmetic may trap.  */
2967       if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2968 	return 1;
2969     }
2970 
2971   fmt = GET_RTX_FORMAT (code);
2972   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2973     {
2974       if (fmt[i] == 'e')
2975 	{
2976 	  if (may_trap_p_1 (XEXP (x, i), flags))
2977 	    return 1;
2978 	}
2979       else if (fmt[i] == 'E')
2980 	{
2981 	  int j;
2982 	  for (j = 0; j < XVECLEN (x, i); j++)
2983 	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2984 	      return 1;
2985 	}
2986     }
2987   return 0;
2988 }
2989 
2990 /* Return nonzero if evaluating rtx X might cause a trap.  */
2991 
2992 int
2993 may_trap_p (const_rtx x)
2994 {
2995   return may_trap_p_1 (x, 0);
2996 }
2997 
2998 /* Same as above, but additionally return nonzero if evaluating rtx X might
2999    cause a fault.  We define a fault for the purpose of this function as an
3000    erroneous execution condition that cannot be encountered during the normal
3001    execution of a valid program; the typical example is an unaligned memory
3002    access on a strict alignment machine.  The compiler guarantees that it
3003    doesn't generate code that will fault from a valid program, but this
3004    guarantee doesn't mean anything for individual instructions.  Consider
3005    the following example:
3006 
3007       struct S { int d; union { char *cp; int *ip; }; };
3008 
3009       int foo(struct S *s)
3010       {
3011 	if (s->d == 1)
3012 	  return *s->ip;
3013 	else
3014 	  return *s->cp;
3015       }
3016 
3017    on a strict alignment machine.  In a valid program, foo will never be
3018    invoked on a structure for which d is equal to 1 and the underlying
3019    unique field of the union not aligned on a 4-byte boundary, but the
3020    expression *s->ip might cause a fault if considered individually.
3021 
3022    At the RTL level, potentially problematic expressions will almost always
3023    satisfy may_trap_p; for example, the above dereference can be emitted as
3024    (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
3025    However, suppose that foo is inlined in a caller that causes s->cp to
3026    point to a local character variable and guarantees that s->d is not set
3027    to 1; foo may have been effectively translated into pseudo-RTL as:
3028 
3029       if ((reg:SI) == 1)
3030 	(set (reg:SI) (mem:SI (%fp - 7)))
3031       else
3032 	(set (reg:QI) (mem:QI (%fp - 7)))
3033 
3034    Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
3035    memory reference to a stack slot, but it will certainly cause a fault
3036    on a strict alignment machine.  */
3037 
3038 int
3039 may_trap_or_fault_p (const_rtx x)
3040 {
3041   return may_trap_p_1 (x, 1);
3042 }
3043 
3044 /* Replace any occurrence of FROM in X with TO.  The function does
3045    not recurse into CONST_DOUBLEs when replacing.
3046 
3047    Note that copying is not done, so X must not be shared unless all
3048    copies are to be modified.
3049 
3050    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3051    those pointer-equal ones.  */
3052 
3053 rtx
3054 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3055 {
3056   int i, j;
3057   const char *fmt;
3058 
3059   if (x == from)
3060     return to;
3061 
3062   /* Allow this function to make replacements in EXPR_LISTs.  */
3063   if (x == 0)
3064     return 0;
3065 
3066   if (all_regs
3067       && REG_P (x)
3068       && REG_P (from)
3069       && REGNO (x) == REGNO (from))
3070     {
3071       gcc_assert (GET_MODE (x) == GET_MODE (from));
3072       return to;
3073     }
3074   else if (GET_CODE (x) == SUBREG)
3075     {
3076       rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3077 
3078       if (CONST_SCALAR_INT_P (new_rtx))
3079 	{
3080 	  x = simplify_subreg (GET_MODE (x), new_rtx,
3081 			       GET_MODE (SUBREG_REG (x)),
3082 			       SUBREG_BYTE (x));
3083 	  gcc_assert (x);
3084 	}
3085       else
3086 	SUBREG_REG (x) = new_rtx;
3087 
3088       return x;
3089     }
3090   else if (GET_CODE (x) == ZERO_EXTEND)
3091     {
3092       rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3093 
3094       if (CONST_SCALAR_INT_P (new_rtx))
3095 	{
3096 	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3097 					new_rtx, GET_MODE (XEXP (x, 0)));
3098 	  gcc_assert (x);
3099 	}
3100       else
3101 	XEXP (x, 0) = new_rtx;
3102 
3103       return x;
3104     }
3105 
3106   fmt = GET_RTX_FORMAT (GET_CODE (x));
3107   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3108     {
3109       if (fmt[i] == 'e')
3110 	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3111       else if (fmt[i] == 'E')
3112 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3113 	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3114 					   from, to, all_regs);
3115     }
3116 
3117   return x;
3118 }
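
/* Usage sketch (illustrative; OLD_REG and NEW_REG are hypothetical):

     pat = replace_rtx (copy_rtx (pat), old_reg, new_reg, false);

   Copying first respects the sharing caveat above; passing false for
   ALL_REGS replaces only pointer-equal occurrences of OLD_REG.  */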
3119 
3120 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
3121    the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */
3122 
3123 void
3124 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3125 {
3126   /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
3127   rtx x = *loc;
3128   if (JUMP_TABLE_DATA_P (x))
3129     {
3130       x = PATTERN (x);
3131       rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3132       int len = GET_NUM_ELEM (vec);
3133       for (int i = 0; i < len; ++i)
3134 	{
3135 	  rtx ref = RTVEC_ELT (vec, i);
3136 	  if (XEXP (ref, 0) == old_label)
3137 	    {
3138 	      XEXP (ref, 0) = new_label;
3139 	      if (update_label_nuses)
3140 		{
3141 		  ++LABEL_NUSES (new_label);
3142 		  --LABEL_NUSES (old_label);
3143 		}
3144 	    }
3145 	}
3146       return;
3147     }
3148 
3149   /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3150      field.  This is not handled by the iterator because it doesn't
3151      handle unprinted ('0') fields.  */
3152   if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3153     JUMP_LABEL (x) = new_label;
3154 
3155   subrtx_ptr_iterator::array_type array;
3156   FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3157     {
3158       rtx *loc = *iter;
3159       if (rtx x = *loc)
3160 	{
3161 	  if (GET_CODE (x) == SYMBOL_REF
3162 	      && CONSTANT_POOL_ADDRESS_P (x))
3163 	    {
3164 	      rtx c = get_pool_constant (x);
3165 	      if (rtx_referenced_p (old_label, c))
3166 		{
3167 		  /* Create a copy of constant C; replace the label inside
3168 		     but do not update LABEL_NUSES because uses in constant pool
3169 		     are not counted.  */
3170 		  rtx new_c = copy_rtx (c);
3171 		  replace_label (&new_c, old_label, new_label, false);
3172 
3173 		  /* Add the new constant NEW_C to constant pool and replace
3174 		     the old reference to constant by new reference.  */
3175 		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3176 		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3177 		}
3178 	    }
3179 
3180 	  if ((GET_CODE (x) == LABEL_REF
3181 	       || GET_CODE (x) == INSN_LIST)
3182 	      && XEXP (x, 0) == old_label)
3183 	    {
3184 	      XEXP (x, 0) = new_label;
3185 	      if (update_label_nuses)
3186 		{
3187 		  ++LABEL_NUSES (new_label);
3188 		  --LABEL_NUSES (old_label);
3189 		}
3190 	    }
3191 	}
3192     }
3193 }
3194 
3195 void
3196 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3197 		       rtx_insn *new_label, bool update_label_nuses)
3198 {
3199   rtx insn_as_rtx = insn;
3200   replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3201   gcc_checking_assert (insn_as_rtx == insn);
3202 }
3203 
3204 /* Return true if X is referenced in BODY.  */
3205 
3206 bool
3207 rtx_referenced_p (const_rtx x, const_rtx body)
3208 {
3209   subrtx_iterator::array_type array;
3210   FOR_EACH_SUBRTX (iter, array, body, ALL)
3211     if (const_rtx y = *iter)
3212       {
3213 	/* Check if a label_ref Y refers to label X.  */
3214 	if (GET_CODE (y) == LABEL_REF
3215 	    && LABEL_P (x)
3216 	    && label_ref_label (y) == x)
3217 	  return true;
3218 
3219 	if (rtx_equal_p (x, y))
3220 	  return true;
3221 
3222 	/* If Y is a reference to pool constant traverse the constant.  */
3223 	if (GET_CODE (y) == SYMBOL_REF
3224 	    && CONSTANT_POOL_ADDRESS_P (y))
3225 	  iter.substitute (get_pool_constant (y));
3226       }
3227   return false;
3228 }
3229 
3230 /* If INSN is a tablejump, return true and store the label (before the jump table) to
3231    *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */
3232 
3233 bool
3234 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3235 	     rtx_jump_table_data **tablep)
3236 {
3237   if (!JUMP_P (insn))
3238     return false;
3239 
3240   rtx target = JUMP_LABEL (insn);
3241   if (target == NULL_RTX || ANY_RETURN_P (target))
3242     return false;
3243 
3244   rtx_insn *label = as_a<rtx_insn *> (target);
3245   rtx_insn *table = next_insn (label);
3246   if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3247     return false;
3248 
3249   if (labelp)
3250     *labelp = label;
3251   if (tablep)
3252     *tablep = as_a <rtx_jump_table_data *> (table);
3253   return true;
3254 }
3255 
3256 /* For INSN known to satisfy tablejump_p, determine if it actually is a
3257    CASESI.  Return the insn pattern if so, NULL_RTX otherwise.  */
3258 
3259 rtx
3260 tablejump_casesi_pattern (const rtx_insn *insn)
3261 {
3262   rtx tmp;
3263 
3264   if ((tmp = single_set (insn)) != NULL
3265       && SET_DEST (tmp) == pc_rtx
3266       && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3267       && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
3268     return tmp;
3269 
3270   return NULL_RTX;
3271 }
3272 
3273 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3274    constant that is not in the constant pool and not in the condition
3275    of an IF_THEN_ELSE.  */
3276 
3277 static int
3278 computed_jump_p_1 (const_rtx x)
3279 {
3280   const enum rtx_code code = GET_CODE (x);
3281   int i, j;
3282   const char *fmt;
3283 
3284   switch (code)
3285     {
3286     case LABEL_REF:
3287     case PC:
3288       return 0;
3289 
3290     case CONST:
3291     CASE_CONST_ANY:
3292     case SYMBOL_REF:
3293     case REG:
3294       return 1;
3295 
3296     case MEM:
3297       return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3298 		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3299 
3300     case IF_THEN_ELSE:
3301       return (computed_jump_p_1 (XEXP (x, 1))
3302 	      || computed_jump_p_1 (XEXP (x, 2)));
3303 
3304     default:
3305       break;
3306     }
3307 
3308   fmt = GET_RTX_FORMAT (code);
3309   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3310     {
3311       if (fmt[i] == 'e'
3312 	  && computed_jump_p_1 (XEXP (x, i)))
3313 	return 1;
3314 
3315       else if (fmt[i] == 'E')
3316 	for (j = 0; j < XVECLEN (x, i); j++)
3317 	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
3318 	    return 1;
3319     }
3320 
3321   return 0;
3322 }
3323 
3324 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3325 
3326    Tablejumps and casesi insns are not considered indirect jumps;
3327    we can recognize them by a (use (label_ref)).  */
3328 
3329 int
3330 computed_jump_p (const rtx_insn *insn)
3331 {
3332   int i;
3333   if (JUMP_P (insn))
3334     {
3335       rtx pat = PATTERN (insn);
3336 
3337       /* If we have a JUMP_LABEL set, we're not a computed jump.  */
3338       if (JUMP_LABEL (insn) != NULL)
3339 	return 0;
3340 
3341       if (GET_CODE (pat) == PARALLEL)
3342 	{
3343 	  int len = XVECLEN (pat, 0);
3344 	  int has_use_labelref = 0;
3345 
3346 	  for (i = len - 1; i >= 0; i--)
3347 	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3348 		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3349 		    == LABEL_REF))
3350 	      {
3351 	        has_use_labelref = 1;
3352 	        break;
3353 	      }
3354 
3355 	  if (! has_use_labelref)
3356 	    for (i = len - 1; i >= 0; i--)
3357 	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3358 		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3359 		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3360 		return 1;
3361 	}
3362       else if (GET_CODE (pat) == SET
3363 	       && SET_DEST (pat) == pc_rtx
3364 	       && computed_jump_p_1 (SET_SRC (pat)))
3365 	return 1;
3366     }
3367   return 0;
3368 }
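
/* Illustrative example (not part of the original source): a JUMP_INSN
   whose pattern is (set (pc) (reg:DI 100)) and whose JUMP_LABEL is null
   is a computed jump, whereas a casesi or tablejump expansion carries a
   (use (label_ref ...)) in its PARALLEL and is therefore excluded.  */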
3369 
3370 
3371 
3372 /* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
3373    the equivalent add insn and pass the result to FN, using DATA as the
3374    final argument.  */
3375 
3376 static int
3377 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3378 {
3379   rtx x = XEXP (mem, 0);
3380   switch (GET_CODE (x))
3381     {
3382     case PRE_INC:
3383     case POST_INC:
3384       {
3385 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3386 	rtx r1 = XEXP (x, 0);
3387 	rtx c = gen_int_mode (size, GET_MODE (r1));
3388 	return fn (mem, x, r1, r1, c, data);
3389       }
3390 
3391     case PRE_DEC:
3392     case POST_DEC:
3393       {
3394 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3395 	rtx r1 = XEXP (x, 0);
3396 	rtx c = gen_int_mode (-size, GET_MODE (r1));
3397 	return fn (mem, x, r1, r1, c, data);
3398       }
3399 
3400     case PRE_MODIFY:
3401     case POST_MODIFY:
3402       {
3403 	rtx r1 = XEXP (x, 0);
3404 	rtx add = XEXP (x, 1);
3405 	return fn (mem, x, r1, add, NULL, data);
3406       }
3407 
3408     default:
3409       gcc_unreachable ();
3410     }
3411 }
3412 
3413 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3414    For each such autoinc operation found, call FN, passing it
3415    the innermost enclosing MEM, the operation itself, the RTX modified
3416    by the operation, two RTXs (the second may be NULL) that, once
3417    added, represent the value to be held by the modified RTX
3418    afterwards, and DATA.  FN is to return 0 to continue the
3419    traversal or any other value to have it returned to the caller of
3420    for_each_inc_dec.  */
3421 
3422 int
3423 for_each_inc_dec (rtx x,
3424 		  for_each_inc_dec_fn fn,
3425 		  void *data)
3426 {
3427   subrtx_var_iterator::array_type array;
3428   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3429     {
3430       rtx mem = *iter;
3431       if (mem
3432 	  && MEM_P (mem)
3433 	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3434 	{
3435 	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3436 	  if (res != 0)
3437 	    return res;
3438 	  iter.skip_subrtxes ();
3439 	}
3440     }
3441   return 0;
3442 }
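
/* Usage sketch (illustrative; record_inc_dec and the vector INCS are
   hypothetical).  For each autoinc MEM the callback sees the register
   being modified (DEST) and the value it will hold afterwards: the sum
   of SRC and SRCOFF, or just SRC when SRCOFF is NULL, as for
   PRE_MODIFY/POST_MODIFY:

     static int
     record_inc_dec (rtx mem, rtx op, rtx dest, rtx src, rtx srcoff,
                     void *data)
     {
       ((vec<rtx> *) data)->safe_push (dest);
       return 0;
     }

     for_each_inc_dec (PATTERN (insn), record_inc_dec, &incs);

   A zero return continues the traversal; any other value stops it and
   is returned to the caller.  */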
3443 
3444 
3445 /* Searches X for any reference to REGNO, returning the rtx of the
3446    reference found if any.  Otherwise, returns NULL_RTX.  */
3447 
3448 rtx
3449 regno_use_in (unsigned int regno, rtx x)
3450 {
3451   const char *fmt;
3452   int i, j;
3453   rtx tem;
3454 
3455   if (REG_P (x) && REGNO (x) == regno)
3456     return x;
3457 
3458   fmt = GET_RTX_FORMAT (GET_CODE (x));
3459   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3460     {
3461       if (fmt[i] == 'e')
3462 	{
3463 	  if ((tem = regno_use_in (regno, XEXP (x, i))))
3464 	    return tem;
3465 	}
3466       else if (fmt[i] == 'E')
3467 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3468 	  if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3469 	    return tem;
3470     }
3471 
3472   return NULL_RTX;
3473 }
3474 
3475 /* Return a value indicating whether OP, an operand of a commutative
3476    operation, is preferred as the first or second operand.  The more
3477    positive the value, the stronger the preference for being the first
3478    operand.  */
3479 
3480 int
3481 commutative_operand_precedence (rtx op)
3482 {
3483   enum rtx_code code = GET_CODE (op);
3484 
3485   /* Constants always become the second operand.  Prefer "nice" constants.  */
3486   if (code == CONST_INT)
3487     return -10;
3488   if (code == CONST_WIDE_INT)
3489     return -9;
3490   if (code == CONST_POLY_INT)
3491     return -8;
3492   if (code == CONST_DOUBLE)
3493     return -8;
3494   if (code == CONST_FIXED)
3495     return -8;
3496   op = avoid_constant_pool_reference (op);
3497   code = GET_CODE (op);
3498 
3499   switch (GET_RTX_CLASS (code))
3500     {
3501     case RTX_CONST_OBJ:
3502       if (code == CONST_INT)
3503 	return -7;
3504       if (code == CONST_WIDE_INT)
3505 	return -6;
3506       if (code == CONST_POLY_INT)
3507 	return -5;
3508       if (code == CONST_DOUBLE)
3509 	return -5;
3510       if (code == CONST_FIXED)
3511 	return -5;
3512       return -4;
3513 
3514     case RTX_EXTRA:
3515       /* SUBREGs of objects should come second.  */
3516       if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3517         return -3;
3518       return 0;
3519 
3520     case RTX_OBJ:
3521       /* Complex expressions should come first, so decrease the priority
3522          of objects.  Prefer pointer objects over non-pointer objects.  */
3523       if ((REG_P (op) && REG_POINTER (op))
3524 	  || (MEM_P (op) && MEM_POINTER (op)))
3525 	return -1;
3526       return -2;
3527 
3528     case RTX_COMM_ARITH:
3529       /* Prefer operands that are themselves commutative to be first.
3530          This helps to make things linear.  In particular,
3531          (and (and (reg) (reg)) (not (reg))) is canonical.  */
3532       return 4;
3533 
3534     case RTX_BIN_ARITH:
3535       /* If only one operand is a binary expression, it will be the first
3536          operand.  In particular,  (plus (minus (reg) (reg)) (neg (reg)))
3537          is canonical, although it will usually be further simplified.  */
3538       return 2;
3539 
3540     case RTX_UNARY:
3541       /* Then prefer NEG and NOT.  */
3542       if (code == NEG || code == NOT)
3543         return 1;
3544       /* FALLTHRU */
3545 
3546     default:
3547       return 0;
3548     }
3549 }
3550 
3551 /* Return 1 iff it is necessary to swap the operands of a commutative
3552    operation in order to canonicalize the expression.  */
3553 
3554 bool
3555 swap_commutative_operands_p (rtx x, rtx y)
3556 {
3557   return (commutative_operand_precedence (x)
3558 	  < commutative_operand_precedence (y));
3559 }
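
/* Illustrative example (not part of the original source): for
   X = (const_int 4) and Y = (reg:SI 1), commutative_operand_precedence
   gives -10 for X and -2 (or -1 for a pointer register) for Y, so
   swap_commutative_operands_p (x, y) is true and canonicalization turns
   (plus (const_int 4) (reg)) into (plus (reg) (const_int 4)).  */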
3560 
3561 /* Return 1 if X is an autoincrement side effect and the register is
3562    not the stack pointer.  */
3563 int
3564 auto_inc_p (const_rtx x)
3565 {
3566   switch (GET_CODE (x))
3567     {
3568     case PRE_INC:
3569     case POST_INC:
3570     case PRE_DEC:
3571     case POST_DEC:
3572     case PRE_MODIFY:
3573     case POST_MODIFY:
3574       /* There are no REG_INC notes for SP.  */
3575       if (XEXP (x, 0) != stack_pointer_rtx)
3576 	return 1;
3577     default:
3578       break;
3579     }
3580   return 0;
3581 }
3582 
3583 /* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
3584 int
3585 loc_mentioned_in_p (rtx *loc, const_rtx in)
3586 {
3587   enum rtx_code code;
3588   const char *fmt;
3589   int i, j;
3590 
3591   if (!in)
3592     return 0;
3593 
3594   code = GET_CODE (in);
3595   fmt = GET_RTX_FORMAT (code);
3596   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3597     {
3598       if (fmt[i] == 'e')
3599 	{
3600 	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3601 	    return 1;
3602 	}
3603       else if (fmt[i] == 'E')
3604 	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3605 	  if (loc == &XVECEXP (in, i, j)
3606 	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3607 	    return 1;
3608     }
3609   return 0;
3610 }
3611 
3612 /* Reinterpret a subreg as a bit extraction from an integer and return
3613    the position of the least significant bit of the extracted value.
3614    In other words, if the extraction were performed as a shift right
3615    and mask, return the number of bits to shift right.
3616 
3617    The outer value of the subreg has OUTER_BYTES bytes and starts at
3618    byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes.  */
3619 
3620 poly_uint64
3621 subreg_size_lsb (poly_uint64 outer_bytes,
3622 		 poly_uint64 inner_bytes,
3623 		 poly_uint64 subreg_byte)
3624 {
3625   poly_uint64 subreg_end, trailing_bytes, byte_pos;
3626 
3627   /* A paradoxical subreg begins at bit position 0.  */
3628   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3629   if (maybe_gt (outer_bytes, inner_bytes))
3630     {
3631       gcc_checking_assert (known_eq (subreg_byte, 0U));
3632       return 0;
3633     }
3634 
3635   subreg_end = subreg_byte + outer_bytes;
3636   trailing_bytes = inner_bytes - subreg_end;
3637   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3638     byte_pos = trailing_bytes;
3639   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3640     byte_pos = subreg_byte;
3641   else
3642     {
3643       /* When bytes and words have opposite endianness, we must be able
3644 	 to split offsets into words and bytes at compile time.  */
3645       poly_uint64 leading_word_part
3646 	= force_align_down (subreg_byte, UNITS_PER_WORD);
3647       poly_uint64 trailing_word_part
3648 	= force_align_down (trailing_bytes, UNITS_PER_WORD);
3649       /* If the subreg crosses a word boundary ensure that
3650 	 it also begins and ends on a word boundary.  */
3651       gcc_assert (known_le (subreg_end - leading_word_part,
3652 			    (unsigned int) UNITS_PER_WORD)
3653 		  || (known_eq (leading_word_part, subreg_byte)
3654 		      && known_eq (trailing_word_part, trailing_bytes)));
3655       if (WORDS_BIG_ENDIAN)
3656 	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3657       else
3658 	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3659     }
3660 
3661   return byte_pos * BITS_PER_UNIT;
3662 }
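/* Editor's sketch (not part of the GCC sources): a stand-alone model of
   the two simple cases above, for constant sizes and matching byte/word
   endianness.  BITS_PER_UNIT is fixed at 8 and all names are
   illustrative.  */
#if 0
#include <assert.h>

static unsigned int
model_subreg_lsb (unsigned int outer_bytes, unsigned int inner_bytes,
		  unsigned int subreg_byte, int big_endian)
{
  /* Little endian: the subreg starts SUBREG_BYTE bytes above the lsb.
     Big endian: it starts TRAILING_BYTES bytes above the lsb.  */
  unsigned int trailing_bytes = inner_bytes - (subreg_byte + outer_bytes);
  return (big_endian ? trailing_bytes : subreg_byte) * 8;
}

static void
model_subreg_lsb_check (void)
{
  /* (subreg:HI (reg:SI x) 2) selects bits 16..31 on a little-endian
     target but bits 0..15 on a big-endian one.  */
  assert (model_subreg_lsb (2, 4, 2, 0) == 16);
  assert (model_subreg_lsb (2, 4, 2, 1) == 0);
}
#endif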
3663 
3664 /* Given a subreg X, return the bit offset where the subreg begins
3665    (counting from the least significant bit of the reg).  */
3666 
3667 poly_uint64
3668 subreg_lsb (const_rtx x)
3669 {
3670   return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3671 		       SUBREG_BYTE (x));
3672 }
3673 
3674 /* Return the subreg byte offset for a subreg whose outer value has
3675    OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3676    there are LSB_SHIFT *bits* between the lsb of the outer value and the
3677    lsb of the inner value.  This is the inverse of the calculation
3678    performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */
3679 
3680 poly_uint64
3681 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3682 			     poly_uint64 lsb_shift)
3683 {
3684   /* A paradoxical subreg begins at bit position 0.  */
3685   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3686   if (maybe_gt (outer_bytes, inner_bytes))
3687     {
3688       gcc_checking_assert (known_eq (lsb_shift, 0U));
3689       return 0;
3690     }
3691 
3692   poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3693   poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3694   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3695     return upper_bytes;
3696   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3697     return lower_bytes;
3698   else
3699     {
3700       /* When bytes and words have opposite endianness, we must be able
3701 	 to split offsets into words and bytes at compile time.  */
3702       poly_uint64 lower_word_part = force_align_down (lower_bytes,
3703 						      UNITS_PER_WORD);
3704       poly_uint64 upper_word_part = force_align_down (upper_bytes,
3705 						      UNITS_PER_WORD);
3706       if (WORDS_BIG_ENDIAN)
3707 	return upper_word_part + (lower_bytes - lower_word_part);
3708       else
3709 	return lower_word_part + (upper_bytes - upper_word_part);
3710     }
3711 }
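/* Editor's note (not part of the GCC sources): continuing the model
   above, the two directions invert each other.  On a little-endian
   target the byte offset is simply LSB_SHIFT / BITS_PER_UNIT, so an
   lsb shift of 32 bits into an 8-byte inner value gives back subreg
   byte offset 4.  */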
3712 
3713 /* Fill in information about a subreg of a hard register.
3714    xregno - A regno of an inner hard subreg_reg (or what will become one).
3715    xmode  - The mode of xregno.
3716    offset - The byte offset.
3717    ymode  - The mode of a top level SUBREG (or what may become one).
3718    info   - Pointer to structure to fill in.
3719 
3720    Rather than considering one particular inner register (and thus one
3721    particular "outer" register) in isolation, this function really uses
3722    XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
3723    function does not check whether adding INFO->offset to XREGNO gives
3724    a valid hard register; even if INFO->offset + XREGNO is out of range,
3725    there might be another register of the same type that is in range.
3726    Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3727    the new register, since that can depend on things like whether the final
3728    register number is even or odd.  Callers that want to check whether
3729    this particular subreg can be replaced by a simple (reg ...) should
3730    use simplify_subreg_regno.  */
3731 
3732 void
3733 subreg_get_info (unsigned int xregno, machine_mode xmode,
3734 		 poly_uint64 offset, machine_mode ymode,
3735 		 struct subreg_info *info)
3736 {
3737   unsigned int nregs_xmode, nregs_ymode;
3738 
3739   gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3740 
3741   poly_uint64 xsize = GET_MODE_SIZE (xmode);
3742   poly_uint64 ysize = GET_MODE_SIZE (ymode);
3743 
3744   bool rknown = false;
3745 
3746   /* If the register representation of a non-scalar mode has holes in it,
3747      we expect the scalar units to be concatenated together, with the holes
3748      distributed evenly among the scalar units.  Each scalar unit must occupy
3749      at least one register.  */
3750   if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3751     {
3752       /* As a consequence, we must be dealing with a constant number of
3753 	 scalars, and thus a constant offset and number of units.  */
3754       HOST_WIDE_INT coffset = offset.to_constant ();
3755       HOST_WIDE_INT cysize = ysize.to_constant ();
3756       nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3757       unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3758       scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3759       gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3760       gcc_assert (nregs_xmode
3761 		  == (nunits
3762 		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3763       gcc_assert (hard_regno_nregs (xregno, xmode)
3764 		  == hard_regno_nregs (xregno, xmode_unit) * nunits);
3765 
3766       /* You can only ask for a SUBREG of a value with holes in the middle
3767 	 if you don't cross the holes.  (Such a SUBREG should be done by
3768 	 picking a different register class, or doing it in memory if
3769 	 necessary.)  An example of a value with holes is XCmode on 32-bit
3770 	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3771 	 3 for each part, but in memory it's two 128-bit parts.
3772 	 Padding is assumed to be at the end (not necessarily the 'high part')
3773 	 of each unit.  */
3774       if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3775 	  && (coffset / GET_MODE_SIZE (xmode_unit)
3776 	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3777 	{
3778 	  info->representable_p = false;
3779 	  rknown = true;
3780 	}
3781     }
3782   else
3783     nregs_xmode = hard_regno_nregs (xregno, xmode);
3784 
3785   nregs_ymode = hard_regno_nregs (xregno, ymode);
3786 
3787   /* Subreg sizes must be ordered, so that we can tell whether they are
3788      partial, paradoxical or complete.  */
3789   gcc_checking_assert (ordered_p (xsize, ysize));
3790 
3791   /* Paradoxical subregs are otherwise valid.  */
3792   if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3793     {
3794       info->representable_p = true;
3795       /* If this is a big endian paradoxical subreg, which uses more
3796 	 actual hard registers than the original register, we must
3797 	 return a negative offset so that we find the proper highpart
3798 	 of the register.
3799 
3800 	 We assume that the ordering of registers within a multi-register
3801 	 value has a consistent endianness: if bytes and register words
3802 	 have different endianness, the hard registers that make up a
3803 	 multi-register value must be at least word-sized.  */
3804       if (REG_WORDS_BIG_ENDIAN)
3805 	info->offset = (int) nregs_xmode - (int) nregs_ymode;
3806       else
3807 	info->offset = 0;
3808       info->nregs = nregs_ymode;
3809       return;
3810     }
3811 
3812   /* If registers store different numbers of bits in the different
3813      modes, we cannot generally form this subreg.  */
3814   poly_uint64 regsize_xmode, regsize_ymode;
3815   if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3816       && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3817       && multiple_p (xsize, nregs_xmode, &regsize_xmode)
3818       && multiple_p (ysize, nregs_ymode, &regsize_ymode))
3819     {
3820       if (!rknown
3821 	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3822 	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3823 	{
3824 	  info->representable_p = false;
3825 	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3826 	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3827 	    /* Checked by validate_subreg.  We must know at compile time
3828 	       which inner registers are being accessed.  */
3829 	    gcc_unreachable ();
3830 	  return;
3831 	}
3832       /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3833 	 would go outside of XMODE.  */
3834       if (!rknown && maybe_gt (ysize + offset, xsize))
3835 	{
3836 	  info->representable_p = false;
3837 	  info->nregs = nregs_ymode;
3838 	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3839 	    /* Checked by validate_subreg.  We must know at compile time
3840 	       which inner registers are being accessed.  */
3841 	    gcc_unreachable ();
3842 	  return;
3843 	}
3844       /* Quick exit for the simple and common case of extracting whole
3845 	 subregisters from a multiregister value.  */
3846       /* ??? It would be better to integrate this into the code below,
3847 	 if we can generalize the concept enough and figure out how
3848 	 odd-sized modes can coexist with the other weird cases we support.  */
3849       HOST_WIDE_INT count;
3850       if (!rknown
3851 	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3852 	  && known_eq (regsize_xmode, regsize_ymode)
3853 	  && constant_multiple_p (offset, regsize_ymode, &count))
3854 	{
3855 	  info->representable_p = true;
3856 	  info->nregs = nregs_ymode;
3857 	  info->offset = count;
3858 	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3859 	  return;
3860 	}
3861     }
3862 
3863   /* Lowpart subregs are otherwise valid.  */
3864   if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3865     {
3866       info->representable_p = true;
3867       rknown = true;
3868 
3869       if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3870 	{
3871 	  info->offset = 0;
3872 	  info->nregs = nregs_ymode;
3873 	  return;
3874 	}
3875     }
3876 
3877   /* Set NUM_BLOCKS to the number of independently-representable YMODE
3878      values there are in (reg:XMODE XREGNO).  We can view the register
3879      as consisting of this number of independent "blocks", where each
3880      block occupies NREGS_YMODE registers and contains exactly one
3881      representable YMODE value.  */
3882   gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3883   unsigned int num_blocks = nregs_xmode / nregs_ymode;
3884 
3885   /* Calculate the number of bytes in each block.  This must always
3886      be exact, otherwise we don't know how to verify the constraint.
3887      These conditions may be relaxed but subreg_regno_offset would
3888      need to be redesigned.  */
3889   poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3890 
3891   /* Get the number of the first block that contains the subreg and the byte
3892      offset of the subreg from the start of that block.  */
3893   unsigned int block_number;
3894   poly_uint64 subblock_offset;
3895   if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3896 			&subblock_offset))
3897     /* Checked by validate_subreg.  We must know at compile time which
3898        inner registers are being accessed.  */
3899     gcc_unreachable ();
3900 
3901   if (!rknown)
3902     {
3903       /* Only the lowpart of each block is representable.  */
3904       info->representable_p
3905 	= known_eq (subblock_offset,
3906 		    subreg_size_lowpart_offset (ysize, bytes_per_block));
3907       rknown = true;
3908     }
3909 
3910   /* We assume that the ordering of registers within a multi-register
3911      value has a consistent endianness: if bytes and register words
3912      have different endianness, the hard registers that make up a
3913      multi-register value must be at least word-sized.  */
3914   if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3915     /* The block number we calculated above followed memory endianness.
3916        Convert it to register endianness by counting back from the end.
3917        (Note that, because of the assumption above, each block must be
3918        at least word-sized.)  */
3919     info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3920   else
3921     info->offset = block_number * nregs_ymode;
3922   info->nregs = nregs_ymode;
3923 }
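/* Editor's sketch (not part of the GCC sources): the "block"
   decomposition above, modelled stand-alone for constant sizes.  All
   names are illustrative.  */
#if 0
#include <assert.h>

static void
model_subreg_blocks (void)
{
  /* An 8-byte value in two 4-byte hard registers (nregs_xmode == 2)
     accessed as a 4-byte value (nregs_ymode == 1) splits into two
     blocks of 4 bytes; offset 4 lands in block 1 at sub-offset 0,
     which is the lowpart of that block and hence representable.  */
  unsigned int nregs_xmode = 2, nregs_ymode = 1;
  unsigned int xsize = 8, offset = 4;
  unsigned int num_blocks = nregs_xmode / nregs_ymode;
  unsigned int bytes_per_block = xsize / num_blocks;
  unsigned int block_number = offset / bytes_per_block;
  unsigned int subblock_offset = offset % bytes_per_block;
  assert (block_number == 1 && subblock_offset == 0);
}
#endif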
3924 
3925 /* This function returns the regno offset of a subreg expression.
3926    xregno - A regno of an inner hard subreg_reg (or what will become one).
3927    xmode  - The mode of xregno.
3928    offset - The byte offset.
3929    ymode  - The mode of a top level SUBREG (or what may become one).
3930    RETURN - The regno offset which would be used.  */
3931 unsigned int
3932 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3933 		     poly_uint64 offset, machine_mode ymode)
3934 {
3935   struct subreg_info info;
3936   subreg_get_info (xregno, xmode, offset, ymode, &info);
3937   return info.offset;
3938 }
3939 
3940 /* This function returns true when the offset is representable via
3941    subreg_offset in the given regno.
3942    xregno - A regno of an inner hard subreg_reg (or what will become one).
3943    xmode  - The mode of xregno.
3944    offset - The byte offset.
3945    ymode  - The mode of a top level SUBREG (or what may become one).
3946    RETURN - Whether the offset is representable.  */
3947 bool
3948 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3949 			       poly_uint64 offset, machine_mode ymode)
3950 {
3951   struct subreg_info info;
3952   subreg_get_info (xregno, xmode, offset, ymode, &info);
3953   return info.representable_p;
3954 }
3955 
3956 /* Return the number of a YMODE register to which
3957 
3958        (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3959 
3960    can be simplified.  Return -1 if the subreg can't be simplified.
3961 
3962    XREGNO is a hard register number.  */
3963 
3964 int
3965 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3966 		       poly_uint64 offset, machine_mode ymode)
3967 {
3968   struct subreg_info info;
3969   unsigned int yregno;
3970 
3971   /* Give the backend a chance to disallow the mode change.  */
3972   if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3973       && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3974       && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode))
3975     return -1;
3976 
3977   /* We shouldn't simplify stack-related registers.  */
3978   if ((!reload_completed || frame_pointer_needed)
3979       && xregno == FRAME_POINTER_REGNUM)
3980     return -1;
3981 
3982   if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3983       && xregno == ARG_POINTER_REGNUM)
3984     return -1;
3985 
3986   if (xregno == STACK_POINTER_REGNUM
3987       /* We should convert hard stack register in LRA if it is
3988 	 possible.  */
3989       && ! lra_in_progress)
3990     return -1;
3991 
3992   /* Try to get the register offset.  */
3993   subreg_get_info (xregno, xmode, offset, ymode, &info);
3994   if (!info.representable_p)
3995     return -1;
3996 
3997   /* Make sure that the offsetted register value is in range.  */
3998   yregno = xregno + info.offset;
3999   if (!HARD_REGISTER_NUM_P (yregno))
4000     return -1;
4001 
4002   /* See whether (reg:YMODE YREGNO) is valid.
4003 
4004      ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
4005      This is a kludge to work around how complex FP arguments are passed
4006      on IA-64 and should be fixed.  See PR target/49226.  */
4007   if (!targetm.hard_regno_mode_ok (yregno, ymode)
4008       && targetm.hard_regno_mode_ok (xregno, xmode))
4009     return -1;
4010 
4011   return (int) yregno;
4012 }
4013 
4014 /* Return the final regno that a subreg expression refers to.  */
4015 unsigned int
4016 subreg_regno (const_rtx x)
4017 {
4018   unsigned int ret;
4019   rtx subreg = SUBREG_REG (x);
4020   int regno = REGNO (subreg);
4021 
4022   ret = regno + subreg_regno_offset (regno,
4023 				     GET_MODE (subreg),
4024 				     SUBREG_BYTE (x),
4025 				     GET_MODE (x));
4026   return ret;
4027 
4028 }
4029 
4030 /* Return the number of registers that a subreg expression refers
4031    to.  */
4032 unsigned int
4033 subreg_nregs (const_rtx x)
4034 {
4035   return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4036 }
4037 
4038 /* Return the number of registers that a subreg expression REG with
4039    register number REGNO refers to.  This is a copy of
4040    rtlanal.c:subreg_nregs, changed so that the regno can be passed in.  */
4041 
4042 unsigned int
4043 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4044 {
4045   struct subreg_info info;
4046   rtx subreg = SUBREG_REG (x);
4047 
4048   subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4049 		   &info);
4050   return info.nregs;
4051 }
4052 
4053 struct parms_set_data
4054 {
4055   int nregs;
4056   HARD_REG_SET regs;
4057 };
4058 
4059 /* Helper function for noticing stores to parameter registers.  */
4060 static void
4061 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4062 {
4063   struct parms_set_data *const d = (struct parms_set_data *) data;
4064   if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4065       && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4066     {
4067       CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4068       d->nregs--;
4069     }
4070 }
4071 
4072 /* Look backward for the first parameter to be loaded.
4073    Note that loads of all parameters will not necessarily be
4074    found if CSE has eliminated some of them (e.g., an argument
4075    to the outer function is passed down as a parameter).
4076    Do not skip BOUNDARY.  */
4077 rtx_insn *
4078 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4079 {
4080   struct parms_set_data parm;
4081   rtx p;
4082   rtx_insn *before, *first_set;
4083 
4084   /* Since different machines initialize their parameter registers
4085      in different orders, assume nothing.  Collect the set of all
4086      parameter registers.  */
4087   CLEAR_HARD_REG_SET (parm.regs);
4088   parm.nregs = 0;
4089   for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4090     if (GET_CODE (XEXP (p, 0)) == USE
4091 	&& REG_P (XEXP (XEXP (p, 0), 0))
4092 	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4093       {
4094 	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4095 
4096 	/* We only care about registers which can hold function
4097 	   arguments.  */
4098 	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4099 	  continue;
4100 
4101 	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4102 	parm.nregs++;
4103       }
4104   before = call_insn;
4105   first_set = call_insn;
4106 
4107   /* Search backward for the first set of a register in this set.  */
4108   while (parm.nregs && before != boundary)
4109     {
4110       before = PREV_INSN (before);
4111 
4112       /* It is possible that some loads got CSEed from one call to
4113          another.  Stop in that case.  */
4114       if (CALL_P (before))
4115 	break;
4116 
4117       /* Our caller must either ensure that we will find all sets
4118          (in case the code has not been optimized yet), or take care
4119          of possible labels by setting BOUNDARY to the preceding
4120          CODE_LABEL.  */
4121       if (LABEL_P (before))
4122 	{
4123 	  gcc_assert (before == boundary);
4124 	  break;
4125 	}
4126 
4127       if (INSN_P (before))
4128 	{
4129 	  int nregs_old = parm.nregs;
4130 	  note_stores (before, parms_set, &parm);
4131 	  /* If we found something that did not set a parameter reg,
4132 	     we're done.  Do not keep going, as that might result
4133 	     in hoisting an insn before the setting of a pseudo
4134 	     that is used by the hoisted insn. */
4135 	  if (nregs_old != parm.nregs)
4136 	    first_set = before;
4137 	  else
4138 	    break;
4139 	}
4140     }
4141   return first_set;
4142 }
4143 
4144 /* Return true if we should avoid inserting code between INSN and preceding
4145    call instruction.  */
4146 
4147 bool
4148 keep_with_call_p (const rtx_insn *insn)
4149 {
4150   rtx set;
4151 
4152   if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4153     {
4154       if (REG_P (SET_DEST (set))
4155 	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4156 	  && fixed_regs[REGNO (SET_DEST (set))]
4157 	  && general_operand (SET_SRC (set), VOIDmode))
4158 	return true;
4159       if (REG_P (SET_SRC (set))
4160 	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4161 	  && REG_P (SET_DEST (set))
4162 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4163 	return true;
4164       /* There may be a stack pop just after the call and before the store
4165 	 of the return register.  Search for the actual store when deciding
4166 	 if we can break or not.  */
4167       if (SET_DEST (set) == stack_pointer_rtx)
4168 	{
4169 	  /* This CONST_CAST is okay because next_nonnote_insn just
4170 	     returns its argument and we assign it to a const_rtx
4171 	     variable.  */
4172 	  const rtx_insn *i2
4173 	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4174 	  if (i2 && keep_with_call_p (i2))
4175 	    return true;
4176 	}
4177     }
4178   return false;
4179 }
4180 
4181 /* Return true if LABEL is a target of JUMP_INSN.  This applies only
4182    to non-complex jumps.  That is, direct unconditional, conditional,
4183    and tablejumps, but not computed jumps or returns.  It also does
4184    not apply to the fallthru case of a conditional jump.  */
4185 
4186 bool
4187 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4188 {
4189   rtx tmp = JUMP_LABEL (jump_insn);
4190   rtx_jump_table_data *table;
4191 
4192   if (label == tmp)
4193     return true;
4194 
4195   if (tablejump_p (jump_insn, NULL, &table))
4196     {
4197       rtvec vec = table->get_labels ();
4198       int i, veclen = GET_NUM_ELEM (vec);
4199 
4200       for (i = 0; i < veclen; ++i)
4201 	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4202 	  return true;
4203     }
4204 
4205   if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4206     return true;
4207 
4208   return false;
4209 }
4210 
4211 
4212 /* Return an estimate of the cost of computing rtx X.
4213    One use is in cse, to decide which expression to keep in the hash table.
4214    Another is in rtl generation, to pick the cheapest way to multiply.
4215    Other uses like the latter are expected in the future.
4216 
4217    X appears as operand OPNO in an expression with code OUTER_CODE.
4218    SPEED specifies whether costs optimized for speed or size should
4219    be returned.  */
4220 
4221 int
4222 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4223 	  int opno, bool speed)
4224 {
4225   int i, j;
4226   enum rtx_code code;
4227   const char *fmt;
4228   int total;
4229   int factor;
4230   unsigned mode_size;
4231 
4232   if (x == 0)
4233     return 0;
4234 
4235   if (GET_CODE (x) == SET)
4236     /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4237        the mode for the factor.  */
4238     mode = GET_MODE (SET_DEST (x));
4239   else if (GET_MODE (x) != VOIDmode)
4240     mode = GET_MODE (x);
4241 
4242   mode_size = estimated_poly_value (GET_MODE_SIZE (mode));
4243 
4244   /* A size N times larger than UNITS_PER_WORD likely needs N times as
4245      many insns, taking N times as long.  */
4246   factor = mode_size > UNITS_PER_WORD ? mode_size / UNITS_PER_WORD : 1;
4247 
4248   /* Compute the default costs of certain things.
4249      Note that targetm.rtx_costs can override the defaults.  */
4250 
4251   code = GET_CODE (x);
4252   switch (code)
4253     {
4254     case MULT:
4255       /* Multiplication has time-complexity O(N*N), where N is the
4256 	 number of units (translated from digits) when using
4257 	 schoolbook long multiplication.  */
4258       total = factor * factor * COSTS_N_INSNS (5);
4259       break;
4260     case DIV:
4261     case UDIV:
4262     case MOD:
4263     case UMOD:
4264       /* Similarly, complexity for schoolbook long division.  */
4265       total = factor * factor * COSTS_N_INSNS (7);
4266       break;
4267     case USE:
4268       /* Used in combine.c as a marker.  */
4269       total = 0;
4270       break;
4271     default:
4272       total = factor * COSTS_N_INSNS (1);
4273     }
4274 
4275   switch (code)
4276     {
4277     case REG:
4278       return 0;
4279 
4280     case SUBREG:
4281       total = 0;
4282       /* If we can't tie these modes, make this expensive.  The larger
4283 	 the mode, the more expensive it is.  */
4284       if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4285 	return COSTS_N_INSNS (2 + factor);
4286       break;
4287 
4288     case TRUNCATE:
4289       if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4290 	{
4291 	  total = 0;
4292 	  break;
4293 	}
4294       /* FALLTHRU */
4295     default:
4296       if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4297 	return total;
4298       break;
4299     }
4300 
4301   /* Sum the costs of the sub-rtx's, plus cost of this operation,
4302      which is already in total.  */
4303 
4304   fmt = GET_RTX_FORMAT (code);
4305   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4306     if (fmt[i] == 'e')
4307       total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4308     else if (fmt[i] == 'E')
4309       for (j = 0; j < XVECLEN (x, i); j++)
4310 	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4311 
4312   return total;
4313 }
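/* Editor's sketch (not part of the GCC sources): the default cost
   scaling above, modelled stand-alone.  In GCC, COSTS_N_INSNS (N) is
   N * 4; the names below are illustrative.  */
#if 0
#define MODEL_COSTS_N_INSNS(N) ((N) * 4)

static int
model_default_mult_cost (unsigned int mode_size, unsigned int units_per_word)
{
  /* A mode FACTOR words wide needs roughly FACTOR insns per simple
     operation; schoolbook multiplication touches every pair of words,
     hence the FACTOR * FACTOR scaling used for MULT above.  */
  unsigned int factor
    = mode_size > units_per_word ? mode_size / units_per_word : 1;
  return factor * factor * MODEL_COSTS_N_INSNS (5);
}

/* E.g. a 16-byte multiply on a target with 8-byte words defaults to
   2 * 2 * COSTS_N_INSNS (5) == 80.  */
#endif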
4314 
4315 /* Fill in the structure C with information about both speed and size rtx
4316    costs for X, which is operand OPNO in an expression with code OUTER.  */
4317 
4318 void
4319 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4320 		   struct full_rtx_costs *c)
4321 {
4322   c->speed = rtx_cost (x, mode, outer, opno, true);
4323   c->size = rtx_cost (x, mode, outer, opno, false);
4324 }
4325 
4326 
4327 /* Return the cost of address expression X.
4328    Expect that X is a properly formed address reference.
4329 
4330    The SPEED parameter specifies whether costs optimized for speed or for
4331    size should be returned.  */
4332 
4333 int
4334 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4335 {
4336   /* We may be asked for the cost of various unusual addresses, such as the
4337      operands of a push instruction.  It is not worthwhile to complicate the
4338      writing of the target hook with such cases.  */
4339 
4340   if (!memory_address_addr_space_p (mode, x, as))
4341     return 1000;
4342 
4343   return targetm.address_cost (x, mode, as, speed);
4344 }
4345 
4346 /* If the target doesn't override, compute the cost as with arithmetic.  */
4347 
4348 int
4349 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4350 {
4351   return rtx_cost (x, Pmode, MEM, 0, speed);
4352 }
4353 
4354 
4355 unsigned HOST_WIDE_INT
4356 nonzero_bits (const_rtx x, machine_mode mode)
4357 {
4358   if (mode == VOIDmode)
4359     mode = GET_MODE (x);
4360   scalar_int_mode int_mode;
4361   if (!is_a <scalar_int_mode> (mode, &int_mode))
4362     return GET_MODE_MASK (mode);
4363   return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4364 }
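/* Editor's note (not part of the GCC sources): e.g. for
   (and:SI (reg X) (const_int 255)) with nothing known about X,
   nonzero_bits returns 0xff, which lets callers prove that a
   subsequent zero-extension from QImode would be redundant.  */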
4365 
4366 unsigned int
4367 num_sign_bit_copies (const_rtx x, machine_mode mode)
4368 {
4369   if (mode == VOIDmode)
4370     mode = GET_MODE (x);
4371   scalar_int_mode int_mode;
4372   if (!is_a <scalar_int_mode> (mode, &int_mode))
4373     return 1;
4374   return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4375 }
4376 
4377 /* Return true if nonzero_bits1 might recurse into both operands
4378    of X.  */
4379 
4380 static inline bool
4381 nonzero_bits_binary_arith_p (const_rtx x)
4382 {
4383   if (!ARITHMETIC_P (x))
4384     return false;
4385   switch (GET_CODE (x))
4386     {
4387     case AND:
4388     case XOR:
4389     case IOR:
4390     case UMIN:
4391     case UMAX:
4392     case SMIN:
4393     case SMAX:
4394     case PLUS:
4395     case MINUS:
4396     case MULT:
4397     case DIV:
4398     case UDIV:
4399     case MOD:
4400     case UMOD:
4401       return true;
4402     default:
4403       return false;
4404     }
4405 }
4406 
4407 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4408    It avoids exponential behavior in nonzero_bits1 when X has
4409    identical subexpressions on the first or the second level.  */
4410 
4411 static unsigned HOST_WIDE_INT
4412 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4413 		     machine_mode known_mode,
4414 		     unsigned HOST_WIDE_INT known_ret)
4415 {
4416   if (x == known_x && mode == known_mode)
4417     return known_ret;
4418 
4419   /* Try to find identical subexpressions.  If found call
4420      nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4421      precomputed value for the subexpression as KNOWN_RET.  */
4422 
4423   if (nonzero_bits_binary_arith_p (x))
4424     {
4425       rtx x0 = XEXP (x, 0);
4426       rtx x1 = XEXP (x, 1);
4427 
4428       /* Check the first level.  */
4429       if (x0 == x1)
4430 	return nonzero_bits1 (x, mode, x0, mode,
4431 			      cached_nonzero_bits (x0, mode, known_x,
4432 						   known_mode, known_ret));
4433 
4434       /* Check the second level.  */
4435       if (nonzero_bits_binary_arith_p (x0)
4436 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4437 	return nonzero_bits1 (x, mode, x1, mode,
4438 			      cached_nonzero_bits (x1, mode, known_x,
4439 						   known_mode, known_ret));
4440 
4441       if (nonzero_bits_binary_arith_p (x1)
4442 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4443 	return nonzero_bits1 (x, mode, x0, mode,
4444 			      cached_nonzero_bits (x0, mode, known_x,
4445 						   known_mode, known_ret));
4446     }
4447 
4448   return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4449 }
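/* Editor's sketch (not part of the GCC sources): why the KNOWN_X /
   KNOWN_RET scheme above matters.  On a shared chain such as
   x1 = (plus r r); x2 = (plus x1 x1); ... a naive recursion visits R
   2^N times, as the stand-alone model below counts; the wrapper above
   collapses each level to a single recomputation.  */
#if 0
struct model_node { const struct model_node *op0, *op1; };

static unsigned long model_calls;

static void
model_naive_walk (const struct model_node *n)
{
  model_calls++;
  if (n->op0)
    {
      model_naive_walk (n->op0);
      model_naive_walk (n->op1);
    }
}
/* For a chain of depth 30 built this way, model_calls exceeds 2^30.  */
#endif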
4450 
4451 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4452    We don't let nonzero_bits recur into num_sign_bit_copies, because that
4453    is less useful.  We can't allow both, because that results in exponential
4454    run time recursion.  There is a nullstone testcase that triggered
4455    this.  This macro avoids accidental uses of num_sign_bit_copies.  */
4456 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4457 
4458 /* Given an expression, X, compute which bits in X can be nonzero.
4459    We don't care about bits outside of those defined in MODE.
4460 
4461    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4462    an arithmetic operation, we can do better.  */
4463 
4464 static unsigned HOST_WIDE_INT
4465 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4466 	       machine_mode known_mode,
4467 	       unsigned HOST_WIDE_INT known_ret)
4468 {
4469   unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4470   unsigned HOST_WIDE_INT inner_nz;
4471   enum rtx_code code = GET_CODE (x);
4472   machine_mode inner_mode;
4473   unsigned int inner_width;
4474   scalar_int_mode xmode;
4475 
4476   unsigned int mode_width = GET_MODE_PRECISION (mode);
4477 
4478   if (CONST_INT_P (x))
4479     {
4480       if (SHORT_IMMEDIATES_SIGN_EXTEND
4481 	  && INTVAL (x) > 0
4482 	  && mode_width < BITS_PER_WORD
4483 	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4484 	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4485 
4486       return UINTVAL (x);
4487     }
4488 
4489   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4490     return nonzero;
4491   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4492 
4493   /* If X is wider than MODE, use its mode instead.  */
4494   if (xmode_width > mode_width)
4495     {
4496       mode = xmode;
4497       nonzero = GET_MODE_MASK (mode);
4498       mode_width = xmode_width;
4499     }
4500 
4501   if (mode_width > HOST_BITS_PER_WIDE_INT)
4502     /* Our only callers in this case look for single bit values.  So
4503        just return the mode mask.  Those tests will then be false.  */
4504     return nonzero;
4505 
4506   /* If MODE is wider than X, but both are a single word for both the host
4507      and target machines, we can compute this from which bits of the object
4508      might be nonzero in its own mode, taking into account the fact that, on
4509      CISC machines, accessing an object in a wider mode generally causes the
4510      high-order bits to become undefined, so they are not known to be zero.
4511      We extend this reasoning to RISC machines for operations that might not
4512      operate on the full registers.  */
4513   if (mode_width > xmode_width
4514       && xmode_width <= BITS_PER_WORD
4515       && xmode_width <= HOST_BITS_PER_WIDE_INT
4516       && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4517     {
4518       nonzero &= cached_nonzero_bits (x, xmode,
4519 				      known_x, known_mode, known_ret);
4520       nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4521       return nonzero;
4522     }
4523 
4524   /* Please keep nonzero_bits_binary_arith_p above in sync with
4525      the code in the switch below.  */
4526   switch (code)
4527     {
4528     case REG:
4529 #if defined(POINTERS_EXTEND_UNSIGNED)
4530       /* If pointers extend unsigned and this is a pointer in Pmode, say that
4531 	 all the bits above ptr_mode are known to be zero.  */
4532       /* As we do not know which address space the pointer is referring to,
4533 	 we can do this only if the target does not support different pointer
4534 	 or address modes depending on the address space.  */
4535       if (target_default_pointer_address_modes_p ()
4536 	  && POINTERS_EXTEND_UNSIGNED
4537 	  && xmode == Pmode
4538 	  && REG_POINTER (x)
4539 	  && !targetm.have_ptr_extend ())
4540 	nonzero &= GET_MODE_MASK (ptr_mode);
4541 #endif
4542 
4543       /* Include declared information about alignment of pointers.  */
4544       /* ??? We don't properly preserve REG_POINTER changes across
4545 	 pointer-to-integer casts, so we can't trust it except for
4546 	 things that we know must be pointers.  See execute/960116-1.c.  */
4547       if ((x == stack_pointer_rtx
4548 	   || x == frame_pointer_rtx
4549 	   || x == arg_pointer_rtx)
4550 	  && REGNO_POINTER_ALIGN (REGNO (x)))
4551 	{
4552 	  unsigned HOST_WIDE_INT alignment
4553 	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4554 
4555 #ifdef PUSH_ROUNDING
4556 	  /* If PUSH_ROUNDING is defined, it is possible for the
4557 	     stack to be momentarily aligned only to that amount,
4558 	     so we pick the least alignment.  */
4559 	  if (x == stack_pointer_rtx && PUSH_ARGS)
4560 	    {
4561 	      poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4562 	      alignment = MIN (known_alignment (rounded_1), alignment);
4563 	    }
4564 #endif
4565 
4566 	  nonzero &= ~(alignment - 1);
4567 	}
4568 
4569       {
4570 	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4571 	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4572 						  &nonzero_for_hook);
4573 
4574 	if (new_rtx)
4575 	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4576 						   known_mode, known_ret);
4577 
4578 	return nonzero_for_hook;
4579       }
4580 
4581     case MEM:
4582       /* In many, if not most, RISC machines, reading a byte from memory
4583 	 zeros the rest of the register.  Noticing that fact saves a lot
4584 	 of extra zero-extends.  */
4585       if (load_extend_op (xmode) == ZERO_EXTEND)
4586 	nonzero &= GET_MODE_MASK (xmode);
4587       break;
4588 
4589     case EQ:  case NE:
4590     case UNEQ:  case LTGT:
4591     case GT:  case GTU:  case UNGT:
4592     case LT:  case LTU:  case UNLT:
4593     case GE:  case GEU:  case UNGE:
4594     case LE:  case LEU:  case UNLE:
4595     case UNORDERED: case ORDERED:
4596       /* If this produces an integer result, we know which bits are set.
4597 	 Code here used to clear bits outside the mode of X, but that is
4598 	 now done above.  */
4599       /* Mind that MODE is the mode the caller wants to look at this
4600 	 operation in, and not the actual operation mode.  We can wind
4601 	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4602 	 that describes the results of a vector compare.  */
4603       if (GET_MODE_CLASS (xmode) == MODE_INT
4604 	  && mode_width <= HOST_BITS_PER_WIDE_INT)
4605 	nonzero = STORE_FLAG_VALUE;
4606       break;
4607 
4608     case NEG:
4609 #if 0
4610       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4611 	 and num_sign_bit_copies.  */
4612       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4613 	nonzero = 1;
4614 #endif
4615 
4616       if (xmode_width < mode_width)
4617 	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4618       break;
4619 
4620     case ABS:
4621 #if 0
4622       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4623 	 and num_sign_bit_copies.  */
4624       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4625 	nonzero = 1;
4626 #endif
4627       break;
4628 
4629     case TRUNCATE:
4630       nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4631 				       known_x, known_mode, known_ret)
4632 		  & GET_MODE_MASK (mode));
4633       break;
4634 
4635     case ZERO_EXTEND:
4636       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4637 				      known_x, known_mode, known_ret);
4638       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4639 	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4640       break;
4641 
4642     case SIGN_EXTEND:
4643       /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4644 	 Otherwise, show all the bits in the outer mode but not the inner
4645 	 may be nonzero.  */
4646       inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4647 				      known_x, known_mode, known_ret);
4648       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4649 	{
4650 	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4651 	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4652 	    inner_nz |= (GET_MODE_MASK (mode)
4653 			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4654 	}
4655 
4656       nonzero &= inner_nz;
4657       break;
4658 
4659     case AND:
4660       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4661 				       known_x, known_mode, known_ret)
4662       		 & cached_nonzero_bits (XEXP (x, 1), mode,
4663 					known_x, known_mode, known_ret);
4664       break;
4665 
4666     case XOR:   case IOR:
4667     case UMIN:  case UMAX:  case SMIN:  case SMAX:
4668       {
4669 	unsigned HOST_WIDE_INT nonzero0
4670 	   = cached_nonzero_bits (XEXP (x, 0), mode,
4671 				  known_x, known_mode, known_ret);
4672 
4673 	/* Don't call nonzero_bits for the second time if it cannot change
4674 	   anything.  */
4675 	if ((nonzero & nonzero0) != nonzero)
4676 	  nonzero &= nonzero0
4677       		     | cached_nonzero_bits (XEXP (x, 1), mode,
4678 					    known_x, known_mode, known_ret);
4679       }
4680       break;
4681 
4682     case PLUS:  case MINUS:
4683     case MULT:
4684     case DIV:   case UDIV:
4685     case MOD:   case UMOD:
4686       /* We can apply the rules of arithmetic to compute the number of
4687 	 high- and low-order zero bits of these operations.  We start by
4688 	 computing the width (position of the highest-order nonzero bit)
4689 	 and the number of low-order zero bits for each value.  */
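      /* Editor's worked example: for (plus x y) with nz0 == 0x0f and
	 nz1 == 0x03, width0 == 4, width1 == 2 and low0 == low1 == 0,
	 so result_width == 5 and the sum's nonzero bits are 0x1f.  */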
4690       {
4691 	unsigned HOST_WIDE_INT nz0
4692 	  = cached_nonzero_bits (XEXP (x, 0), mode,
4693 				 known_x, known_mode, known_ret);
4694 	unsigned HOST_WIDE_INT nz1
4695 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4696 				 known_x, known_mode, known_ret);
4697 	int sign_index = xmode_width - 1;
4698 	int width0 = floor_log2 (nz0) + 1;
4699 	int width1 = floor_log2 (nz1) + 1;
4700 	int low0 = ctz_or_zero (nz0);
4701 	int low1 = ctz_or_zero (nz1);
4702 	unsigned HOST_WIDE_INT op0_maybe_minusp
4703 	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
4704 	unsigned HOST_WIDE_INT op1_maybe_minusp
4705 	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
4706 	unsigned int result_width = mode_width;
4707 	int result_low = 0;
4708 
4709 	switch (code)
4710 	  {
4711 	  case PLUS:
4712 	    result_width = MAX (width0, width1) + 1;
4713 	    result_low = MIN (low0, low1);
4714 	    break;
4715 	  case MINUS:
4716 	    result_low = MIN (low0, low1);
4717 	    break;
4718 	  case MULT:
4719 	    result_width = width0 + width1;
4720 	    result_low = low0 + low1;
4721 	    break;
4722 	  case DIV:
4723 	    if (width1 == 0)
4724 	      break;
4725 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4726 	      result_width = width0;
4727 	    break;
4728 	  case UDIV:
4729 	    if (width1 == 0)
4730 	      break;
4731 	    result_width = width0;
4732 	    break;
4733 	  case MOD:
4734 	    if (width1 == 0)
4735 	      break;
4736 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4737 	      result_width = MIN (width0, width1);
4738 	    result_low = MIN (low0, low1);
4739 	    break;
4740 	  case UMOD:
4741 	    if (width1 == 0)
4742 	      break;
4743 	    result_width = MIN (width0, width1);
4744 	    result_low = MIN (low0, low1);
4745 	    break;
4746 	  default:
4747 	    gcc_unreachable ();
4748 	  }
4749 
4750 	if (result_width < mode_width)
4751 	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4752 
4753 	if (result_low > 0)
4754 	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4755       }
4756       break;
4757 
4758     case ZERO_EXTRACT:
4759       if (CONST_INT_P (XEXP (x, 1))
4760 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4761 	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4762       break;
4763 
4764     case SUBREG:
4765       /* If this is a SUBREG formed for a promoted variable that has
4766 	 been zero-extended, we know that at least the high-order bits
4767 	 are zero, though others might be too.  */
4768       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4769 	nonzero = GET_MODE_MASK (xmode)
4770 		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
4771 					 known_x, known_mode, known_ret);
4772 
4773       /* If the inner mode is a single word for both the host and target
4774 	 machines, we can compute this from which bits of the inner
4775 	 object might be nonzero.  */
4776       inner_mode = GET_MODE (SUBREG_REG (x));
4777       if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4778 	  && inner_width <= BITS_PER_WORD
4779 	  && inner_width <= HOST_BITS_PER_WIDE_INT)
4780 	{
4781 	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4782 					  known_x, known_mode, known_ret);
4783 
4784           /* On a typical CISC machine, accessing an object in a wider mode
4785 	     causes the high-order bits to become undefined.  So they are
4786 	     not known to be zero.
4787 
4788 	     On a typical RISC machine, we only have to worry about the way
4789 	     loads are extended.  Otherwise, if we get a reload for the inner
4790 	     part, it may be loaded from the stack, and then we may lose all
4791 	     the zero bits that existed before the store to the stack.  */
4792 	  rtx_code extend_op;
4793 	  if ((!WORD_REGISTER_OPERATIONS
4794 	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4795 		   ? val_signbit_known_set_p (inner_mode, nonzero)
4796 		   : extend_op != ZERO_EXTEND)
4797 	       || !MEM_P (SUBREG_REG (x)))
4798 	      && xmode_width > inner_width)
4799 	    nonzero
4800 	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4801 	}
4802       break;
4803 
4804     case ASHIFT:
4805     case ASHIFTRT:
4806     case LSHIFTRT:
4807     case ROTATE:
4808     case ROTATERT:
4809       /* The nonzero bits are in two classes: any bits within MODE
4810 	 that aren't in xmode are always significant.  The rest of the
4811 	 nonzero bits are those that are significant in the operand of
4812 	 the shift when shifted the appropriate number of bits.  This
4813 	 shows that high-order bits are cleared by the right shift and
4814 	 low-order bits by left shifts.  */
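      /* Editor's worked example: in QImode with operand nonzero bits
	 0xff, (lshiftrt:QI x 4) yields nonzero bits 0x0f and
	 (ashift:QI x 4) yields 0xf0.  */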
4815       if (CONST_INT_P (XEXP (x, 1))
4816 	  && INTVAL (XEXP (x, 1)) >= 0
4817 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4818 	  && INTVAL (XEXP (x, 1)) < xmode_width)
4819 	{
4820 	  int count = INTVAL (XEXP (x, 1));
4821 	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4822 	  unsigned HOST_WIDE_INT op_nonzero
4823 	    = cached_nonzero_bits (XEXP (x, 0), mode,
4824 				   known_x, known_mode, known_ret);
4825 	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4826 	  unsigned HOST_WIDE_INT outer = 0;
4827 
4828 	  if (mode_width > xmode_width)
4829 	    outer = (op_nonzero & nonzero & ~mode_mask);
4830 
4831 	  switch (code)
4832 	    {
4833 	    case ASHIFT:
4834 	      inner <<= count;
4835 	      break;
4836 
4837 	    case LSHIFTRT:
4838 	      inner >>= count;
4839 	      break;
4840 
4841 	    case ASHIFTRT:
4842 	      inner >>= count;
4843 
4844 	      /* If the sign bit may have been nonzero before the shift, we
4845 		 need to mark all the places it could have been copied to
4846 		 by the shift as possibly nonzero.  */
4847 	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4848 		inner |= (((HOST_WIDE_INT_1U << count) - 1)
4849 			  << (xmode_width - count));
4850 	      break;
4851 
4852 	    case ROTATE:
4853 	      inner = (inner << (count % xmode_width)
4854 		       | (inner >> (xmode_width - (count % xmode_width))))
4855 		      & mode_mask;
4856 	      break;
4857 
4858 	    case ROTATERT:
4859 	      inner = (inner >> (count % xmode_width)
4860 		       | (inner << (xmode_width - (count % xmode_width))))
4861 		      & mode_mask;
4862 	      break;
4863 
4864 	    default:
4865 	      gcc_unreachable ();
4866 	    }
4867 
4868 	  nonzero &= (outer | inner);
4869 	}
4870       break;
4871 
4872     case FFS:
4873     case POPCOUNT:
4874       /* This is at most the number of bits in the mode.  */
4875       nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4876       break;
4877 
4878     case CLZ:
4879       /* If CLZ has a known value at zero, then the nonzero bits are
4880 	 that value, plus the number of bits in the mode minus one.  */
4881       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4882 	nonzero
4883 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4884       else
4885 	nonzero = -1;
4886       break;
4887 
4888     case CTZ:
4889       /* If CTZ has a known value at zero, then the nonzero bits are
4890 	 that value, plus the number of bits in the mode minus one.  */
4891       if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4892 	nonzero
4893 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4894       else
4895 	nonzero = -1;
4896       break;
4897 
4898     case CLRSB:
4899       /* This is at most the number of bits in the mode minus 1.  */
4900       nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4901       break;
4902 
4903     case PARITY:
4904       nonzero = 1;
4905       break;
4906 
4907     case IF_THEN_ELSE:
4908       {
4909 	unsigned HOST_WIDE_INT nonzero_true
4910 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4911 				 known_x, known_mode, known_ret);
4912 
4913 	/* Don't call nonzero_bits for the second time if it cannot change
4914 	   anything.  */
4915 	if ((nonzero & nonzero_true) != nonzero)
4916 	  nonzero &= nonzero_true
4917       		     | cached_nonzero_bits (XEXP (x, 2), mode,
4918 					    known_x, known_mode, known_ret);
4919       }
4920       break;
4921 
4922     default:
4923       break;
4924     }
4925 
4926   return nonzero;
4927 }
4928 
4929 /* See the macro definition above.  */
4930 #undef cached_num_sign_bit_copies
4931 
4932 
4933 /* Return true if num_sign_bit_copies1 might recurse into both operands
4934    of X.  */
4935 
4936 static inline bool
4937 num_sign_bit_copies_binary_arith_p (const_rtx x)
4938 {
4939   if (!ARITHMETIC_P (x))
4940     return false;
4941   switch (GET_CODE (x))
4942     {
4943     case IOR:
4944     case AND:
4945     case XOR:
4946     case SMIN:
4947     case SMAX:
4948     case UMIN:
4949     case UMAX:
4950     case PLUS:
4951     case MINUS:
4952     case MULT:
4953       return true;
4954     default:
4955       return false;
4956     }
4957 }
4958 
4959 /* The function cached_num_sign_bit_copies is a wrapper around
4960    num_sign_bit_copies1.  It avoids exponential behavior in
4961    num_sign_bit_copies1 when X has identical subexpressions on the
4962    first or the second level.  */
4963 
4964 static unsigned int
4965 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
4966 			    const_rtx known_x, machine_mode known_mode,
4967 			    unsigned int known_ret)
4968 {
4969   if (x == known_x && mode == known_mode)
4970     return known_ret;
4971 
4972   /* Try to find identical subexpressions.  If found call
4973      num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4974      the precomputed value for the subexpression as KNOWN_RET.  */
4975 
4976   if (num_sign_bit_copies_binary_arith_p (x))
4977     {
4978       rtx x0 = XEXP (x, 0);
4979       rtx x1 = XEXP (x, 1);
4980 
4981       /* Check the first level.  */
4982       if (x0 == x1)
4983 	return
4984 	  num_sign_bit_copies1 (x, mode, x0, mode,
4985 				cached_num_sign_bit_copies (x0, mode, known_x,
4986 							    known_mode,
4987 							    known_ret));
4988 
4989       /* Check the second level.  */
4990       if (num_sign_bit_copies_binary_arith_p (x0)
4991 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4992 	return
4993 	  num_sign_bit_copies1 (x, mode, x1, mode,
4994 				cached_num_sign_bit_copies (x1, mode, known_x,
4995 							    known_mode,
4996 							    known_ret));
4997 
4998       if (num_sign_bit_copies_binary_arith_p (x1)
4999 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
5000 	return
5001 	  num_sign_bit_copies1 (x, mode, x0, mode,
5002 				cached_num_sign_bit_copies (x0, mode, known_x,
5003 							    known_mode,
5004 							    known_ret));
5005     }
5006 
5007   return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
5008 }
5009 
5010 /* Return the number of bits at the high-order end of X that are known to
5011    be equal to the sign bit.  X will be used in mode MODE.  The returned
5012    value will always be between 1 and the number of bits in MODE.  */
5013 
5014 static unsigned int
5015 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5016 		      machine_mode known_mode,
5017 		      unsigned int known_ret)
5018 {
5019   enum rtx_code code = GET_CODE (x);
5020   unsigned int bitwidth = GET_MODE_PRECISION (mode);
5021   int num0, num1, result;
5022   unsigned HOST_WIDE_INT nonzero;
5023 
5024   if (CONST_INT_P (x))
5025     {
5026       /* If the constant is negative, take its 1's complement and remask.
5027 	 Then see how many zero bits we have.  */
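      /* Editor's worked example: in QImode, -4 is 11111100, so NONZERO
	 becomes 0x03, floor_log2 (0x03) == 1, and the result is
	 8 - 1 - 1 == 6 copies of the sign bit.  */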
5028       nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5029       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5030 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5031 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5032 
5033       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5034     }
5035 
5036   scalar_int_mode xmode, inner_mode;
5037   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5038     return 1;
5039 
5040   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5041 
5042   /* For a smaller mode, just ignore the high bits.  */
5043   if (bitwidth < xmode_width)
5044     {
5045       num0 = cached_num_sign_bit_copies (x, xmode,
5046 					 known_x, known_mode, known_ret);
5047       return MAX (1, num0 - (int) (xmode_width - bitwidth));
5048     }
5049 
5050   if (bitwidth > xmode_width)
5051     {
5052       /* If this machine does not do all register operations on the entire
5053 	 register and MODE is wider than the mode of X, we can say nothing
5054 	 at all about the high-order bits.  We extend this reasoning to RISC
5055 	 machines for operations that might not operate on full registers.  */
5056       if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
5057 	return 1;
5058 
5059       /* Likewise on machines that do, if the mode of the object is smaller
5060 	 than a word and loads of that size don't sign extend, we can say
5061 	 nothing about the high order bits.  */
5062       if (xmode_width < BITS_PER_WORD
5063 	  && load_extend_op (xmode) != SIGN_EXTEND)
5064 	return 1;
5065     }
5066 
5067   /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5068      the code in the switch below.  */
5069   switch (code)
5070     {
5071     case REG:
5072 
5073 #if defined(POINTERS_EXTEND_UNSIGNED)
5074       /* If pointers extend signed and this is a pointer in Pmode, say that
5075 	 all the bits above ptr_mode are known to be sign bit copies.  */
5076       /* As we do not know which address space the pointer is referring to,
5077 	 we can do this only if the target does not support different pointer
5078 	 or address modes depending on the address space.  */
5079       if (target_default_pointer_address_modes_p ()
5080 	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5081 	  && mode == Pmode && REG_POINTER (x)
5082 	  && !targetm.have_ptr_extend ())
5083 	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5084 #endif
5085 
5086       {
5087 	unsigned int copies_for_hook = 1, copies = 1;
5088 	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5089 							 &copies_for_hook);
5090 
5091 	if (new_rtx)
5092 	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5093 					       known_mode, known_ret);
5094 
5095 	if (copies > 1 || copies_for_hook > 1)
5096 	  return MAX (copies, copies_for_hook);
5097 
5098 	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
5099       }
5100       break;
5101 
5102     case MEM:
5103       /* Some RISC machines sign-extend all loads smaller than a word.  */
5104       if (load_extend_op (xmode) == SIGN_EXTEND)
5105 	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5106       break;
5107 
5108     case SUBREG:
5109       /* If this is a SUBREG for a promoted object that is sign-extended
5110 	 and we are looking at it in a wider mode, we know that at least the
5111 	 high-order bits are known to be sign bit copies.  */
5112 
5113       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5114 	{
5115 	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5116 					     known_x, known_mode, known_ret);
5117 	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5118 	}
5119 
5120       if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5121 	{
5122 	  /* For a smaller object, just ignore the high bits.  */
5123 	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5124 	    {
5125 	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5126 						 known_x, known_mode,
5127 						 known_ret);
5128 	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5129 					   - bitwidth));
5130 	    }
5131 
5132 	  /* For paradoxical SUBREGs on machines where all register operations
5133 	     affect the entire register, just look inside.  Note that we are
5134 	     passing MODE to the recursive call, so the number of sign bit
5135 	     copies will remain relative to that mode, not the inner mode.
5136 
5137 	     This works only if loads sign extend.  Otherwise, if we get a
5138 	     reload for the inner part, it may be loaded from the stack, and
5139 	     then we lose all sign bit copies that existed before the store
5140 	     to the stack.  */
5141 	  if (WORD_REGISTER_OPERATIONS
5142 	      && load_extend_op (inner_mode) == SIGN_EXTEND
5143 	      && paradoxical_subreg_p (x)
5144 	      && MEM_P (SUBREG_REG (x)))
5145 	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5146 					       known_x, known_mode, known_ret);
5147 	}
5148       break;
5149 
5150     case SIGN_EXTRACT:
5151       if (CONST_INT_P (XEXP (x, 1)))
5152 	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5153       break;
5154 
5155     case SIGN_EXTEND:
5156       if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5157 	return (bitwidth - GET_MODE_PRECISION (inner_mode)
5158 		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5159 					      known_x, known_mode, known_ret));
5160       break;
5161 
5162     case TRUNCATE:
5163       /* For a smaller object, just ignore the high bits.  */
5164       inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5165       num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5166 					 known_x, known_mode, known_ret);
5167       return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5168 				    - bitwidth)));
5169 
5170     case NOT:
5171       return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5172 					 known_x, known_mode, known_ret);
5173 
5174     case ROTATE:       case ROTATERT:
5175       /* If we are rotating left by a number of bits less than the number
5176 	 of sign bit copies, we can just subtract that amount from the
5177 	 number.  */
5178       if (CONST_INT_P (XEXP (x, 1))
5179 	  && INTVAL (XEXP (x, 1)) >= 0
5180 	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5181 	{
5182 	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5183 					     known_x, known_mode, known_ret);
5184 	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5185 				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5186 	}
5187       break;
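      /* Editorial annotation: in SImode, if XEXP (x, 0) has 10 sign-bit
	 copies, (rotate x (const_int 2)) moves two of them away from the
	 top, leaving MAX (1, 10 - 2) == 8; for (rotatert x (const_int 2))
	 the subtrahend is bitwidth - 2 == 30, so only MAX (1, 10 - 30) == 1
	 copy can be guaranteed.  */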
5188 
5189     case NEG:
5190       /* In general, this subtracts one sign bit copy.  But if the value
5191 	 is known to be positive, the number of sign bit copies is the
5192 	 same as that of the input.  Finally, if the input has just one bit
5193 	 that might be nonzero, all the bits are copies of the sign bit.  */
5194       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5195 					 known_x, known_mode, known_ret);
5196       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5197 	return num0 > 1 ? num0 - 1 : 1;
5198 
5199       nonzero = nonzero_bits (XEXP (x, 0), mode);
5200       if (nonzero == 1)
5201 	return bitwidth;
5202 
5203       if (num0 > 1
5204 	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5205 	num0--;
5206 
5207       return num0;
5208 
5209     case IOR:   case AND:   case XOR:
5210     case SMIN:  case SMAX:  case UMIN:  case UMAX:
5211       /* Logical operations will preserve the number of sign-bit copies.
5212 	 MIN and MAX operations always return one of the operands.  */
5213       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5214 					 known_x, known_mode, known_ret);
5215       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5216 					 known_x, known_mode, known_ret);
5217 
5218       /* If the second operand is a constant clearing the high-order bits,
5219 	 then regardless of the other term we are guaranteed at least that
5220 	 many high-order zero bits, which are sign-bit copies here.  */
5221       if (code == AND
5222 	  && num1 > 1
5223 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5224 	  && CONST_INT_P (XEXP (x, 1))
5225 	  && (UINTVAL (XEXP (x, 1))
5226 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5227 	return num1;
5228 
5229       /* Similarly for IOR when setting high-order bits.  */
5230       if (code == IOR
5231 	  && num1 > 1
5232 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5233 	  && CONST_INT_P (XEXP (x, 1))
5234 	  && (UINTVAL (XEXP (x, 1))
5235 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5236 	return num1;
5237 
5238       return MIN (num0, num1);
5239 
5240     case PLUS:  case MINUS:
5241       /* For addition and subtraction, we can have a 1-bit carry.  However,
5242 	 if we are subtracting 1 from a positive number, there will not
5243 	 be such a carry.  Furthermore, if the positive number is known to
5244 	 be 0 or 1, we know the result is either -1 or 0.  */
5245 
5246       if (code == PLUS && XEXP (x, 1) == constm1_rtx
5247 	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
5248 	{
5249 	  nonzero = nonzero_bits (XEXP (x, 0), mode);
5250 	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5251 	    return (nonzero == 1 || nonzero == 0 ? bitwidth
5252 		    : bitwidth - floor_log2 (nonzero) - 1);
5253 	}
5254 
5255       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5256 					 known_x, known_mode, known_ret);
5257       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5258 					 known_x, known_mode, known_ret);
5259       result = MAX (1, MIN (num0, num1) - 1);
5260 
5261       return result;
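      /* Editorial annotation: a value with N sign-bit copies in a W-bit
	 mode fits in W - N + 1 significant bits, so the sum of two such
	 values fits in W - MIN (num0, num1) + 2 bits.  E.g. with
	 num0 == 20 and num1 == 25 in SImode the result is guaranteed
	 MIN (20, 25) - 1 == 19 copies.  */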
5262 
5263     case MULT:
5264       /* The number of bits of the product is the sum of the number of
5265 	 bits of both terms.  However, unless one of the terms is known
5266 	 to be positive, we must allow for an additional bit since negating
5267 	 a negative number can remove one sign bit copy.  */
5268 
5269       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5270 					 known_x, known_mode, known_ret);
5271       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5272 					 known_x, known_mode, known_ret);
5273 
5274       result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5275       if (result > 0
5276 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5277 	      || (((nonzero_bits (XEXP (x, 0), mode)
5278 		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5279 		  && ((nonzero_bits (XEXP (x, 1), mode)
5280 		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5281 		      != 0))))
5282 	result--;
5283 
5284       return MAX (1, result);
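      /* Editorial annotation, a tight 8-bit example of the extra-bit
	 correction: -16 (4 copies) times -8 (5 copies) gives
	 result = 8 - 4 - 3 == 1, and since both operands may be negative
	 it drops to MAX (1, 0) == 1.  Indeed (-16) * (-8) == 128 == 0x80,
	 which as a signed byte has exactly one copy of its sign bit.  */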
5285 
5286     case UDIV:
5287       /* The result must be <= the first operand.  If the first operand
5288 	 has the high bit set, we know nothing about the number of sign
5289 	 bit copies.  */
5290       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5291 	return 1;
5292       else if ((nonzero_bits (XEXP (x, 0), mode)
5293 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5294 	return 1;
5295       else
5296 	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5297 					   known_x, known_mode, known_ret);
5298 
5299     case UMOD:
5300       /* The result must be <= the second operand.  If the second operand
5301 	 has (or just might have) the high bit set, we know nothing about
5302 	 the number of sign bit copies.  */
5303       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5304 	return 1;
5305       else if ((nonzero_bits (XEXP (x, 1), mode)
5306 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5307 	return 1;
5308       else
5309 	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5310 					   known_x, known_mode, known_ret);
5311 
5312     case DIV:
5313       /* Similar to unsigned division, except that we have to worry about
5314 	 the case where the divisor is negative, in which case negating
5315 	 the quotient can cost one sign-bit copy.  */
5316       result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5317 					   known_x, known_mode, known_ret);
5318       if (result > 1
5319 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5320 	      || (nonzero_bits (XEXP (x, 1), mode)
5321 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5322 	result--;
5323 
5324       return result;
5325 
5326     case MOD:
5327       result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5328 					   known_x, known_mode, known_ret);
5329       if (result > 1
5330 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5331 	      || (nonzero_bits (XEXP (x, 1), mode)
5332 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5333 	result--;
5334 
5335       return result;
5336 
5337     case ASHIFTRT:
5338       /* Arithmetic right shifts by a constant add that many bits equal
5339 	 to the sign bit.  */
5340       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5341 					 known_x, known_mode, known_ret);
5342       if (CONST_INT_P (XEXP (x, 1))
5343 	  && INTVAL (XEXP (x, 1)) > 0
5344 	  && INTVAL (XEXP (x, 1)) < xmode_width)
5345 	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5346 
5347       return num0;
5348 
5349     case ASHIFT:
5350       /* Left shifts destroy copies.  */
5351       if (!CONST_INT_P (XEXP (x, 1))
5352 	  || INTVAL (XEXP (x, 1)) < 0
5353 	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5354 	  || INTVAL (XEXP (x, 1)) >= xmode_width)
5355 	return 1;
5356 
5357       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5358 					 known_x, known_mode, known_ret);
5359       return MAX (1, num0 - INTVAL (XEXP (x, 1)));
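      /* Editorial annotation covering both shift cases: in SImode,
	 (ashiftrt x (const_int 8)) with 4 known copies in X yields
	 MIN (32, 4 + 8) == 12, while (ashift x (const_int 3)) with 12
	 known copies keeps only MAX (1, 12 - 3) == 9 of them.  */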
5360 
5361     case IF_THEN_ELSE:
5362       num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5363 					 known_x, known_mode, known_ret);
5364       num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5365 					 known_x, known_mode, known_ret);
5366       return MIN (num0, num1);
5367 
5368     case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
5369     case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
5370     case GEU: case GTU: case LEU: case LTU:
5371     case UNORDERED: case ORDERED:
5372       /* The result is STORE_FLAG_VALUE; if it is negative, take its
5373 	 1's complement and remask.  Then see how many zero bits we have.  */
5374       nonzero = STORE_FLAG_VALUE;
5375       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5376 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5377 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5378 
5379       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5380 
5381     default:
5382       break;
5383     }
5384 
5385   /* If we haven't been able to figure it out by one of the above rules,
5386      see if some of the high-order bits are known to be zero.  If so,
5387      count those bits and return one less than that amount.  If we can't
5388      safely compute the mask for this mode, always return BITWIDTH.  */
5389 
5390   bitwidth = GET_MODE_PRECISION (mode);
5391   if (bitwidth > HOST_BITS_PER_WIDE_INT)
5392     return 1;
5393 
5394   nonzero = nonzero_bits (x, mode);
5395   return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5396 	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5397 }
5398 
5399 /* Calculate the rtx_cost of a single instruction pattern.  A return value of
5400    zero indicates an instruction pattern without a known cost.  */
5401 
5402 int
5403 pattern_cost (rtx pat, bool speed)
5404 {
5405   int i, cost;
5406   rtx set;
5407 
5408   /* Extract the single set rtx from the instruction pattern.  We
5409      can't use single_set since we only have the pattern.  We also
5410      consider PARALLELs of a normal set and a single comparison.  In
5411      that case we use the cost of the non-comparison SET operation,
5412      which is most likely to be the real cost of this operation.  */
5413   if (GET_CODE (pat) == SET)
5414     set = pat;
5415   else if (GET_CODE (pat) == PARALLEL)
5416     {
5417       set = NULL_RTX;
5418       rtx comparison = NULL_RTX;
5419 
5420       for (i = 0; i < XVECLEN (pat, 0); i++)
5421 	{
5422 	  rtx x = XVECEXP (pat, 0, i);
5423 	  if (GET_CODE (x) == SET)
5424 	    {
5425 	      if (GET_CODE (SET_SRC (x)) == COMPARE)
5426 		{
5427 		  if (comparison)
5428 		    return 0;
5429 		  comparison = x;
5430 		}
5431 	      else
5432 		{
5433 		  if (set)
5434 		    return 0;
5435 		  set = x;
5436 		}
5437 	    }
5438 	}
5439 
5440       if (!set && comparison)
5441 	set = comparison;
5442 
5443       if (!set)
5444 	return 0;
5445     }
5446   else
5447     return 0;
5448 
5449   cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5450   return cost > 0 ? cost : COSTS_N_INSNS (1);
5451 }
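/* Editorial annotation, a minimal sketch of the PARALLEL case handled
   above.  For a pattern such as

     (parallel [(set (reg:SI r0) (plus:SI (reg:SI r1) (reg:SI r2)))
		(set (reg:CC cc) (compare:CC ...))])

   the non-COMPARE SET is chosen, so the returned value is the cost of
   the PLUS source; two non-comparison SETs (or two comparisons) make
   the function give up and return 0.  */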
5452 
5453 /* Calculate the cost of a single instruction.  A return value of zero
5454    indicates an instruction pattern without a known cost.  */
5455 
5456 int
5457 insn_cost (rtx_insn *insn, bool speed)
5458 {
5459   if (targetm.insn_cost)
5460     return targetm.insn_cost (insn, speed);
5461 
5462   return pattern_cost (PATTERN (insn), speed);
5463 }
5464 
5465 /* Return an estimate of the cost of computing SEQ.  */
5466 
5467 unsigned
5468 seq_cost (const rtx_insn *seq, bool speed)
5469 {
5470   unsigned cost = 0;
5471   rtx set;
5472 
5473   for (; seq; seq = NEXT_INSN (seq))
5474     {
5475       set = single_set (seq);
5476       if (set)
5477         cost += set_rtx_cost (set, speed);
5478       else if (NONDEBUG_INSN_P (seq))
5479 	{
5480 	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5481 	  if (this_cost > 0)
5482 	    cost += this_cost;
5483 	  else
5484 	    cost++;
5485 	}
5486     }
5487 
5488   return cost;
5489 }
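/* Editorial annotation, a hedged usage sketch assuming the usual
   sequence API (start_sequence/get_insns/end_sequence); the expander
   shown is just an illustrative call site:

     start_sequence ();
     expand_mult (mode, a, b, target, 0);
     rtx_insn *seq = get_insns ();
     end_sequence ();
     unsigned cost = seq_cost (seq, optimize_insn_for_speed_p ());

   Callers typically compare such costs to pick the cheaper of two
   candidate expansions before emitting one of them.  */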
5490 
5491 /* Given an insn INSN and condition COND, return the condition in a
5492    canonical form to simplify testing by callers.  Specifically:
5493 
5494    (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5495    (2) Both operands will be machine operands; (cc0) will have been replaced.
5496    (3) If an operand is a constant, it will be the second operand.
5497    (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5498        for GE, GEU, and LEU.
5499 
5500    If the condition cannot be understood, or is an inequality floating-point
5501    comparison which needs to be reversed, 0 will be returned.
5502 
5503    If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5504 
5505    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5506    insn used in locating the condition was found.  If a replacement test
5507    of the condition is desired, it should be placed in front of that
5508    insn and we will be sure that the inputs are still valid.
5509 
5510    If WANT_REG is nonzero, we wish the condition to be relative to that
5511    register, if possible.  Therefore, do not canonicalize the condition
5512    further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
5513    to be a compare to a CC mode register.
5514 
5515    If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5516    and at INSN.  */
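
/* Editorial annotation, illustrative canonicalizations under the rules
   above: by rule (4), (le (reg:SI r) (const_int 4)) becomes
   (lt (reg:SI r) (const_int 5)) and (geu (reg:SI r) (const_int 1))
   becomes (gtu (reg:SI r) (const_int 0)); by rule (3),
   (eq (const_int 0) (reg:SI r)) is first swapped to
   (eq (reg:SI r) (const_int 0)).  */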
5517 
5518 rtx
5519 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5520 			rtx_insn **earliest,
5521 			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5522 {
5523   enum rtx_code code;
5524   rtx_insn *prev = insn;
5525   const_rtx set;
5526   rtx tem;
5527   rtx op0, op1;
5528   int reverse_code = 0;
5529   machine_mode mode;
5530   basic_block bb = BLOCK_FOR_INSN (insn);
5531 
5532   code = GET_CODE (cond);
5533   mode = GET_MODE (cond);
5534   op0 = XEXP (cond, 0);
5535   op1 = XEXP (cond, 1);
5536 
5537   if (reverse)
5538     code = reversed_comparison_code (cond, insn);
5539   if (code == UNKNOWN)
5540     return 0;
5541 
5542   if (earliest)
5543     *earliest = insn;
5544 
5545   /* If we are comparing a register with zero, see if the register is set
5546      in the previous insn to a COMPARE or a comparison operation.  Perform
5547      the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5548      in cse.c.  */
5549 
5550   while ((GET_RTX_CLASS (code) == RTX_COMPARE
5551 	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5552 	 && op1 == CONST0_RTX (GET_MODE (op0))
5553 	 && op0 != want_reg)
5554     {
5555       /* Set nonzero when we find something of interest.  */
5556       rtx x = 0;
5557 
5558       /* If comparison with cc0, import actual comparison from compare
5559 	 insn.  */
5560       if (op0 == cc0_rtx)
5561 	{
5562 	  if ((prev = prev_nonnote_insn (prev)) == 0
5563 	      || !NONJUMP_INSN_P (prev)
5564 	      || (set = single_set (prev)) == 0
5565 	      || SET_DEST (set) != cc0_rtx)
5566 	    return 0;
5567 
5568 	  op0 = SET_SRC (set);
5569 	  op1 = CONST0_RTX (GET_MODE (op0));
5570 	  if (earliest)
5571 	    *earliest = prev;
5572 	}
5573 
5574       /* If this is a COMPARE, pick up the two things being compared.  */
5575       if (GET_CODE (op0) == COMPARE)
5576 	{
5577 	  op1 = XEXP (op0, 1);
5578 	  op0 = XEXP (op0, 0);
5579 	  continue;
5580 	}
5581       else if (!REG_P (op0))
5582 	break;
5583 
5584       /* Go back to the previous insn.  Stop if it is not an INSN.  We also
5585 	 stop if it isn't a single set or if it has a REG_INC note because
5586 	 we don't want to bother dealing with it.  */
5587 
5588       prev = prev_nonnote_nondebug_insn (prev);
5589 
5590       if (prev == 0
5591 	  || !NONJUMP_INSN_P (prev)
5592 	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
5593 	  /* In cfglayout mode, there do not have to be labels at the
5594 	     beginning of a block, or jumps at the end, so the previous
5595 	     conditions would not stop us when we reach bb boundary.  */
5596 	  || BLOCK_FOR_INSN (prev) != bb)
5597 	break;
5598 
5599       set = set_of (op0, prev);
5600 
5601       if (set
5602 	  && (GET_CODE (set) != SET
5603 	      || !rtx_equal_p (SET_DEST (set), op0)))
5604 	break;
5605 
5606       /* If this is setting OP0, get what it sets it to if it looks
5607 	 relevant.  */
5608       if (set)
5609 	{
5610 	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
5611 #ifdef FLOAT_STORE_FLAG_VALUE
5612 	  REAL_VALUE_TYPE fsfv;
5613 #endif
5614 
5615 	  /* ??? We may not combine comparisons done in a CCmode with
5616 	     comparisons not done in a CCmode.  This is to aid targets
5617 	     like Alpha that have an IEEE compliant EQ instruction, and
5618 	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
5619 	     actually artificial, simply to prevent the combination, but
5620 	     should not affect other platforms.
5621 
5622 	     However, we must allow VOIDmode comparisons to match either
5623 	     CCmode or non-CCmode comparison, because some ports have
5624 	     modeless comparisons inside branch patterns.
5625 
5626 	     ??? This mode check should perhaps look more like the mode check
5627 	     in simplify_comparison in combine.  */
5628 	  if (((GET_MODE_CLASS (mode) == MODE_CC)
5629 	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5630 	      && mode != VOIDmode
5631 	      && inner_mode != VOIDmode)
5632 	    break;
5633 	  if (GET_CODE (SET_SRC (set)) == COMPARE
5634 	      || (((code == NE
5635 		    || (code == LT
5636 			&& val_signbit_known_set_p (inner_mode,
5637 						    STORE_FLAG_VALUE))
5638 #ifdef FLOAT_STORE_FLAG_VALUE
5639 		    || (code == LT
5640 			&& SCALAR_FLOAT_MODE_P (inner_mode)
5641 			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5642 			    REAL_VALUE_NEGATIVE (fsfv)))
5643 #endif
5644 		    ))
5645 		  && COMPARISON_P (SET_SRC (set))))
5646 	    x = SET_SRC (set);
5647 	  else if (((code == EQ
5648 		     || (code == GE
5649 			 && val_signbit_known_set_p (inner_mode,
5650 						     STORE_FLAG_VALUE))
5651 #ifdef FLOAT_STORE_FLAG_VALUE
5652 		     || (code == GE
5653 			 && SCALAR_FLOAT_MODE_P (inner_mode)
5654 			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5655 			     REAL_VALUE_NEGATIVE (fsfv)))
5656 #endif
5657 		     ))
5658 		   && COMPARISON_P (SET_SRC (set)))
5659 	    {
5660 	      reverse_code = 1;
5661 	      x = SET_SRC (set);
5662 	    }
5663 	  else if ((code == EQ || code == NE)
5664 		   && GET_CODE (SET_SRC (set)) == XOR)
5665 	    /* Handle sequences like:
5666 
5667 	       (set op0 (xor X Y))
5668 	       ...(eq|ne op0 (const_int 0))...
5669 
5670 	       in which case:
5671 
5672 	       (eq op0 (const_int 0)) reduces to (eq X Y)
5673 	       (ne op0 (const_int 0)) reduces to (ne X Y)
5674 
5675 	       This is the form used by MIPS16, for example.  */
5676 	    x = SET_SRC (set);
5677 	  else
5678 	    break;
5679 	}
5680 
5681       else if (reg_set_p (op0, prev))
5682 	/* If this sets OP0, but not directly, we have to give up.  */
5683 	break;
5684 
5685       if (x)
5686 	{
5687 	  /* If the caller is expecting the condition to be valid at INSN,
5688 	     make sure X doesn't change before INSN.  */
5689 	  if (valid_at_insn_p)
5690 	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5691 	      break;
5692 	  if (COMPARISON_P (x))
5693 	    code = GET_CODE (x);
5694 	  if (reverse_code)
5695 	    {
5696 	      code = reversed_comparison_code (x, prev);
5697 	      if (code == UNKNOWN)
5698 		return 0;
5699 	      reverse_code = 0;
5700 	    }
5701 
5702 	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5703 	  if (earliest)
5704 	    *earliest = prev;
5705 	}
5706     }
5707 
5708   /* If constant is first, put it last.  */
5709   if (CONSTANT_P (op0))
5710     code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5711 
5712   /* If OP0 is the result of a comparison, we weren't able to find what
5713      was really being compared, so fail.  */
5714   if (!allow_cc_mode
5715       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5716     return 0;
5717 
5718   /* Canonicalize ordered integer comparisons that involve equality
5719      (LE, GE, LEU, GEU) into strict ones, provided we can do the
5720      computation in the relevant mode without overflow.  */
5721 
5722   scalar_int_mode op0_mode;
5723   if (CONST_INT_P (op1)
5724       && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
5725       && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
5726     {
5727       HOST_WIDE_INT const_val = INTVAL (op1);
5728       unsigned HOST_WIDE_INT uconst_val = const_val;
5729       unsigned HOST_WIDE_INT max_val
5730 	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
5731 
5732       switch (code)
5733 	{
5734 	case LE:
5735 	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5736 	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
5737 	  break;
5738 
5739 	/* When cross-compiling, const_val might be sign-extended from
5740 	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
5741 	case GE:
5742 	  if ((const_val & max_val)
5743 	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
5744 	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
5745 	  break;
5746 
5747 	case LEU:
5748 	  if (uconst_val < max_val)
5749 	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
5750 	  break;
5751 
5752 	case GEU:
5753 	  if (uconst_val != 0)
5754 	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
5755 	  break;
5756 
5757 	default:
5758 	  break;
5759 	}
5760     }
5761 
5762   /* Never return CC0; return zero instead.  */
5763   if (CC0_P (op0))
5764     return 0;
5765 
5766   /* We promised to return a comparison.  */
5767   rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5768   if (COMPARISON_P (ret))
5769     return ret;
5770   return 0;
5771 }
5772 
5773 /* Given a jump insn JUMP, return the condition that will cause it to branch
5774    to its JUMP_LABEL.  If the condition cannot be understood, or is an
5775    inequality floating-point comparison which needs to be reversed, 0 will
5776    be returned.
5777 
5778    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5779    insn used in locating the condition was found.  If a replacement test
5780    of the condition is desired, it should be placed in front of that
5781    insn and we will be sure that the inputs are still valid.  If EARLIEST
5782    is null, the returned condition will be valid at INSN.
5783 
5784    If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5785    compare CC mode register.
5786 
5787    VALID_AT_INSN_P is the same as for canonicalize_condition.  */
5788 
5789 rtx
5790 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5791 	       int valid_at_insn_p)
5792 {
5793   rtx cond;
5794   int reverse;
5795   rtx set;
5796 
5797   /* If this is not a standard conditional jump, we can't parse it.  */
5798   if (!JUMP_P (jump)
5799       || ! any_condjump_p (jump))
5800     return 0;
5801   set = pc_set (jump);
5802 
5803   cond = XEXP (SET_SRC (set), 0);
5804 
5805   /* If this branches to JUMP_LABEL when the condition is false, reverse
5806      the condition.  */
5807   reverse
5808     = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5809       && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5810 
5811   return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5812 				 allow_cc_mode, valid_at_insn_p);
5813 }
5814 
5815 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5816    TARGET_MODE_REP_EXTENDED.
5817 
5818    Note that we assume that the property of
5819    TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5820    narrower than mode B.  I.e., if A is a mode narrower than B then in
5821    order to be able to operate on it in mode B, mode A needs to
5822    satisfy the requirements set by the representation of mode B.  */
5823 
5824 static void
5825 init_num_sign_bit_copies_in_rep (void)
5826 {
5827   opt_scalar_int_mode in_mode_iter;
5828   scalar_int_mode mode;
5829 
5830   FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
5831     FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
5832       {
5833 	scalar_int_mode in_mode = in_mode_iter.require ();
5834 	scalar_int_mode i;
5835 
5836 	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5837 	   extends to the next widest mode.  */
5838 	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5839 		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);
5840 
5841 	/* We are in in_mode.  Count how many bits outside of mode
5842 	   have to be copies of the sign-bit.  */
5843 	FOR_EACH_MODE (i, mode, in_mode)
5844 	  {
5845 	    /* This must always exist (for the last iteration it will be
5846 	       IN_MODE).  */
5847 	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
5848 
5849 	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5850 		/* We can only check sign-bit copies starting from the
5851 		   top-bit.  In order to be able to check the bits we
5852 		   have already seen we pretend that subsequent bits
5853 		   have to be sign-bit copies too.  */
5854 		|| num_sign_bit_copies_in_rep [in_mode][mode])
5855 	      num_sign_bit_copies_in_rep [in_mode][mode]
5856 		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5857 	  }
5858       }
5859 }
5860 
5861 /* Suppose that truncation from the machine mode of X to MODE is not a
5862    no-op.  See if there is anything special about X so that we can
5863    assume it already contains a truncated value of MODE.  */
5864 
5865 bool
5866 truncated_to_mode (machine_mode mode, const_rtx x)
5867 {
5868   /* This register has already been used in MODE without explicit
5869      truncation.  */
5870   if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5871     return true;
5872 
5873   /* See if we already satisfy the requirements of MODE.  If yes we
5874      can just switch to MODE.  */
5875   if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5876       && (num_sign_bit_copies (x, GET_MODE (x))
5877 	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5878     return true;
5879 
5880   return false;
5881 }
5882 
5883 /* Return true if RTX code CODE has a single sequence of zero or more
5884    "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
5885    entry in that case.  */
5886 
5887 static bool
5888 setup_reg_subrtx_bounds (unsigned int code)
5889 {
5890   const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5891   unsigned int i = 0;
5892   for (; format[i] != 'e'; ++i)
5893     {
5894       if (!format[i])
5895 	/* No subrtxes.  Leave start and count as 0.  */
5896 	return true;
5897       if (format[i] == 'E' || format[i] == 'V')
5898 	return false;
5899     }
5900 
5901   /* Record the sequence of 'e's.  */
5902   rtx_all_subrtx_bounds[code].start = i;
5903   do
5904     ++i;
5905   while (format[i] == 'e');
5906   rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5907   /* rtl-iter.h relies on this.  */
5908   gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5909 
5910   for (; format[i]; ++i)
5911     if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5912       return false;
5913 
5914   return true;
5915 }
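/* Editorial annotation: e.g. PLUS has format "ee", giving start 0 and
   count 2; CONST_INT has format "w" (no subrtxes), leaving start and
   count 0; codes with rtvec operands such as ASM_OPERANDS make this
   return false, and init_rtlanal below then marks them with UCHAR_MAX.  */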
5916 
5917 /* Initialize rtx_all_subrtx_bounds.  */
5918 void
5919 init_rtlanal (void)
5920 {
5921   int i;
5922   for (i = 0; i < NUM_RTX_CODE; i++)
5923     {
5924       if (!setup_reg_subrtx_bounds (i))
5925 	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5926       if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5927 	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5928     }
5929 
5930   init_num_sign_bit_copies_in_rep ();
5931 }
5932 
5933 /* Check whether this is a constant pool constant.  */
5934 bool
5935 constant_pool_constant_p (rtx x)
5936 {
5937   x = avoid_constant_pool_reference (x);
5938   return CONST_DOUBLE_P (x);
5939 }
5940 
5941 /* If M is a bitmask that selects a field of low-order bits within an item but
5942    not the entire word, return the length of the field.  Return -1 otherwise.
5943    M is used in machine mode MODE.  */
5944 
5945 int
5946 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5947 {
5948   if (mode != VOIDmode)
5949     {
5950       if (!HWI_COMPUTABLE_MODE_P (mode))
5951 	return -1;
5952       m &= GET_MODE_MASK (mode);
5953     }
5954 
5955   return exact_log2 (m + 1);
5956 }
5957 
5958 /* Return the mode of MEM's address.  */
5959 
5960 scalar_int_mode
5961 get_address_mode (rtx mem)
5962 {
5963   machine_mode mode;
5964 
5965   gcc_assert (MEM_P (mem));
5966   mode = GET_MODE (XEXP (mem, 0));
5967   if (mode != VOIDmode)
5968     return as_a <scalar_int_mode> (mode);
5969   return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5970 }
5971 
5972 /* Split up a CONST_DOUBLE or integer constant rtx
5973    into two rtx's for single words,
5974    storing in *FIRST the word that comes first in memory in the target
5975    and in *SECOND the other.
5976 
5977    TODO: This function needs to be rewritten to work on any size
5978    integer.  */
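
/* Editorial annotation, a worked example assuming a 64-bit HOST_WIDE_INT
   and 32-bit words with WORDS_BIG_ENDIAN false: for a CONST_INT holding
   0x123456789abcdef0, the low word 0x9abcdef0 has its sign bit set and
   is extended to 0xffffffff9abcdef0, the high word is 0x12345678, and
   the function stores *FIRST = low, *SECOND = high.  */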
5979 
5980 void
5981 split_double (rtx value, rtx *first, rtx *second)
5982 {
5983   if (CONST_INT_P (value))
5984     {
5985       if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5986 	{
5987 	  /* In this case the CONST_INT holds both target words.
5988 	     Extract the bits from it into two word-sized pieces.
5989 	     Sign extend each half to HOST_WIDE_INT.  */
5990 	  unsigned HOST_WIDE_INT low, high;
5991 	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5992 	  unsigned bits_per_word = BITS_PER_WORD;
5993 
5994 	  /* Set sign_bit to the most significant bit of a word.  */
5995 	  sign_bit = 1;
5996 	  sign_bit <<= bits_per_word - 1;
5997 
5998 	  /* Set mask so that all bits of the word are set.  We could
5999 	     have used 1 << BITS_PER_WORD instead of basing the
6000 	     calculation on sign_bit.  However, on machines where
6001 	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
6002 	     compiler warning, even though the code would never be
6003 	     executed.  */
6004 	  mask = sign_bit << 1;
6005 	  mask--;
6006 
6007 	  /* Set sign_extend as any remaining bits.  */
6008 	  sign_extend = ~mask;
6009 
6010 	  /* Pick the lower word and sign-extend it.  */
6011 	  low = INTVAL (value);
6012 	  low &= mask;
6013 	  if (low & sign_bit)
6014 	    low |= sign_extend;
6015 
6016 	  /* Pick the higher word, shifted to the least significant
6017 	     bits, and sign-extend it.  */
6018 	  high = INTVAL (value);
6019 	  high >>= bits_per_word - 1;
6020 	  high >>= 1;
6021 	  high &= mask;
6022 	  if (high & sign_bit)
6023 	    high |= sign_extend;
6024 
6025 	  /* Store the words in the target machine order.  */
6026 	  if (WORDS_BIG_ENDIAN)
6027 	    {
6028 	      *first = GEN_INT (high);
6029 	      *second = GEN_INT (low);
6030 	    }
6031 	  else
6032 	    {
6033 	      *first = GEN_INT (low);
6034 	      *second = GEN_INT (high);
6035 	    }
6036 	}
6037       else
6038 	{
6039 	  /* The rule for using CONST_INT for a wider mode
6040 	     is that we regard the value as signed.
6041 	     So sign-extend it.  */
6042 	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6043 	  if (WORDS_BIG_ENDIAN)
6044 	    {
6045 	      *first = high;
6046 	      *second = value;
6047 	    }
6048 	  else
6049 	    {
6050 	      *first = value;
6051 	      *second = high;
6052 	    }
6053 	}
6054     }
6055   else if (GET_CODE (value) == CONST_WIDE_INT)
6056     {
6057       /* All of this is scary code and needs to be converted to
6058 	 properly work with any size integer.  */
6059       gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6060       if (WORDS_BIG_ENDIAN)
6061 	{
6062 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6063 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6064 	}
6065       else
6066 	{
6067 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6068 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6069 	}
6070     }
6071   else if (!CONST_DOUBLE_P (value))
6072     {
6073       if (WORDS_BIG_ENDIAN)
6074 	{
6075 	  *first = const0_rtx;
6076 	  *second = value;
6077 	}
6078       else
6079 	{
6080 	  *first = value;
6081 	  *second = const0_rtx;
6082 	}
6083     }
6084   else if (GET_MODE (value) == VOIDmode
6085 	   /* This is the old way we did CONST_DOUBLE integers.  */
6086 	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6087     {
6088       /* In an integer, the words are defined as most and least significant.
6089 	 So order them by the target's convention.  */
6090       if (WORDS_BIG_ENDIAN)
6091 	{
6092 	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6093 	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
6094 	}
6095       else
6096 	{
6097 	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
6098 	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6099 	}
6100     }
6101   else
6102     {
6103       long l[2];
6104 
6105       /* Note, this converts the REAL_VALUE_TYPE to the target's
6106 	 format, splits up the floating point double and outputs
6107 	 exactly 32 bits of it into each of l[0] and l[1] --
6108 	 not necessarily BITS_PER_WORD bits.  */
6109       REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6110 
6111       /* If 32 bits is an entire word for the target, but not for the host,
6112 	 then sign-extend on the host so that the number will look the same
6113 	 way on the host that it would on the target.  See for instance
6114 	 simplify_unary_operation.  The #if is needed to avoid compiler
6115 	 warnings.  */
6116 
6117 #if HOST_BITS_PER_LONG > 32
6118       if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6119 	{
6120 	  if (l[0] & ((long) 1 << 31))
6121 	    l[0] |= ((unsigned long) (-1) << 32);
6122 	  if (l[1] & ((long) 1 << 31))
6123 	    l[1] |= ((unsigned long) (-1) << 32);
6124 	}
6125 #endif
6126 
6127       *first = GEN_INT (l[0]);
6128       *second = GEN_INT (l[1]);
6129     }
6130 }
6131 
6132 /* Return true if X is a sign_extract or zero_extract from the least
6133    significant bit.  */
6134 
6135 static bool
6136 lsb_bitfield_op_p (rtx x)
6137 {
6138   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6139     {
6140       machine_mode mode = GET_MODE (XEXP (x, 0));
6141       HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6142       HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6143       poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6144 
6145       return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6146     }
6147   return false;
6148 }
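/* Editorial annotation: with !BITS_BIG_ENDIAN,
   (zero_extract:SI (reg:SI r) (const_int 8) (const_int 0)) satisfies
   the test (pos == 0); with BITS_BIG_ENDIAN the position would have to
   be 32 - 8 == 24 to name the least significant bits.  */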
6149 
6150 /* Strip outer address "mutations" from LOC and return a pointer to the
6151    inner value.  If OUTER_CODE is nonnull, store the code of the innermost
6152    stripped expression there.
6153 
6154    "Mutations" either convert between modes or apply some kind of
6155    extension, truncation or alignment.  */
6156 
6157 rtx *
6158 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6159 {
6160   for (;;)
6161     {
6162       enum rtx_code code = GET_CODE (*loc);
6163       if (GET_RTX_CLASS (code) == RTX_UNARY)
6164 	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6165 	   used to convert between pointer sizes.  */
6166 	loc = &XEXP (*loc, 0);
6167       else if (lsb_bitfield_op_p (*loc))
6168 	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6169 	   acts as a combined truncation and extension.  */
6170 	loc = &XEXP (*loc, 0);
6171       else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6172 	/* (and ... (const_int -X)) is used to align to X bytes.  */
6173 	loc = &XEXP (*loc, 0);
6174       else if (code == SUBREG
6175                && !OBJECT_P (SUBREG_REG (*loc))
6176                && subreg_lowpart_p (*loc))
6177 	/* A lowpart (subreg (operator ...) ...) is used for mode
6178 	   conversion too.  */
6179 	loc = &SUBREG_REG (*loc);
6180       else
6181 	return loc;
6182       if (outer_code)
6183 	*outer_code = code;
6184     }
6185 }
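/* Editorial annotation, assuming a little-endian target: given
   (and:SI (subreg:SI (plus:DI (reg:DI b) (reg:DI i)) 0) (const_int -4)),
   the loop strips the alignment AND, then the lowpart SUBREG, and
   returns a pointer to the PLUS with *OUTER_CODE set to SUBREG, the
   innermost mutation stripped.  */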
6186 
6187 /* Return true if CODE applies some kind of scale.  The scaled value
6188    is the first operand and the scale is the second.  */
6189 
6190 static bool
6191 binary_scale_code_p (enum rtx_code code)
6192 {
6193   return (code == MULT
6194           || code == ASHIFT
6195           /* Needed by ARM targets.  */
6196           || code == ASHIFTRT
6197           || code == LSHIFTRT
6198           || code == ROTATE
6199           || code == ROTATERT);
6200 }
6201 
6202 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6203    (see address_info).  Return null otherwise.  */
6204 
6205 static rtx *
6206 get_base_term (rtx *inner)
6207 {
6208   if (GET_CODE (*inner) == LO_SUM)
6209     inner = strip_address_mutations (&XEXP (*inner, 0));
6210   if (REG_P (*inner)
6211       || MEM_P (*inner)
6212       || GET_CODE (*inner) == SUBREG
6213       || GET_CODE (*inner) == SCRATCH)
6214     return inner;
6215   return 0;
6216 }
6217 
6218 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6219    (see address_info).  Return null otherwise.  */
6220 
6221 static rtx *
6222 get_index_term (rtx *inner)
6223 {
6224   /* At present, only constant scales are allowed.  */
6225   if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6226     inner = strip_address_mutations (&XEXP (*inner, 0));
6227   if (REG_P (*inner)
6228       || MEM_P (*inner)
6229       || GET_CODE (*inner) == SUBREG
6230       || GET_CODE (*inner) == SCRATCH)
6231     return inner;
6232   return 0;
6233 }
6234 
6235 /* Set the segment part of address INFO to LOC, given that INNER is the
6236    unmutated value.  */
6237 
6238 static void
6239 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6240 {
6241   gcc_assert (!info->segment);
6242   info->segment = loc;
6243   info->segment_term = inner;
6244 }
6245 
6246 /* Set the base part of address INFO to LOC, given that INNER is the
6247    unmutated value.  */
6248 
6249 static void
6250 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6251 {
6252   gcc_assert (!info->base);
6253   info->base = loc;
6254   info->base_term = inner;
6255 }
6256 
6257 /* Set the index part of address INFO to LOC, given that INNER is the
6258    unmutated value.  */
6259 
6260 static void
6261 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6262 {
6263   gcc_assert (!info->index);
6264   info->index = loc;
6265   info->index_term = inner;
6266 }
6267 
6268 /* Set the displacement part of address INFO to LOC, given that INNER
6269    is the constant term.  */
6270 
6271 static void
6272 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6273 {
6274   gcc_assert (!info->disp);
6275   info->disp = loc;
6276   info->disp_term = inner;
6277 }
6278 
6279 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
6280    rest of INFO accordingly.  */
6281 
6282 static void
6283 decompose_incdec_address (struct address_info *info)
6284 {
6285   info->autoinc_p = true;
6286 
6287   rtx *base = &XEXP (*info->inner, 0);
6288   set_address_base (info, base, base);
6289   gcc_checking_assert (info->base == info->base_term);
6290 
6291   /* These addresses are only valid when the size of the addressed
6292      value is known.  */
6293   gcc_checking_assert (info->mode != VOIDmode);
6294 }
6295 
6296 /* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
6297    of INFO accordingly.  */
6298 
6299 static void
6300 decompose_automod_address (struct address_info *info)
6301 {
6302   info->autoinc_p = true;
6303 
6304   rtx *base = &XEXP (*info->inner, 0);
6305   set_address_base (info, base, base);
6306   gcc_checking_assert (info->base == info->base_term);
6307 
6308   rtx plus = XEXP (*info->inner, 1);
6309   gcc_assert (GET_CODE (plus) == PLUS);
6310 
6311   info->base_term2 = &XEXP (plus, 0);
6312   gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6313 
6314   rtx *step = &XEXP (plus, 1);
6315   rtx *inner_step = strip_address_mutations (step);
6316   if (CONSTANT_P (*inner_step))
6317     set_address_disp (info, step, inner_step);
6318   else
6319     set_address_index (info, step, inner_step);
6320 }
6321 
6322 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6323    values in [PTR, END).  Return a pointer to the end of the used array.  */
6324 
6325 static rtx **
6326 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6327 {
6328   rtx x = *loc;
6329   if (GET_CODE (x) == PLUS)
6330     {
6331       ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6332       ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6333     }
6334   else
6335     {
6336       gcc_assert (ptr != end);
6337       *ptr++ = loc;
6338     }
6339   return ptr;
6340 }
6341 
6342 /* Evaluate the likelihood of X being a base or index value, returning
6343    positive if it is likely to be a base, negative if it is likely to be
6344    an index, and 0 if we can't tell.  Make the magnitude of the return
6345    value reflect the amount of confidence we have in the answer.
6346 
6347    MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */
6348 
6349 static int
6350 baseness (rtx x, machine_mode mode, addr_space_t as,
6351 	  enum rtx_code outer_code, enum rtx_code index_code)
6352 {
6353   /* Believe *_POINTER unless the address shape requires otherwise.  */
6354   if (REG_P (x) && REG_POINTER (x))
6355     return 2;
6356   if (MEM_P (x) && MEM_POINTER (x))
6357     return 2;
6358 
6359   if (REG_P (x) && HARD_REGISTER_P (x))
6360     {
6361       /* X is a hard register.  If it only fits one of the base
6362 	 or index classes, choose that interpretation.  */
6363       int regno = REGNO (x);
6364       bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6365       bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6366       if (base_p != index_p)
6367 	return base_p ? 1 : -1;
6368     }
6369   return 0;
6370 }
6371 
6372 /* INFO->INNER describes a normal, non-automodified address.
6373    Fill in the rest of INFO accordingly.  */
6374 
6375 static void
6376 decompose_normal_address (struct address_info *info)
6377 {
6378   /* Treat the address as the sum of up to four values.  */
6379   rtx *ops[4];
6380   size_t n_ops = extract_plus_operands (info->inner, ops,
6381 					ops + ARRAY_SIZE (ops)) - ops;
6382 
6383   /* If there is more than one component, any base component is in a PLUS.  */
6384   if (n_ops > 1)
6385     info->base_outer_code = PLUS;
6386 
6387   /* Try to classify each sum operand now.  Leave those that could be
6388      either a base or an index in OPS.  */
6389   rtx *inner_ops[4];
6390   size_t out = 0;
6391   for (size_t in = 0; in < n_ops; ++in)
6392     {
6393       rtx *loc = ops[in];
6394       rtx *inner = strip_address_mutations (loc);
6395       if (CONSTANT_P (*inner))
6396 	set_address_disp (info, loc, inner);
6397       else if (GET_CODE (*inner) == UNSPEC)
6398 	set_address_segment (info, loc, inner);
6399       else
6400 	{
6401 	  /* The only other possibilities are a base or an index.  */
6402 	  rtx *base_term = get_base_term (inner);
6403 	  rtx *index_term = get_index_term (inner);
6404 	  gcc_assert (base_term || index_term);
6405 	  if (!base_term)
6406 	    set_address_index (info, loc, index_term);
6407 	  else if (!index_term)
6408 	    set_address_base (info, loc, base_term);
6409 	  else
6410 	    {
6411 	      gcc_assert (base_term == index_term);
6412 	      ops[out] = loc;
6413 	      inner_ops[out] = base_term;
6414 	      ++out;
6415 	    }
6416 	}
6417     }
6418 
6419   /* Classify the remaining OPS members as bases and indexes.  */
6420   if (out == 1)
6421     {
6422       /* If we haven't seen a base or an index yet, assume that this is
6423 	 the base.  If we were confident that another term was the base
6424 	 or index, treat the remaining operand as the other kind.  */
6425       if (!info->base)
6426 	set_address_base (info, ops[0], inner_ops[0]);
6427       else
6428 	set_address_index (info, ops[0], inner_ops[0]);
6429     }
6430   else if (out == 2)
6431     {
6432       /* In the event of a tie, assume the base comes first.  */
6433       if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6434 		    GET_CODE (*ops[1]))
6435 	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6436 		       GET_CODE (*ops[0])))
6437 	{
6438 	  set_address_base (info, ops[0], inner_ops[0]);
6439 	  set_address_index (info, ops[1], inner_ops[1]);
6440 	}
6441       else
6442 	{
6443 	  set_address_base (info, ops[1], inner_ops[1]);
6444 	  set_address_index (info, ops[0], inner_ops[0]);
6445 	}
6446     }
6447   else
6448     gcc_assert (out == 0);
6449 }
6450 
6451 /* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
6452    or VOIDmode if not known.  AS is the address space associated with LOC.
6453    OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */
6454 
6455 void
6456 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6457 		   addr_space_t as, enum rtx_code outer_code)
6458 {
6459   memset (info, 0, sizeof (*info));
6460   info->mode = mode;
6461   info->as = as;
6462   info->addr_outer_code = outer_code;
6463   info->outer = loc;
6464   info->inner = strip_address_mutations (loc, &outer_code);
6465   info->base_outer_code = outer_code;
6466   switch (GET_CODE (*info->inner))
6467     {
6468     case PRE_DEC:
6469     case PRE_INC:
6470     case POST_DEC:
6471     case POST_INC:
6472       decompose_incdec_address (info);
6473       break;
6474 
6475     case PRE_MODIFY:
6476     case POST_MODIFY:
6477       decompose_automod_address (info);
6478       break;
6479 
6480     default:
6481       decompose_normal_address (info);
6482       break;
6483     }
6484 }
6485 
6486 /* Describe address operand LOC in INFO.  */
6487 
6488 void
6489 decompose_lea_address (struct address_info *info, rtx *loc)
6490 {
6491   decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6492 }
6493 
6494 /* Describe the address of MEM X in INFO.  */
6495 
6496 void
6497 decompose_mem_address (struct address_info *info, rtx x)
6498 {
6499   gcc_assert (MEM_P (x));
6500   decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6501 		     MEM_ADDR_SPACE (x), MEM);
6502 }
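/* Editorial annotation, a hedged usage sketch: for
   x = (mem:SI (plus:SI (reg:SI b) (const_int 8))),

     struct address_info info;
     decompose_mem_address (&info, x);

   leaves *info.base_term == (reg:SI b) and *info.disp == (const_int 8),
   since the constant summand is classified as the displacement and the
   lone remaining term defaults to the base.  */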
6503 
6504 /* Update INFO after a change to the address it describes.  */
6505 
6506 void
6507 update_address (struct address_info *info)
6508 {
6509   decompose_address (info, info->outer, info->mode, info->as,
6510 		     info->addr_outer_code);
6511 }
6512 
6513 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6514    more complicated than that.  */
6515 
6516 HOST_WIDE_INT
6517 get_index_scale (const struct address_info *info)
6518 {
6519   rtx index = *info->index;
6520   if (GET_CODE (index) == MULT
6521       && CONST_INT_P (XEXP (index, 1))
6522       && info->index_term == &XEXP (index, 0))
6523     return INTVAL (XEXP (index, 1));
6524 
6525   if (GET_CODE (index) == ASHIFT
6526       && CONST_INT_P (XEXP (index, 1))
6527       && info->index_term == &XEXP (index, 0))
6528     return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6529 
6530   if (info->index == info->index_term)
6531     return 1;
6532 
6533   return 0;
6534 }
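/* Editorial annotation: (mult (reg:SI i) (const_int 4)) yields 4,
   (ashift (reg:SI i) (const_int 3)) yields 1 << 3 == 8, a bare index
   register yields 1, and anything more complicated yields 0.  */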
6535 
6536 /* Return the "index code" of INFO, in the form required by
6537    ok_for_base_p_1.  */
6538 
6539 enum rtx_code
6540 get_index_code (const struct address_info *info)
6541 {
6542   if (info->index)
6543     return GET_CODE (*info->index);
6544 
6545   if (info->disp)
6546     return GET_CODE (*info->disp);
6547 
6548   return SCRATCH;
6549 }
6550 
6551 /* Return true if RTL X contains a SYMBOL_REF.  */
6552 
6553 bool
6554 contains_symbol_ref_p (const_rtx x)
6555 {
6556   subrtx_iterator::array_type array;
6557   FOR_EACH_SUBRTX (iter, array, x, ALL)
6558     if (SYMBOL_REF_P (*iter))
6559       return true;
6560 
6561   return false;
6562 }
6563 
6564 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */
6565 
6566 bool
6567 contains_symbolic_reference_p (const_rtx x)
6568 {
6569   subrtx_iterator::array_type array;
6570   FOR_EACH_SUBRTX (iter, array, x, ALL)
6571     if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6572       return true;
6573 
6574   return false;
6575 }
6576 
6577 /* Return true if RTL X contains a constant pool address.  */
6578 
6579 bool
6580 contains_constant_pool_address_p (const_rtx x)
6581 {
6582   subrtx_iterator::array_type array;
6583   FOR_EACH_SUBRTX (iter, array, x, ALL)
6584     if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
6585       return true;
6586 
6587   return false;
6588 }
6589 
6590 
6591 /* Return true if X contains a thread-local symbol.  */
6592 
6593 bool
6594 tls_referenced_p (const_rtx x)
6595 {
6596   if (!targetm.have_tls)
6597     return false;
6598 
6599   subrtx_iterator::array_type array;
6600   FOR_EACH_SUBRTX (iter, array, x, ALL)
6601     if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6602       return true;
6603   return false;
6604 }
6605