1 /* Instruction scheduling pass.  This file computes dependencies between
2    instructions.
3    Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4    1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5    Free Software Foundation, Inc.
6    Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
7    and currently maintained by, Jim Wilson (wilson@cygnus.com).
8 
9 This file is part of GCC.
10 
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15 
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19 for more details.
20 
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3.  If not see
23 <http://www.gnu.org/licenses/>.  */
24 
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "toplev.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "hard-reg-set.h"
33 #include "regs.h"
34 #include "function.h"
35 #include "flags.h"
36 #include "insn-config.h"
37 #include "insn-attr.h"
38 #include "except.h"
39 #include "toplev.h"
40 #include "recog.h"
41 #include "sched-int.h"
42 #include "params.h"
43 #include "cselib.h"
44 #include "ira.h"
45 #include "target.h"
46 
47 #ifdef INSN_SCHEDULING
48 
49 #ifdef ENABLE_CHECKING
50 #define CHECK (true)
51 #else
52 #define CHECK (false)
53 #endif
54 
55 /* In deps->last_pending_memory_flush, this marks JUMP_INSNs that weren't
56    added to the list because of flush_pending_lists; such a jump stands just
57    for itself and not for any other pending memory reads/writes.  */
58 #define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
59 #define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
60 
61 /* Holds current parameters for the dependency analyzer.  */
62 struct sched_deps_info_def *sched_deps_info;
63 
64 /* The data is specific to the Haifa scheduler.  */
65 VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
66 
67 /* Return the major type present in DS.  */
68 enum reg_note
69 ds_to_dk (ds_t ds)
70 {
71   if (ds & DEP_TRUE)
72     return REG_DEP_TRUE;
73 
74   if (ds & DEP_OUTPUT)
75     return REG_DEP_OUTPUT;
76 
77   gcc_assert (ds & DEP_ANTI);
78 
79   return REG_DEP_ANTI;
80 }
81 
82 /* Return equivalent dep_status.  */
83 ds_t
84 dk_to_ds (enum reg_note dk)
85 {
86   switch (dk)
87     {
88     case REG_DEP_TRUE:
89       return DEP_TRUE;
90 
91     case REG_DEP_OUTPUT:
92       return DEP_OUTPUT;
93 
94     default:
95       gcc_assert (dk == REG_DEP_ANTI);
96       return DEP_ANTI;
97     }
98 }
99 
100 /* Functions to operate with dependence information container - dep_t.  */
101 
102 /* Init DEP with the arguments.  */
103 void
104 init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
105 {
106   DEP_PRO (dep) = pro;
107   DEP_CON (dep) = con;
108   DEP_TYPE (dep) = type;
109   DEP_STATUS (dep) = ds;
110 }
111 
112 /* Init DEP with the arguments.
113    While most of the scheduler (including targets) needs only the major type
114    of the dependency, it is convenient to hide the full dep_status from it.  */
115 void
116 init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
117 {
118   ds_t ds;
119 
120   if ((current_sched_info->flags & USE_DEPS_LIST))
121     ds = dk_to_ds (kind);
122   else
123     ds = -1;
124 
125   init_dep_1 (dep, pro, con, kind, ds);
126 }
127 
128 /* Make a copy of FROM in TO.  */
129 static void
130 copy_dep (dep_t to, dep_t from)
131 {
132   memcpy (to, from, sizeof (*to));
133 }
134 
135 static void dump_ds (FILE *, ds_t);
136 
137 /* Define flags for dump_dep ().  */
138 
139 /* Dump producer of the dependence.  */
140 #define DUMP_DEP_PRO (2)
141 
142 /* Dump consumer of the dependence.  */
143 #define DUMP_DEP_CON (4)
144 
145 /* Dump type of the dependence.  */
146 #define DUMP_DEP_TYPE (8)
147 
148 /* Dump status of the dependence.  */
149 #define DUMP_DEP_STATUS (16)
150 
151 /* Dump all information about the dependence.  */
152 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
153 		      |DUMP_DEP_STATUS)
154 
155 /* Dump DEP to DUMP.
156    FLAGS is a bit mask specifying what information about DEP needs
157    to be printed.
158    If FLAGS has the very first bit set, then dump all information about DEP
159    and propagate this bit into the callee dump functions.  */
160 static void
161 dump_dep (FILE *dump, dep_t dep, int flags)
162 {
163   if (flags & 1)
164     flags |= DUMP_DEP_ALL;
165 
166   fprintf (dump, "<");
167 
168   if (flags & DUMP_DEP_PRO)
169     fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
170 
171   if (flags & DUMP_DEP_CON)
172     fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
173 
174   if (flags & DUMP_DEP_TYPE)
175     {
176       char t;
177       enum reg_note type = DEP_TYPE (dep);
178 
179       switch (type)
180 	{
181 	case REG_DEP_TRUE:
182 	  t = 't';
183 	  break;
184 
185 	case REG_DEP_OUTPUT:
186 	  t = 'o';
187 	  break;
188 
189 	case REG_DEP_ANTI:
190 	  t = 'a';
191 	  break;
192 
193 	default:
194 	  gcc_unreachable ();
195 	  break;
196 	}
197 
198       fprintf (dump, "%c; ", t);
199     }
200 
201   if (flags & DUMP_DEP_STATUS)
202     {
203       if (current_sched_info->flags & USE_DEPS_LIST)
204 	dump_ds (dump, DEP_STATUS (dep));
205     }
206 
207   fprintf (dump, ">");
208 }
209 
210 /* Default flags for dump_dep ().  */
211 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
212 
213 /* Dump all fields of DEP to STDERR.  */
214 void
215 sd_debug_dep (dep_t dep)
216 {
217   dump_dep (stderr, dep, 1);
218   fprintf (stderr, "\n");
219 }
220 
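/* A minimal usage sketch for the dump machinery above (illustrative only;
   F stands for any open FILE * and DEP for any dep_t, neither being a name
   used elsewhere in this file): to print just the producer and the type of
   a dependence one could write

     dump_dep (f, dep, DUMP_DEP_PRO | DUMP_DEP_TYPE);

   sd_debug_dep above is the debugger-friendly wrapper that dumps every
   field of DEP to stderr.  */
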
221 /* Determine whether DEP is a dependency link of a non-debug insn on a
222    debug insn.  */
223 
224 static inline bool
225 depl_on_debug_p (dep_link_t dep)
226 {
227   return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
228 	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
229 }
230 
231 /* Functions to operate with a single link from the dependencies lists -
232    dep_link_t.  */
233 
234 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
235    PREV_NEXTP.  */
236 static void
237 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
238 {
239   dep_link_t next = *prev_nextp;
240 
241   gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
242 	      && DEP_LINK_NEXT (l) == NULL);
243 
244   /* Init node being inserted.  */
245   DEP_LINK_PREV_NEXTP (l) = prev_nextp;
246   DEP_LINK_NEXT (l) = next;
247 
248   /* Fix next node.  */
249   if (next != NULL)
250     {
251       gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
252 
253       DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
254     }
255 
256   /* Fix prev node.  */
257   *prev_nextp = l;
258 }
259 
260 /* Add dep_link LINK to deps_list L.  */
261 static void
262 add_to_deps_list (dep_link_t link, deps_list_t l)
263 {
264   attach_dep_link (link, &DEPS_LIST_FIRST (l));
265 
266   /* Don't count debug deps.  */
267   if (!depl_on_debug_p (link))
268     ++DEPS_LIST_N_LINKS (l);
269 }
270 
271 /* Detach dep_link L from the list.  */
272 static void
273 detach_dep_link (dep_link_t l)
274 {
275   dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
276   dep_link_t next = DEP_LINK_NEXT (l);
277 
278   *prev_nextp = next;
279 
280   if (next != NULL)
281     DEP_LINK_PREV_NEXTP (next) = prev_nextp;
282 
283   DEP_LINK_PREV_NEXTP (l) = NULL;
284   DEP_LINK_NEXT (l) = NULL;
285 }
286 
287 /* Remove link LINK from list LIST.  */
288 static void
289 remove_from_deps_list (dep_link_t link, deps_list_t list)
290 {
291   detach_dep_link (link);
292 
293   /* Don't count debug deps.  */
294   if (!depl_on_debug_p (link))
295     --DEPS_LIST_N_LINKS (list);
296 }
297 
298 /* Move link LINK from list FROM to list TO.  */
299 static void
300 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
301 {
302   remove_from_deps_list (link, from);
303   add_to_deps_list (link, to);
304 }
305 
306 /* Return true if LINK is not attached to any list.  */
307 static bool
308 dep_link_is_detached_p (dep_link_t link)
309 {
310   return DEP_LINK_PREV_NEXTP (link) == NULL;
311 }
312 
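/* An illustrative sketch (not called anywhere) of what the PREV_NEXTP
   back-pointer buys us: a link can be unhooked in O(1) without knowing
   which deps_list it is currently on.  Assuming L is an attached
   dep_link_t:

     if (!dep_link_is_detached_p (l))
       detach_dep_link (l);

   detach_dep_link uses only the link's own pointers and never touches the
   list head, which is why remove_from_deps_list has to adjust
   DEPS_LIST_N_LINKS separately.  */
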
313 /* Pool to hold all dependency nodes (dep_node_t).  */
314 static alloc_pool dn_pool;
315 
316 /* Number of dep_nodes out there.  */
317 static int dn_pool_diff = 0;
318 
319 /* Create a dep_node.  */
320 static dep_node_t
321 create_dep_node (void)
322 {
323   dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
324   dep_link_t back = DEP_NODE_BACK (n);
325   dep_link_t forw = DEP_NODE_FORW (n);
326 
327   DEP_LINK_NODE (back) = n;
328   DEP_LINK_NEXT (back) = NULL;
329   DEP_LINK_PREV_NEXTP (back) = NULL;
330 
331   DEP_LINK_NODE (forw) = n;
332   DEP_LINK_NEXT (forw) = NULL;
333   DEP_LINK_PREV_NEXTP (forw) = NULL;
334 
335   ++dn_pool_diff;
336 
337   return n;
338 }
339 
340 /* Delete dep_node N.  N must not be connected to any deps_list.  */
341 static void
342 delete_dep_node (dep_node_t n)
343 {
344   gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
345 	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));
346 
347   --dn_pool_diff;
348 
349   pool_free (dn_pool, n);
350 }
351 
352 /* Pool to hold dependencies lists (deps_list_t).  */
353 static alloc_pool dl_pool;
354 
355 /* Number of deps_lists out there.  */
356 static int dl_pool_diff = 0;
357 
358 /* Functions to operate with dependences lists - deps_list_t.  */
359 
360 /* Return true if list L is empty.  */
361 static bool
362 deps_list_empty_p (deps_list_t l)
363 {
364   return DEPS_LIST_N_LINKS (l) == 0;
365 }
366 
367 /* Create a new deps_list.  */
368 static deps_list_t
369 create_deps_list (void)
370 {
371   deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
372 
373   DEPS_LIST_FIRST (l) = NULL;
374   DEPS_LIST_N_LINKS (l) = 0;
375 
376   ++dl_pool_diff;
377   return l;
378 }
379 
380 /* Free deps_list L.  */
381 static void
382 free_deps_list (deps_list_t l)
383 {
384   gcc_assert (deps_list_empty_p (l));
385 
386   --dl_pool_diff;
387 
388   pool_free (dl_pool, l);
389 }
390 
391 /* Return true if there are no dep_nodes or deps_lists out there.
392    After the region is scheduled all the dependency nodes and lists
393    should [generally] be returned to the pool.  */
394 bool
395 deps_pools_are_empty_p (void)
396 {
397   return dn_pool_diff == 0 && dl_pool_diff == 0;
398 }
399 
400 /* Remove all elements from L.  */
401 static void
402 clear_deps_list (deps_list_t l)
403 {
404   do
405     {
406       dep_link_t link = DEPS_LIST_FIRST (l);
407 
408       if (link == NULL)
409 	break;
410 
411       remove_from_deps_list (link, l);
412     }
413   while (1);
414 }
415 
416 static regset reg_pending_sets;
417 static regset reg_pending_clobbers;
418 static regset reg_pending_uses;
419 static enum reg_pending_barrier_mode reg_pending_barrier;
420 
421 /* Hard registers implicitly clobbered or used (or may be implicitly
422    clobbered or used) by the currently analyzed insn.  For example, an
423    insn may have a constraint that allows only one register class.  Even
424    if there is currently no hard register in the insn, that particular
425    hard register will appear in the insn after the reload pass because
426    the constraint requires it.  */
427 static HARD_REG_SET implicit_reg_pending_clobbers;
428 static HARD_REG_SET implicit_reg_pending_uses;
429 
430 /* To speed up the test for duplicate dependency links we keep a
431    record of dependencies created by add_dependence when the average
432    number of instructions in a basic block is very large.
433 
434    Studies have shown that there are typically around 5 instructions between
435    branches for typical C code.  So we can make a guess that the average
436    basic block is approximately 5 instructions long; we will choose 100X
437    the average size as a very large basic block.
438 
439    Each insn has associated bitmaps for its dependencies.  Each bitmap
440    has enough entries to represent a dependency on any other insn in
441    the insn chain.  If the bitmap for the true dependency cache is
442    allocated, then the rest of the caches are allocated as well.  */
443 static bitmap_head *true_dependency_cache = NULL;
444 static bitmap_head *output_dependency_cache = NULL;
445 static bitmap_head *anti_dependency_cache = NULL;
446 static bitmap_head *spec_dependency_cache = NULL;
447 static int cache_size;
448 
449 static int deps_may_trap_p (const_rtx);
450 static void add_dependence_list (rtx, rtx, int, enum reg_note);
451 static void add_dependence_list_and_free (struct deps_desc *, rtx,
452 					  rtx *, int, enum reg_note);
453 static void delete_all_dependences (rtx);
454 static void fixup_sched_groups (rtx);
455 
456 static void flush_pending_lists (struct deps_desc *, rtx, int, int);
457 static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
458 static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
459 static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
460 
461 static bool sched_has_condition_p (const_rtx);
462 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
463 
464 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
465 							  rtx, rtx);
466 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
467 
468 #ifdef ENABLE_CHECKING
469 static void check_dep (dep_t, bool);
470 #endif
471 
472 /* Return nonzero if a load of the memory reference MEM can cause a trap.  */
473 
474 static int
475 deps_may_trap_p (const_rtx mem)
476 {
477   const_rtx addr = XEXP (mem, 0);
478 
479   if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
480     {
481       const_rtx t = get_reg_known_value (REGNO (addr));
482       if (t)
483 	addr = t;
484     }
485   return rtx_addr_can_trap_p (addr);
486 }
487 
488 
489 /* Find the condition under which INSN is executed.  If REV is not NULL,
490    it is set to TRUE when the returned comparison should be reversed
491    to get the actual condition.  */
492 static rtx
493 sched_get_condition_with_rev (const_rtx insn, bool *rev)
494 {
495   rtx pat = PATTERN (insn);
496   rtx src;
497 
498   if (pat == 0)
499     return 0;
500 
501   if (rev)
502     *rev = false;
503 
504   if (GET_CODE (pat) == COND_EXEC)
505     return COND_EXEC_TEST (pat);
506 
507   if (!any_condjump_p (insn) || !onlyjump_p (insn))
508     return 0;
509 
510   src = SET_SRC (pc_set (insn));
511 
512   if (XEXP (src, 2) == pc_rtx)
513     return XEXP (src, 0);
514   else if (XEXP (src, 1) == pc_rtx)
515     {
516       rtx cond = XEXP (src, 0);
517       enum rtx_code revcode = reversed_comparison_code (cond, insn);
518 
519       if (revcode == UNKNOWN)
520 	return 0;
521 
522       if (rev)
523 	*rev = true;
524       return cond;
525     }
526 
527   return 0;
528 }
529 
530 /* True when we can find a condition under which INSN is executed.  */
531 static bool
532 sched_has_condition_p (const_rtx insn)
533 {
534   return !! sched_get_condition_with_rev (insn, NULL);
535 }
536 
537 
538 
539 /* Return nonzero if conditions COND1 and COND2 can never be both true.  */
540 static int
541 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
542 {
543   if (COMPARISON_P (cond1)
544       && COMPARISON_P (cond2)
545       && GET_CODE (cond1) ==
546 	  (rev1==rev2
547 	  ? reversed_comparison_code (cond2, NULL)
548 	  : GET_CODE (cond2))
549       && XEXP (cond1, 0) == XEXP (cond2, 0)
550       && XEXP (cond1, 1) == XEXP (cond2, 1))
551     return 1;
552   return 0;
553 }
554 
555 /* Return true if INSN1 and INSN2 can never depend on one another because
556    the conditions under which they are executed are mutually exclusive.  */
557 bool
558 sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
559 {
560   rtx cond1, cond2;
561   bool rev1 = false, rev2 = false;
562 
563   /* df doesn't handle conditional lifetimes entirely correctly;
564      calls mess up the conditional lifetimes.  */
565   if (!CALL_P (insn1) && !CALL_P (insn2))
566     {
567       cond1 = sched_get_condition_with_rev (insn1, &rev1);
568       cond2 = sched_get_condition_with_rev (insn2, &rev2);
569       if (cond1 && cond2
570 	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
571 	  /* Make sure first instruction doesn't affect condition of second
572 	     instruction if switched.  */
573 	  && !modified_in_p (cond1, insn2)
574 	  /* Make sure second instruction doesn't affect condition of first
575 	     instruction if switched.  */
576 	  && !modified_in_p (cond2, insn1))
577 	return true;
578     }
579   return false;
580 }
581 
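/* For example (illustrative RTL), two COND_EXEC insns predicated on

     (eq (reg 100) (const_int 0))     and     (ne (reg 100) (const_int 0))

   can never execute together, so no dependence needs to be recorded
   between them, provided neither insn modifies (reg 100).  */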
582 
583 /* Return true if INSN can potentially be speculated with type DS.  */
584 bool
585 sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
586 {
587   if (HAS_INTERNAL_DEP (insn))
588     return false;
589 
590   if (!NONJUMP_INSN_P (insn))
591     return false;
592 
593   if (SCHED_GROUP_P (insn))
594     return false;
595 
596   if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
597     return false;
598 
599   if (side_effects_p (PATTERN (insn)))
600     return false;
601 
602   if (ds & BE_IN_SPEC)
603     /* The following instructions, which depend on a speculatively scheduled
604        instruction, cannot themselves be speculatively scheduled.  */
605     {
606       if (may_trap_p (PATTERN (insn)))
607 	/* If instruction might trap, it cannot be speculatively scheduled.
608 	   For control speculation it's obvious why and for data speculation
609 	   it's because the insn might get wrong input if speculation
610 	   wasn't successful.  */
611 	return false;
612 
613       if ((ds & BE_IN_DATA)
614 	  && sched_has_condition_p (insn))
615 	/* If this is a predicated instruction, then it cannot be
616 	   speculatively scheduled.  See PR35659.  */
617 	return false;
618     }
619 
620   return true;
621 }
622 
623 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
624    initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
625    and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
626    This function is used to switch sd_iterator to the next list.
627    !!! For internal use only.  Might consider moving it to sched-int.h.  */
628 void
629 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
630 	      deps_list_t *list_ptr, bool *resolved_p_ptr)
631 {
632   sd_list_types_def types = *types_ptr;
633 
634   if (types & SD_LIST_HARD_BACK)
635     {
636       *list_ptr = INSN_HARD_BACK_DEPS (insn);
637       *resolved_p_ptr = false;
638       *types_ptr = types & ~SD_LIST_HARD_BACK;
639     }
640   else if (types & SD_LIST_SPEC_BACK)
641     {
642       *list_ptr = INSN_SPEC_BACK_DEPS (insn);
643       *resolved_p_ptr = false;
644       *types_ptr = types & ~SD_LIST_SPEC_BACK;
645     }
646   else if (types & SD_LIST_FORW)
647     {
648       *list_ptr = INSN_FORW_DEPS (insn);
649       *resolved_p_ptr = false;
650       *types_ptr = types & ~SD_LIST_FORW;
651     }
652   else if (types & SD_LIST_RES_BACK)
653     {
654       *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
655       *resolved_p_ptr = true;
656       *types_ptr = types & ~SD_LIST_RES_BACK;
657     }
658   else if (types & SD_LIST_RES_FORW)
659     {
660       *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
661       *resolved_p_ptr = true;
662       *types_ptr = types & ~SD_LIST_RES_FORW;
663     }
664   else
665     {
666       *list_ptr = NULL;
667       *resolved_p_ptr = false;
668       *types_ptr = SD_LIST_NONE;
669     }
670 }
671 
672 /* Return the total size of INSN's lists defined by LIST_TYPES.  */
673 int
674 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
675 {
676   int size = 0;
677 
678   while (list_types != SD_LIST_NONE)
679     {
680       deps_list_t list;
681       bool resolved_p;
682 
683       sd_next_list (insn, &list_types, &list, &resolved_p);
684       if (list)
685 	size += DEPS_LIST_N_LINKS (list);
686     }
687 
688   return size;
689 }
690 
691 /* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
692 
693 bool
694 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
695 {
696   while (list_types != SD_LIST_NONE)
697     {
698       deps_list_t list;
699       bool resolved_p;
700 
701       sd_next_list (insn, &list_types, &list, &resolved_p);
702       if (!deps_list_empty_p (list))
703 	return false;
704     }
705 
706   return true;
707 }
708 
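/* Illustrative note: the sd_list_types_def values are bit masks and may be
   OR'ed together, so the total number of backward dependencies (hard and
   speculative) of an insn can be queried as

     int n_back = sd_lists_size (insn, SD_LIST_BACK);

   where SD_LIST_BACK is defined in sched-int.h as
   SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK.  */
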
709 /* Initialize data for INSN.  */
710 void
711 sd_init_insn (rtx insn)
712 {
713   INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
714   INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
715   INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
716   INSN_FORW_DEPS (insn) = create_deps_list ();
717   INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
718 
719   if (DEBUG_INSN_P (insn))
720     DEBUG_INSN_SCHED_P (insn) = TRUE;
721 
722   /* ??? It would be nice to allocate dependency caches here.  */
723 }
724 
725 /* Free data for INSN.  */
726 void
727 sd_finish_insn (rtx insn)
728 {
729   /* ??? It would be nice to deallocate dependency caches here.  */
730 
731   if (DEBUG_INSN_P (insn))
732     {
733       gcc_assert (DEBUG_INSN_SCHED_P (insn));
734       DEBUG_INSN_SCHED_P (insn) = FALSE;
735     }
736 
737   free_deps_list (INSN_HARD_BACK_DEPS (insn));
738   INSN_HARD_BACK_DEPS (insn) = NULL;
739 
740   free_deps_list (INSN_SPEC_BACK_DEPS (insn));
741   INSN_SPEC_BACK_DEPS (insn) = NULL;
742 
743   free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
744   INSN_RESOLVED_BACK_DEPS (insn) = NULL;
745 
746   free_deps_list (INSN_FORW_DEPS (insn));
747   INSN_FORW_DEPS (insn) = NULL;
748 
749   free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
750   INSN_RESOLVED_FORW_DEPS (insn) = NULL;
751 }
752 
753 /* Find a dependency between producer PRO and consumer CON.
754    Search through resolved dependency lists if RESOLVED_P is true.
755    If no such dependency is found return NULL,
756    otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
757    with an iterator pointing to it.  */
758 static dep_t
759 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
760 			      sd_iterator_def *sd_it_ptr)
761 {
762   sd_list_types_def pro_list_type;
763   sd_list_types_def con_list_type;
764   sd_iterator_def sd_it;
765   dep_t dep;
766   bool found_p = false;
767 
768   if (resolved_p)
769     {
770       pro_list_type = SD_LIST_RES_FORW;
771       con_list_type = SD_LIST_RES_BACK;
772     }
773   else
774     {
775       pro_list_type = SD_LIST_FORW;
776       con_list_type = SD_LIST_BACK;
777     }
778 
779   /* Walk through either the back list of CON or the forw list of PRO,
780      depending on which one is shorter.  */
781   if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
782     {
783       /* Find the dep_link with producer PRO in consumer's back_deps.  */
784       FOR_EACH_DEP (con, con_list_type, sd_it, dep)
785 	if (DEP_PRO (dep) == pro)
786 	  {
787 	    found_p = true;
788 	    break;
789 	  }
790     }
791   else
792     {
793       /* Find the dep_link with consumer CON in producer's forw_deps.  */
794       FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
795 	if (DEP_CON (dep) == con)
796 	  {
797 	    found_p = true;
798 	    break;
799 	  }
800     }
801 
802   if (found_p)
803     {
804       if (sd_it_ptr != NULL)
805 	*sd_it_ptr = sd_it;
806 
807       return dep;
808     }
809 
810   return NULL;
811 }
812 
813 /* Find a dependency between producer PRO and consumer CON.
814    Use the dependency caches [if available] to check if the dependency is
815    present at all.  Search through resolved dependency lists if RESOLVED_P
816    is true.  Return the dependency, or NULL if none was found.  */
817 dep_t
818 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
819 {
820   if (true_dependency_cache != NULL)
821     /* Avoiding the list walk below can cut compile times dramatically
822        for some code.  */
823     {
824       int elem_luid = INSN_LUID (pro);
825       int insn_luid = INSN_LUID (con);
826 
827       gcc_assert (output_dependency_cache != NULL
828 		  && anti_dependency_cache != NULL);
829 
830       if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
831 	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
832 	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
833 	return NULL;
834     }
835 
836   return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
837 }
838 
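/* A minimal usage sketch (PRODUCER and CONSUMER are hypothetical insns):

     dep_t dep = sd_find_dep_between (producer, consumer, false);

     if (dep != NULL)
       ... the unresolved dependence exists; DEP_TYPE (dep) gives its kind ...

   The cache lookup above only short-circuits the "no dependence at all"
   answer; the dep_t itself still comes from walking the lists.  */
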
839 /* Add or update a dependence described by DEP.
840    MEM1 and MEM2, if non-null, correspond to memory locations in case of
841    data speculation.
842 
843    The function returns a value indicating if an old entry has been changed
844    or a new entry has been added to insn's backward deps.
845 
846    This function merely checks if producer and consumer are the same insn
847    and doesn't create a dep in this case.  Actual manipulation of
848    dependence data structures is performed in add_or_update_dep_1.  */
849 static enum DEPS_ADJUST_RESULT
850 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
851 {
852   rtx elem = DEP_PRO (dep);
853   rtx insn = DEP_CON (dep);
854 
855   gcc_assert (INSN_P (insn) && INSN_P (elem));
856 
857   /* Don't depend an insn on itself.  */
858   if (insn == elem)
859     {
860       if (sched_deps_info->generate_spec_deps)
861         /* INSN has an internal dependence, which we can't overcome.  */
862         HAS_INTERNAL_DEP (insn) = 1;
863 
864       return DEP_NODEP;
865     }
866 
867   return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
868 }
869 
870 /* Ask dependency caches what needs to be done for dependence DEP.
871    Return DEP_CREATED if new dependence should be created and there is no
872    need to try to find one searching the dependencies lists.
873    Return DEP_PRESENT if there already is a dependence described by DEP and
874    hence nothing is to be done.
875    Return DEP_CHANGED if there already is a dependence, but it should be
876    updated to incorporate additional information from DEP.  */
877 static enum DEPS_ADJUST_RESULT
878 ask_dependency_caches (dep_t dep)
879 {
880   int elem_luid = INSN_LUID (DEP_PRO (dep));
881   int insn_luid = INSN_LUID (DEP_CON (dep));
882 
883   gcc_assert (true_dependency_cache != NULL
884 	      && output_dependency_cache != NULL
885 	      && anti_dependency_cache != NULL);
886 
887   if (!(current_sched_info->flags & USE_DEPS_LIST))
888     {
889       enum reg_note present_dep_type;
890 
891       if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
892 	present_dep_type = REG_DEP_TRUE;
893       else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
894 	present_dep_type = REG_DEP_OUTPUT;
895       else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
896 	present_dep_type = REG_DEP_ANTI;
897       else
898 	/* There is no existing dep so it should be created.  */
899 	return DEP_CREATED;
900 
901       if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
902 	/* DEP does not add anything to the existing dependence.  */
903 	return DEP_PRESENT;
904     }
905   else
906     {
907       ds_t present_dep_types = 0;
908 
909       if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
910 	present_dep_types |= DEP_TRUE;
911       if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
912 	present_dep_types |= DEP_OUTPUT;
913       if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
914 	present_dep_types |= DEP_ANTI;
915 
916       if (present_dep_types == 0)
917 	/* There is no existing dep so it should be created.  */
918 	return DEP_CREATED;
919 
920       if (!(current_sched_info->flags & DO_SPECULATION)
921 	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
922 	{
923 	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
924 	      == present_dep_types)
925 	    /* DEP does not add anything to the existing dependence.  */
926 	    return DEP_PRESENT;
927 	}
928       else
929 	{
930 	  /* Only true dependencies can be data speculative and
931 	     only anti dependencies can be control speculative.  */
932 	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
933 		      == present_dep_types);
934 
935 	  /* if (DEP is SPECULATIVE) then
936 	     ..we should update DEP_STATUS
937 	     else
938 	     ..we should reset existing dep to non-speculative.  */
939 	}
940     }
941 
942   return DEP_CHANGED;
943 }
944 
945 /* Set dependency caches according to DEP.  */
946 static void
947 set_dependency_caches (dep_t dep)
948 {
949   int elem_luid = INSN_LUID (DEP_PRO (dep));
950   int insn_luid = INSN_LUID (DEP_CON (dep));
951 
952   if (!(current_sched_info->flags & USE_DEPS_LIST))
953     {
954       switch (DEP_TYPE (dep))
955 	{
956 	case REG_DEP_TRUE:
957 	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
958 	  break;
959 
960 	case REG_DEP_OUTPUT:
961 	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
962 	  break;
963 
964 	case REG_DEP_ANTI:
965 	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
966 	  break;
967 
968 	default:
969 	  gcc_unreachable ();
970 	}
971     }
972   else
973     {
974       ds_t ds = DEP_STATUS (dep);
975 
976       if (ds & DEP_TRUE)
977 	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
978       if (ds & DEP_OUTPUT)
979 	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
980       if (ds & DEP_ANTI)
981 	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
982 
983       if (ds & SPECULATIVE)
984 	{
985 	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
986 	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
987 	}
988     }
989 }
990 
991 /* The type of dependence DEP has changed from OLD_TYPE.  Update the
992    dependency caches accordingly.  */
993 static void
994 update_dependency_caches (dep_t dep, enum reg_note old_type)
995 {
996   int elem_luid = INSN_LUID (DEP_PRO (dep));
997   int insn_luid = INSN_LUID (DEP_CON (dep));
998 
999   /* Clear the corresponding cache entry because the type of the link
1000      may have changed.  Keep the entries if we use_deps_list.  */
1001   if (!(current_sched_info->flags & USE_DEPS_LIST))
1002     {
1003       switch (old_type)
1004 	{
1005 	case REG_DEP_OUTPUT:
1006 	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1007 	  break;
1008 
1009 	case REG_DEP_ANTI:
1010 	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1011 	  break;
1012 
1013 	default:
1014 	  gcc_unreachable ();
1015 	}
1016     }
1017 
1018   set_dependency_caches (dep);
1019 }
1020 
1021 /* Convert a dependence pointed to by SD_IT to be non-speculative.  */
1022 static void
1023 change_spec_dep_to_hard (sd_iterator_def sd_it)
1024 {
1025   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1026   dep_link_t link = DEP_NODE_BACK (node);
1027   dep_t dep = DEP_NODE_DEP (node);
1028   rtx elem = DEP_PRO (dep);
1029   rtx insn = DEP_CON (dep);
1030 
1031   move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1032 
1033   DEP_STATUS (dep) &= ~SPECULATIVE;
1034 
1035   if (true_dependency_cache != NULL)
1036     /* Clear the cache entry.  */
1037     bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1038 		      INSN_LUID (elem));
1039 }
1040 
1041 /* Update DEP to incorporate information from NEW_DEP.
1042    SD_IT points to DEP in case it should be moved to another list.
1043    MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1044    data-speculative dependence should be updated.  */
1045 static enum DEPS_ADJUST_RESULT
1046 update_dep (dep_t dep, dep_t new_dep,
1047 	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1048 	    rtx mem1 ATTRIBUTE_UNUSED,
1049 	    rtx mem2 ATTRIBUTE_UNUSED)
1050 {
1051   enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1052   enum reg_note old_type = DEP_TYPE (dep);
1053 
1054   /* If this is a more restrictive type of dependence than the
1055      existing one, then change the existing dependence to this
1056      type.  */
1057   if ((int) DEP_TYPE (new_dep) < (int) old_type)
1058     {
1059       DEP_TYPE (dep) = DEP_TYPE (new_dep);
1060       res = DEP_CHANGED;
1061     }
1062 
1063   if (current_sched_info->flags & USE_DEPS_LIST)
1064     /* Update DEP_STATUS.  */
1065     {
1066       ds_t dep_status = DEP_STATUS (dep);
1067       ds_t ds = DEP_STATUS (new_dep);
1068       ds_t new_status = ds | dep_status;
1069 
1070       if (new_status & SPECULATIVE)
1071 	/* Either existing dep or a dep we're adding or both are
1072 	   speculative.  */
1073 	{
1074 	  if (!(ds & SPECULATIVE)
1075 	      || !(dep_status & SPECULATIVE))
1076 	    /* The new dep can't be speculative.  */
1077 	    {
1078 	      new_status &= ~SPECULATIVE;
1079 
1080 	      if (dep_status & SPECULATIVE)
1081 		/* The old dep was speculative, but now it
1082 		   isn't.  */
1083 		change_spec_dep_to_hard (sd_it);
1084 	    }
1085 	  else
1086 	    {
1087 	      /* Both are speculative.  Merge probabilities.  */
1088 	      if (mem1 != NULL)
1089 		{
1090 		  dw_t dw;
1091 
1092 		  dw = estimate_dep_weak (mem1, mem2);
1093 		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
1094 		}
1095 
1096 	      new_status = ds_merge (dep_status, ds);
1097 	    }
1098 	}
1099 
1100       ds = new_status;
1101 
1102       if (dep_status != ds)
1103 	{
1104 	  DEP_STATUS (dep) = ds;
1105 	  res = DEP_CHANGED;
1106 	}
1107     }
1108 
1109   if (true_dependency_cache != NULL
1110       && res == DEP_CHANGED)
1111     update_dependency_caches (dep, old_type);
1112 
1113   return res;
1114 }
1115 
1116 /* Add or update a dependence described by DEP.
1117    MEM1 and MEM2, if non-null, correspond to memory locations in case of
1118    data speculation.
1119 
1120    The function returns a value indicating if an old entry has been changed
1121    or a new entry has been added to insn's backward deps or nothing has
1122    been updated at all.  */
1123 static enum DEPS_ADJUST_RESULT
1124 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1125 		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1126 {
1127   bool maybe_present_p = true;
1128   bool present_p = false;
1129 
1130   gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1131 	      && DEP_PRO (new_dep) != DEP_CON (new_dep));
1132 
1133 #ifdef ENABLE_CHECKING
1134   check_dep (new_dep, mem1 != NULL);
1135 #endif
1136 
1137   if (true_dependency_cache != NULL)
1138     {
1139       switch (ask_dependency_caches (new_dep))
1140 	{
1141 	case DEP_PRESENT:
1142 	  return DEP_PRESENT;
1143 
1144 	case DEP_CHANGED:
1145 	  maybe_present_p = true;
1146 	  present_p = true;
1147 	  break;
1148 
1149 	case DEP_CREATED:
1150 	  maybe_present_p = false;
1151 	  present_p = false;
1152 	  break;
1153 
1154 	default:
1155 	  gcc_unreachable ();
1156 	  break;
1157 	}
1158     }
1159 
1160   /* Check that we don't already have this dependence.  */
1161   if (maybe_present_p)
1162     {
1163       dep_t present_dep;
1164       sd_iterator_def sd_it;
1165 
1166       gcc_assert (true_dependency_cache == NULL || present_p);
1167 
1168       present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1169 						  DEP_CON (new_dep),
1170 						  resolved_p, &sd_it);
1171 
1172       if (present_dep != NULL)
1173 	/* We found an existing dependency between ELEM and INSN.  */
1174 	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1175       else
1176 	/* We didn't find a dep; it shouldn't be present in the cache.  */
1177 	gcc_assert (!present_p);
1178     }
1179 
1180   /* Might want to check one level of transitivity to save conses.
1181      This check should be done in maybe_add_or_update_dep_1.
1182      Since we made it to add_or_update_dep_1, we must create
1183      (or update) a link.  */
1184 
1185   if (mem1 != NULL_RTX)
1186     {
1187       gcc_assert (sched_deps_info->generate_spec_deps);
1188       DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1189 					   estimate_dep_weak (mem1, mem2));
1190     }
1191 
1192   sd_add_dep (new_dep, resolved_p);
1193 
1194   return DEP_CREATED;
1195 }
1196 
1197 /* Initialize BACK_LIST_PTR with consumer's backward list and
1198    FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
1199    initialize with lists that hold resolved deps.  */
1200 static void
1201 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1202 			 deps_list_t *back_list_ptr,
1203 			 deps_list_t *forw_list_ptr)
1204 {
1205   rtx con = DEP_CON (dep);
1206 
1207   if (!resolved_p)
1208     {
1209       if ((current_sched_info->flags & DO_SPECULATION)
1210 	  && (DEP_STATUS (dep) & SPECULATIVE))
1211 	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1212       else
1213 	*back_list_ptr = INSN_HARD_BACK_DEPS (con);
1214 
1215       *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1216     }
1217   else
1218     {
1219       *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1220       *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1221     }
1222 }
1223 
1224 /* Add dependence described by DEP.
1225    If RESOLVED_P is true treat the dependence as a resolved one.  */
1226 void
1227 sd_add_dep (dep_t dep, bool resolved_p)
1228 {
1229   dep_node_t n = create_dep_node ();
1230   deps_list_t con_back_deps;
1231   deps_list_t pro_forw_deps;
1232   rtx elem = DEP_PRO (dep);
1233   rtx insn = DEP_CON (dep);
1234 
1235   gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1236 
1237   if ((current_sched_info->flags & DO_SPECULATION)
1238       && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1239     DEP_STATUS (dep) &= ~SPECULATIVE;
1240 
1241   copy_dep (DEP_NODE_DEP (n), dep);
1242 
1243   get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1244 
1245   add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1246 
1247 #ifdef ENABLE_CHECKING
1248   check_dep (dep, false);
1249 #endif
1250 
1251   add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1252 
1253   /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1254      in the bitmap caches of dependency information.  */
1255   if (true_dependency_cache != NULL)
1256     set_dependency_caches (dep);
1257 }
1258 
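/* Callers normally build the dep descriptor on the stack and hand it off,
   as sd_copy_back_deps below does.  An illustrative sketch (PRO and CON
   are hypothetical insns):

     dep_def _d, *d = &_d;

     init_dep (d, pro, con, REG_DEP_TRUE);
     sd_add_dep (d, false);

   sd_add_dep copies the descriptor into a freshly allocated dep_node, so
   the stack copy need not outlive the call.  */
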
1259 /* Add or update a dependence described by DEP.  If RESOLVED_P is true,
1260    treat the dependence as a resolved one.
1261    This function is a convenience wrapper around add_or_update_dep_1.  */
1262 enum DEPS_ADJUST_RESULT
1263 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1264 {
1265   return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1266 }
1267 
1268 /* Resolve the dependence pointed to by SD_IT.
1269    SD_IT will advance to the next element.  */
1270 void
1271 sd_resolve_dep (sd_iterator_def sd_it)
1272 {
1273   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1274   dep_t dep = DEP_NODE_DEP (node);
1275   rtx pro = DEP_PRO (dep);
1276   rtx con = DEP_CON (dep);
1277 
1278   if ((current_sched_info->flags & DO_SPECULATION)
1279       && (DEP_STATUS (dep) & SPECULATIVE))
1280     move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1281 		   INSN_RESOLVED_BACK_DEPS (con));
1282   else
1283     move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1284 		   INSN_RESOLVED_BACK_DEPS (con));
1285 
1286   move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1287 		 INSN_RESOLVED_FORW_DEPS (pro));
1288 }
1289 
1290 /* Make TO depend on all the FROM's producers.
1291    If RESOLVED_P is true add dependencies to the resolved lists.  */
1292 void
1293 sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1294 {
1295   sd_list_types_def list_type;
1296   sd_iterator_def sd_it;
1297   dep_t dep;
1298 
1299   list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1300 
1301   FOR_EACH_DEP (from, list_type, sd_it, dep)
1302     {
1303       dep_def _new_dep, *new_dep = &_new_dep;
1304 
1305       copy_dep (new_dep, dep);
1306       DEP_CON (new_dep) = to;
1307       sd_add_dep (new_dep, resolved_p);
1308     }
1309 }
1310 
1311 /* Remove a dependency referred to by SD_IT.
1312    SD_IT will point to the next dependence after removal.  */
1313 void
1314 sd_delete_dep (sd_iterator_def sd_it)
1315 {
1316   dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1317   dep_t dep = DEP_NODE_DEP (n);
1318   rtx pro = DEP_PRO (dep);
1319   rtx con = DEP_CON (dep);
1320   deps_list_t con_back_deps;
1321   deps_list_t pro_forw_deps;
1322 
1323   if (true_dependency_cache != NULL)
1324     {
1325       int elem_luid = INSN_LUID (pro);
1326       int insn_luid = INSN_LUID (con);
1327 
1328       bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1329       bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1330       bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1331 
1332       if (current_sched_info->flags & DO_SPECULATION)
1333 	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1334     }
1335 
1336   get_back_and_forw_lists (dep, sd_it.resolved_p,
1337 			   &con_back_deps, &pro_forw_deps);
1338 
1339   remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1340   remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1341 
1342   delete_dep_node (n);
1343 }
1344 
1345 /* Dump size of the lists.  */
1346 #define DUMP_LISTS_SIZE (2)
1347 
1348 /* Dump dependencies of the lists.  */
1349 #define DUMP_LISTS_DEPS (4)
1350 
1351 /* Dump all information about the lists.  */
1352 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1353 
1354 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1355    FLAGS is a bit mask specifying what information about the lists needs
1356    to be printed.
1357    If FLAGS has the very first bit set, then dump all information about
1358    the lists and propagate this bit into the callee dump functions.  */
1359 static void
1360 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1361 {
1362   sd_iterator_def sd_it;
1363   dep_t dep;
1364   int all;
1365 
1366   all = (flags & 1);
1367 
1368   if (all)
1369     flags |= DUMP_LISTS_ALL;
1370 
1371   fprintf (dump, "[");
1372 
1373   if (flags & DUMP_LISTS_SIZE)
1374     fprintf (dump, "%d; ", sd_lists_size (insn, types));
1375 
1376   if (flags & DUMP_LISTS_DEPS)
1377     {
1378       FOR_EACH_DEP (insn, types, sd_it, dep)
1379 	{
1380 	  dump_dep (dump, dep, dump_dep_flags | all);
1381 	  fprintf (dump, " ");
1382 	}
1383     }
1384 }
1385 
1386 /* Dump all information about deps_lists of INSN specified by TYPES
1387    to STDERR.  */
1388 void
1389 sd_debug_lists (rtx insn, sd_list_types_def types)
1390 {
1391   dump_lists (stderr, insn, types, 1);
1392   fprintf (stderr, "\n");
1393 }
1394 
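/* An illustrative sketch of walking an insn's backward dependencies with
   the iterator macros used throughout this file (INSN is hypothetical):

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       ... examine DEP_PRO (dep), DEP_TYPE (dep), etc. ...

   To delete or resolve dependencies while iterating, use the explicit
   sd_iterator_start / sd_iterator_cond form as delete_all_dependences
   does below.  */
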
1395 /* A convenience wrapper to operate on an entire list.  */
1396 
1397 static void
1398 add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1399 {
1400   for (; list; list = XEXP (list, 1))
1401     {
1402       if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1403 	add_dependence (insn, XEXP (list, 0), dep_type);
1404     }
1405 }
1406 
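/* An illustrative call (the arguments here are just an example): to make
   INSN depend on every insn recorded in an INSN_LIST chain, regardless of
   conditional execution, one would write

     add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                          REG_DEP_ANTI);

   Passing 0 for UNCOND instead skips producers whose execution condition
   is provably mutually exclusive with INSN's.  */
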
1407 /* Similar, but free *LISTP at the same time, when the context
1408    is not readonly.  */
1409 
1410 static void
1411 add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1412                               int uncond, enum reg_note dep_type)
1413 {
1414   rtx list, next;
1415 
1416   if (deps->readonly)
1417     {
1418       add_dependence_list (insn, *listp, uncond, dep_type);
1419       return;
1420     }
1421 
1422   for (list = *listp, *listp = NULL; list ; list = next)
1423     {
1424       next = XEXP (list, 1);
1425       if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1426 	add_dependence (insn, XEXP (list, 0), dep_type);
1427       free_INSN_LIST_node (list);
1428     }
1429 }
1430 
1431 /* Remove all occurrences of INSN from LIST.  Return the number of
1432    occurrences removed.  */
1433 
1434 static int
1435 remove_from_dependence_list (rtx insn, rtx* listp)
1436 {
1437   int removed = 0;
1438 
1439   while (*listp)
1440     {
1441       if (XEXP (*listp, 0) == insn)
1442         {
1443           remove_free_INSN_LIST_node (listp);
1444           removed++;
1445           continue;
1446         }
1447 
1448       listp = &XEXP (*listp, 1);
1449     }
1450 
1451   return removed;
1452 }
1453 
1454 /* Same as above, but process two lists at once.  */
1455 static int
1456 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1457 {
1458   int removed = 0;
1459 
1460   while (*listp)
1461     {
1462       if (XEXP (*listp, 0) == insn)
1463         {
1464           remove_free_INSN_LIST_node (listp);
1465           remove_free_EXPR_LIST_node (exprp);
1466           removed++;
1467           continue;
1468         }
1469 
1470       listp = &XEXP (*listp, 1);
1471       exprp = &XEXP (*exprp, 1);
1472     }
1473 
1474   return removed;
1475 }
1476 
1477 /* Clear all dependencies for an insn.  */
1478 static void
1479 delete_all_dependences (rtx insn)
1480 {
1481   sd_iterator_def sd_it;
1482   dep_t dep;
1483 
1484   /* The loop below could be optimized to clear the caches and back_deps
1485      in one call, but that would provoke duplication of code from
1486      sd_delete_dep ().  */
1487 
1488   for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1489        sd_iterator_cond (&sd_it, &dep);)
1490     sd_delete_dep (sd_it);
1491 }
1492 
1493 /* All insns in a scheduling group except the first should only have
1494    dependencies on the previous insn in the group.  So we find the
1495    first instruction in the scheduling group by walking the dependence
1496    chains backwards. Then we add the dependencies for the group to
1497    the previous nonnote insn.  */
1498 
1499 static void
1500 fixup_sched_groups (rtx insn)
1501 {
1502   sd_iterator_def sd_it;
1503   dep_t dep;
1504   rtx prev_nonnote;
1505 
1506   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1507     {
1508       rtx i = insn;
1509       rtx pro = DEP_PRO (dep);
1510 
1511       do
1512 	{
1513 	  i = prev_nonnote_insn (i);
1514 
1515 	  if (pro == i)
1516 	    goto next_link;
1517 	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1518 
1519       if (! sched_insns_conditions_mutex_p (i, pro))
1520 	add_dependence (i, pro, DEP_TYPE (dep));
1521     next_link:;
1522     }
1523 
1524   delete_all_dependences (insn);
1525 
1526   prev_nonnote = prev_nonnote_nondebug_insn (insn);
1527   if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1528       && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1529     add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1530 }
1531 
1532 /* Process an insn's memory dependencies.  There are four kinds of
1533    dependencies:
1534 
1535    (0) read dependence: read follows read
1536    (1) true dependence: read follows write
1537    (2) output dependence: write follows write
1538    (3) anti dependence: write follows read
1539 
1540    We are careful to build only dependencies which actually exist, and
1541    use transitivity to avoid building too many links.  */
1542 
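/* For instance (illustrative pseudo operations on the same location A):

     r1 = MEM[a];   MEM[a] = r2;     anti dependence    (write follows read)
     MEM[a] = r1;   r2 = MEM[a];     true dependence    (read follows write)
     MEM[a] = r1;   MEM[a] = r2;     output dependence  (write follows write)
     r1 = MEM[a];   r2 = MEM[a];     read dependence    (no ordering needed)

   Only the first three require an edge in the dependency graph.  */
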
1543 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1544    The MEM is a memory reference contained within INSN, which we are saving
1545    so that we can do memory aliasing on it.  */
1546 
1547 static void
1548 add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1549 			 rtx insn, rtx mem)
1550 {
1551   rtx *insn_list;
1552   rtx *mem_list;
1553   rtx link;
1554 
1555   gcc_assert (!deps->readonly);
1556   if (read_p)
1557     {
1558       insn_list = &deps->pending_read_insns;
1559       mem_list = &deps->pending_read_mems;
1560       if (!DEBUG_INSN_P (insn))
1561 	deps->pending_read_list_length++;
1562     }
1563   else
1564     {
1565       insn_list = &deps->pending_write_insns;
1566       mem_list = &deps->pending_write_mems;
1567       deps->pending_write_list_length++;
1568     }
1569 
1570   link = alloc_INSN_LIST (insn, *insn_list);
1571   *insn_list = link;
1572 
1573   if (sched_deps_info->use_cselib)
1574     {
1575       mem = shallow_copy_rtx (mem);
1576       XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
1577     }
1578   link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1579   *mem_list = link;
1580 }
1581 
1582 /* Make a dependency between every memory reference on the pending lists
1583    and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1584    dependencies for a read operation, similarly with FOR_WRITE.  */
1585 
1586 static void
1587 flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1588 		     int for_write)
1589 {
1590   if (for_write)
1591     {
1592       add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1593                                     1, REG_DEP_ANTI);
1594       if (!deps->readonly)
1595         {
1596           free_EXPR_LIST_list (&deps->pending_read_mems);
1597           deps->pending_read_list_length = 0;
1598         }
1599     }
1600 
1601   add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1602 				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1603 
1604   add_dependence_list_and_free (deps, insn,
1605                                 &deps->last_pending_memory_flush, 1,
1606                                 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1607   if (!deps->readonly)
1608     {
1609       free_EXPR_LIST_list (&deps->pending_write_mems);
1610       deps->pending_write_list_length = 0;
1611 
1612       deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1613       deps->pending_flush_length = 1;
1614     }
1615 }
1616 
1617 /* The instruction whose dependencies we are analyzing.  */
1618 static rtx cur_insn = NULL_RTX;
1619 
1620 /* Implement hooks for haifa scheduler.  */
1621 
1622 static void
1623 haifa_start_insn (rtx insn)
1624 {
1625   gcc_assert (insn && !cur_insn);
1626 
1627   cur_insn = insn;
1628 }
1629 
1630 static void
1631 haifa_finish_insn (void)
1632 {
1633   cur_insn = NULL;
1634 }
1635 
1636 void
1637 haifa_note_reg_set (int regno)
1638 {
1639   SET_REGNO_REG_SET (reg_pending_sets, regno);
1640 }
1641 
1642 void
1643 haifa_note_reg_clobber (int regno)
1644 {
1645   SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1646 }
1647 
1648 void
1649 haifa_note_reg_use (int regno)
1650 {
1651   SET_REGNO_REG_SET (reg_pending_uses, regno);
1652 }
1653 
1654 static void
1655 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1656 {
1657   if (!(ds & SPECULATIVE))
1658     {
1659       mem = NULL_RTX;
1660       pending_mem = NULL_RTX;
1661     }
1662   else
1663     gcc_assert (ds & BEGIN_DATA);
1664 
1665   {
1666     dep_def _dep, *dep = &_dep;
1667 
1668     init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1669                 current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
1670     maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1671   }
1672 
1673 }
1674 
1675 static void
1676 haifa_note_dep (rtx elem, ds_t ds)
1677 {
1678   dep_def _dep;
1679   dep_t dep = &_dep;
1680 
1681   init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1682   maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1683 }
1684 
1685 static void
1686 note_reg_use (int r)
1687 {
1688   if (sched_deps_info->note_reg_use)
1689     sched_deps_info->note_reg_use (r);
1690 }
1691 
1692 static void
1693 note_reg_set (int r)
1694 {
1695   if (sched_deps_info->note_reg_set)
1696     sched_deps_info->note_reg_set (r);
1697 }
1698 
1699 static void
1700 note_reg_clobber (int r)
1701 {
1702   if (sched_deps_info->note_reg_clobber)
1703     sched_deps_info->note_reg_clobber (r);
1704 }
1705 
1706 static void
1707 note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1708 {
1709   if (sched_deps_info->note_mem_dep)
1710     sched_deps_info->note_mem_dep (m1, m2, e, ds);
1711 }
1712 
1713 static void
1714 note_dep (rtx e, ds_t ds)
1715 {
1716   if (sched_deps_info->note_dep)
1717     sched_deps_info->note_dep (e, ds);
1718 }
1719 
1720 /* Return the reg_note corresponding to DS.  */
1721 enum reg_note
1722 ds_to_dt (ds_t ds)
1723 {
1724   if (ds & DEP_TRUE)
1725     return REG_DEP_TRUE;
1726   else if (ds & DEP_OUTPUT)
1727     return REG_DEP_OUTPUT;
1728   else
1729     {
1730       gcc_assert (ds & DEP_ANTI);
1731       return REG_DEP_ANTI;
1732     }
1733 }
1734 
1735 
1736 
1737 /* Functions for computation of info needed for register pressure
1738    sensitive insn scheduling.  */
1739 
1740 
1741 /* Allocate and return reg_use_data structure for REGNO and INSN.  */
1742 static struct reg_use_data *
1743 create_insn_reg_use (int regno, rtx insn)
1744 {
1745   struct reg_use_data *use;
1746 
1747   use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1748   use->regno = regno;
1749   use->insn = insn;
1750   use->next_insn_use = INSN_REG_USE_LIST (insn);
1751   INSN_REG_USE_LIST (insn) = use;
1752   return use;
1753 }
1754 
1755 /* Allocate and return reg_set_data structure for REGNO and INSN.  */
1756 static struct reg_set_data *
1757 create_insn_reg_set (int regno, rtx insn)
1758 {
1759   struct reg_set_data *set;
1760 
1761   set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1762   set->regno = regno;
1763   set->insn = insn;
1764   set->next_insn_set = INSN_REG_SET_LIST (insn);
1765   INSN_REG_SET_LIST (insn) = set;
1766   return set;
1767 }
1768 
1769 /* Set up insn register uses for INSN and dependency context DEPS.  */
1770 static void
1771 setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1772 {
1773   unsigned i;
1774   reg_set_iterator rsi;
1775   rtx list;
1776   struct reg_use_data *use, *use2, *next;
1777   struct deps_reg *reg_last;
1778 
1779   EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1780     {
1781       if (i < FIRST_PSEUDO_REGISTER
1782 	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1783 	continue;
1784 
1785       if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1786 	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
1787 	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1788 	/* Ignore use which is not dying.  */
1789 	continue;
1790 
1791       use = create_insn_reg_use (i, insn);
1792       use->next_regno_use = use;
1793       reg_last = &deps->reg_last[i];
1794 
1795       /* Create the cycle list of uses.  */
1796       for (list = reg_last->uses; list; list = XEXP (list, 1))
1797 	{
1798 	  use2 = create_insn_reg_use (i, XEXP (list, 0));
1799 	  next = use->next_regno_use;
1800 	  use->next_regno_use = use2;
1801 	  use2->next_regno_use = next;
1802 	}
1803     }
1804 }
1805 
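/* The next_regno_use links built above form a circular list over all the
   uses of the same dying register in the current region.  An illustrative
   traversal (START is one element of some insn's INSN_REG_USE_LIST):

     struct reg_use_data *u = start;
     do
       {
         ... visit u->insn ...
         u = u->next_regno_use;
       }
     while (u != start);

   The cycle always returns to START, so termination is guaranteed.  */
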
1806 /* Register pressure info for the currently processed insn.  */
1807 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1808 
1809 /* Return TRUE if INSN has the use structure for REGNO.  */
1810 static bool
1811 insn_use_p (rtx insn, int regno)
1812 {
1813   struct reg_use_data *use;
1814 
1815   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1816     if (use->regno == regno)
1817       return true;
1818   return false;
1819 }
1820 
1821 /* Update the register pressure info after the birth of pseudo register
1822    REGNO in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively,
1823    whether the register is clobbered or unused after the insn.  */
1824 static void
1825 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
1826 {
1827   int incr, new_incr;
1828   enum reg_class cl;
1829 
1830   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1831   cl = sched_regno_cover_class[regno];
1832   if (cl != NO_REGS)
1833     {
1834       incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1835       if (clobber_p)
1836 	{
1837 	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
1838 	  reg_pressure_info[cl].clobber_increase = new_incr;
1839 	}
1840       else if (unused_p)
1841 	{
1842 	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
1843 	  reg_pressure_info[cl].unused_set_increase = new_incr;
1844 	}
1845       else
1846 	{
1847 	  new_incr = reg_pressure_info[cl].set_increase + incr;
1848 	  reg_pressure_info[cl].set_increase = new_incr;
1849 	  if (! insn_use_p (insn, regno))
1850 	    reg_pressure_info[cl].change += incr;
1851 	  create_insn_reg_set (regno, insn);
1852 	}
1853       gcc_assert (new_incr < (1 << INCREASE_BITS));
1854     }
1855 }
1856 
1857 /* Like mark_insn_pseudo_birth except that NREGS says how many hard
1858    registers are involved in the birth.  */
1859 static void
1860 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
1861 			    bool clobber_p, bool unused_p)
1862 {
1863   enum reg_class cl;
1864   int new_incr, last = regno + nregs;
1865 
1866   while (regno < last)
1867     {
1868       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1869       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1870 	{
1871 	  cl = sched_regno_cover_class[regno];
1872 	  if (cl != NO_REGS)
1873 	    {
1874 	      if (clobber_p)
1875 		{
1876 		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
1877 		  reg_pressure_info[cl].clobber_increase = new_incr;
1878 		}
1879 	      else if (unused_p)
1880 		{
1881 		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
1882 		  reg_pressure_info[cl].unused_set_increase = new_incr;
1883 		}
1884 	      else
1885 		{
1886 		  new_incr = reg_pressure_info[cl].set_increase + 1;
1887 		  reg_pressure_info[cl].set_increase = new_incr;
1888 		  if (! insn_use_p (insn, regno))
1889 		    reg_pressure_info[cl].change += 1;
1890 		  create_insn_reg_set (regno, insn);
1891 		}
1892 	      gcc_assert (new_incr < (1 << INCREASE_BITS));
1893 	    }
1894 	}
1895       regno++;
1896     }
1897 }
1898 
1899 /* Update the register pressure info after birth of pseudo or hard
1900    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
1901    respectively, whether the register is in a clobber or is unused after
1902    the insn.  */
1903 static void
1904 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
1905 {
1906   int regno;
1907 
1908   if (GET_CODE (reg) == SUBREG)
1909     reg = SUBREG_REG (reg);
1910 
1911   if (! REG_P (reg))
1912     return;
1913 
1914   regno = REGNO (reg);
1915   if (regno < FIRST_PSEUDO_REGISTER)
1916     mark_insn_hard_regno_birth (insn, regno,
1917 				hard_regno_nregs[regno][GET_MODE (reg)],
1918 				clobber_p, unused_p);
1919   else
1920     mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
1921 }
1922 
1923 /* Update the register pressure info after death of pseudo register
1924    REGNO.  */
1925 static void
1926 mark_pseudo_death (int regno)
1927 {
1928   int incr;
1929   enum reg_class cl;
1930 
1931   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1932   cl = sched_regno_cover_class[regno];
1933   if (cl != NO_REGS)
1934     {
1935       incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1936       reg_pressure_info[cl].change -= incr;
1937     }
1938 }
1939 
1940 /* Like mark_pseudo_death except that NREGS says how many hard
1941    registers are involved in the death.  */
1942 static void
1943 mark_hard_regno_death (int regno, int nregs)
1944 {
1945   enum reg_class cl;
1946   int last = regno + nregs;
1947 
1948   while (regno < last)
1949     {
1950       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1951       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1952 	{
1953 	  cl = sched_regno_cover_class[regno];
1954 	  if (cl != NO_REGS)
1955 	    reg_pressure_info[cl].change -= 1;
1956 	}
1957       regno++;
1958     }
1959 }
1960 
1961 /* Update the register pressure info after death of pseudo or hard
1962    register REG.  */
1963 static void
1964 mark_reg_death (rtx reg)
1965 {
1966   int regno;
1967 
1968   if (GET_CODE (reg) == SUBREG)
1969     reg = SUBREG_REG (reg);
1970 
1971   if (! REG_P (reg))
1972     return;
1973 
1974   regno = REGNO (reg);
1975   if (regno < FIRST_PSEUDO_REGISTER)
1976     mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
1977   else
1978     mark_pseudo_death (regno);
1979 }
1980 
1981 /* Process SETTER of REG.  DATA is an insn containing the setter.  */
1982 static void
1983 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
1984 {
1985   if (setter != NULL_RTX && GET_CODE (setter) != SET)
1986     return;
1987   mark_insn_reg_birth
1988     ((rtx) data, reg, false,
1989      find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
1990 }
1991 
1992 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
1993 static void
1994 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
1995 {
1996   if (GET_CODE (setter) == CLOBBER)
1997     mark_insn_reg_birth ((rtx) data, reg, true, false);
1998 }
1999 
2000 /* Set up reg pressure info related to INSN.  */
2001 static void
2002 setup_insn_reg_pressure_info (rtx insn)
2003 {
2004   int i, len;
2005   enum reg_class cl;
2006   static struct reg_pressure_data *pressure_info;
2007   rtx link;
2008 
2009   gcc_assert (sched_pressure_p);
2010 
2011   if (! INSN_P (insn))
2012     return;
2013 
2014   for (i = 0; i < ira_reg_class_cover_size; i++)
2015     {
2016       cl = ira_reg_class_cover[i];
2017       reg_pressure_info[cl].clobber_increase = 0;
2018       reg_pressure_info[cl].set_increase = 0;
2019       reg_pressure_info[cl].unused_set_increase = 0;
2020       reg_pressure_info[cl].change = 0;
2021     }
2022 
2023   note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2024 
2025   note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2026 
2027 #ifdef AUTO_INC_DEC
2028   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2029     if (REG_NOTE_KIND (link) == REG_INC)
2030       mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2031 #endif
2032 
2033   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2034     if (REG_NOTE_KIND (link) == REG_DEAD)
2035       mark_reg_death (XEXP (link, 0));
2036 
2037   len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
2038   pressure_info
2039     = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2040   INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
2041 						  * sizeof (int), 1);
2042   for (i = 0; i < ira_reg_class_cover_size; i++)
2043     {
2044       cl = ira_reg_class_cover[i];
2045       pressure_info[i].clobber_increase
2046 	= reg_pressure_info[cl].clobber_increase;
2047       pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2048       pressure_info[i].unused_set_increase
2049 	= reg_pressure_info[cl].unused_set_increase;
2050       pressure_info[i].change = reg_pressure_info[cl].change;
2051     }
2052 }
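
/* Worked example (hypothetical insn; assume r200, r201 and r202 all belong
   to the same cover class CL and each needs one register there): for
   (set (reg:SI 200) (plus:SI (reg:SI 201) (reg:SI 202))) where r201
   carries a REG_DEAD note, the INSN_REG_PRESSURE entry for CL records
   set_increase = 1 for the birth of r200 and change = 1 - 1 = 0, since the
   birth of r200 is balanced by the death of r201; clobber_increase and
   unused_set_increase stay 0.  */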
2053 
2054 
2055 
2056 
2057 /* Internal variable for sched_analyze_[12] () functions.
2058    If it is nonzero, this means that sched_analyze_[12] is looking
2059    at the topmost SET.  */
2060 static bool can_start_lhs_rhs_p;
2061 
2062 /* Extend reg info for the deps context DEPS given that
2063    we have just generated a register numbered REGNO.  */
2064 static void
2065 extend_deps_reg_info (struct deps_desc *deps, int regno)
2066 {
2067   int max_regno = regno + 1;
2068 
2069   gcc_assert (!reload_completed);
2070 
2071   /* In a readonly context, it would not hurt to extend info,
2072      but it should not be needed.  */
2073   if (reload_completed && deps->readonly)
2074     {
2075       deps->max_reg = max_regno;
2076       return;
2077     }
2078 
2079   if (max_regno > deps->max_reg)
2080     {
2081       deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2082                                    max_regno);
2083       memset (&deps->reg_last[deps->max_reg],
2084               0, (max_regno - deps->max_reg)
2085               * sizeof (struct deps_reg));
2086       deps->max_reg = max_regno;
2087     }
2088 }
2089 
2090 /* Extends REG_INFO_P if needed.  */
2091 void
2092 maybe_extend_reg_info_p (void)
2093 {
2094   /* Extend REG_INFO_P, if needed.  */
2095   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2096     {
2097       size_t new_reg_info_p_size = max_regno + 128;
2098 
2099       gcc_assert (!reload_completed && sel_sched_p ());
2100 
2101       reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2102                                                     new_reg_info_p_size,
2103                                                     reg_info_p_size,
2104                                                     sizeof (*reg_info_p));
2105       reg_info_p_size = new_reg_info_p_size;
2106     }
2107 }
2108 
2109 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2110    The type of the reference is specified by REF and can be SET,
2111    CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
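
/* For example (a sketch, assuming a target where a DImode value occupies
   two word-sized hard registers), a SET of (reg:DI 2) is recorded with
   note_reg_set for hard registers 2 and 3, whereas a SET of a pseudo is
   recorded once under its own register number.  */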
2112 
2113 static void
2114 sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2115 		   enum rtx_code ref, rtx insn)
2116 {
2117   /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2118   if (!reload_completed && sel_sched_p ()
2119       && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2120     extend_deps_reg_info (deps, regno);
2121 
2122   maybe_extend_reg_info_p ();
2123 
2124   /* A hard reg in a wide mode may really be multiple registers.
2125      If so, mark all of them just like the first.  */
2126   if (regno < FIRST_PSEUDO_REGISTER)
2127     {
2128       int i = hard_regno_nregs[regno][mode];
2129       if (ref == SET)
2130 	{
2131 	  while (--i >= 0)
2132 	    note_reg_set (regno + i);
2133 	}
2134       else if (ref == USE)
2135 	{
2136 	  while (--i >= 0)
2137 	    note_reg_use (regno + i);
2138 	}
2139       else
2140 	{
2141 	  while (--i >= 0)
2142 	    note_reg_clobber (regno + i);
2143 	}
2144     }
2145 
2146   /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2147      it does not reload.  Ignore these as they have served their
2148      purpose already.  */
2149   else if (regno >= deps->max_reg)
2150     {
2151       enum rtx_code code = GET_CODE (PATTERN (insn));
2152       gcc_assert (code == USE || code == CLOBBER);
2153     }
2154 
2155   else
2156     {
2157       if (ref == SET)
2158 	note_reg_set (regno);
2159       else if (ref == USE)
2160 	note_reg_use (regno);
2161       else
2162 	note_reg_clobber (regno);
2163 
2164       /* Pseudos that are REG_EQUIV to something may be replaced
2165 	 by that during reloading.  We need only add dependencies for
2166 	 the address in the REG_EQUIV note.  */
2167       if (!reload_completed && get_reg_known_equiv_p (regno))
2168 	{
2169 	  rtx t = get_reg_known_value (regno);
2170 	  if (MEM_P (t))
2171 	    sched_analyze_2 (deps, XEXP (t, 0), insn);
2172 	}
2173 
2174       /* Don't let it cross a call after scheduling if it doesn't
2175 	 already cross one.  */
2176       if (REG_N_CALLS_CROSSED (regno) == 0)
2177 	{
2178 	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2179 	    deps->sched_before_next_call
2180 	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2181 	  else
2182 	    add_dependence_list (insn, deps->last_function_call, 1,
2183 				 REG_DEP_ANTI);
2184 	}
2185     }
2186 }
2187 
2188 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2189    rtx, X, creating all dependencies generated by the write to the
2190    destination of X, and reads of everything mentioned.  */
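
/* A rough sketch of the flow for a store like
   (set (mem:SI (reg:SI 100)) (reg:SI 101)): the MEM destination is checked
   against the pending read and write lists to create anti and output
   dependencies, the address in r100 is then analyzed as a read via
   sched_analyze_2, and finally the SET_SRC (r101) is analyzed as a read as
   well.  */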
2191 
2192 static void
2193 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2194 {
2195   rtx dest = XEXP (x, 0);
2196   enum rtx_code code = GET_CODE (x);
2197   bool cslr_p = can_start_lhs_rhs_p;
2198 
2199   can_start_lhs_rhs_p = false;
2200 
2201   gcc_assert (dest);
2202   if (dest == 0)
2203     return;
2204 
2205   if (cslr_p && sched_deps_info->start_lhs)
2206     sched_deps_info->start_lhs (dest);
2207 
2208   if (GET_CODE (dest) == PARALLEL)
2209     {
2210       int i;
2211 
2212       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2213 	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2214 	  sched_analyze_1 (deps,
2215 			   gen_rtx_CLOBBER (VOIDmode,
2216 					    XEXP (XVECEXP (dest, 0, i), 0)),
2217 			   insn);
2218 
2219       if (cslr_p && sched_deps_info->finish_lhs)
2220 	sched_deps_info->finish_lhs ();
2221 
2222       if (code == SET)
2223 	{
2224 	  can_start_lhs_rhs_p = cslr_p;
2225 
2226 	  sched_analyze_2 (deps, SET_SRC (x), insn);
2227 
2228 	  can_start_lhs_rhs_p = false;
2229 	}
2230 
2231       return;
2232     }
2233 
2234   while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2235 	 || GET_CODE (dest) == ZERO_EXTRACT)
2236     {
2237       if (GET_CODE (dest) == STRICT_LOW_PART
2238 	 || GET_CODE (dest) == ZERO_EXTRACT
2239 	 || df_read_modify_subreg_p (dest))
2240         {
2241 	  /* These both read and modify the result.  We must handle
2242              them as writes to get proper dependencies for following
2243              instructions.  We must handle them as reads to get proper
2244              dependencies from this to previous instructions.
2245              Thus we need to call sched_analyze_2.  */
2246 
2247 	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
2248 	}
2249       if (GET_CODE (dest) == ZERO_EXTRACT)
2250 	{
2251 	  /* The second and third arguments are values read by this insn.  */
2252 	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
2253 	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
2254 	}
2255       dest = XEXP (dest, 0);
2256     }
2257 
2258   if (REG_P (dest))
2259     {
2260       int regno = REGNO (dest);
2261       enum machine_mode mode = GET_MODE (dest);
2262 
2263       sched_analyze_reg (deps, regno, mode, code, insn);
2264 
2265 #ifdef STACK_REGS
2266       /* Treat all writes to a stack register as modifying the TOS.  */
2267       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2268 	{
2269 	  int nregs;
2270 
2271 	  /* Avoid analyzing the same register twice.  */
2272 	  if (regno != FIRST_STACK_REG)
2273 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2274 
2275 	  nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
2276 	  while (--nregs >= 0)
2277 	    SET_HARD_REG_BIT (implicit_reg_pending_uses,
2278 			      FIRST_STACK_REG + nregs);
2279 	}
2280 #endif
2281     }
2282   else if (MEM_P (dest))
2283     {
2284       /* Writing memory.  */
2285       rtx t = dest;
2286 
2287       if (sched_deps_info->use_cselib)
2288 	{
2289 	  enum machine_mode address_mode
2290 	    = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2291 
2292 	  t = shallow_copy_rtx (dest);
2293 	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2294 	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2295 	}
2296       t = canon_rtx (t);
2297 
2298       /* Pending lists can't get larger with a readonly context.  */
2299       if (!deps->readonly
2300           && ((deps->pending_read_list_length + deps->pending_write_list_length)
2301               > MAX_PENDING_LIST_LENGTH))
2302 	{
2303 	  /* Flush all pending reads and writes to prevent the pending lists
2304 	     from getting any larger.  Insn scheduling runs too slowly when
2305 	     these lists get long.  When compiling GCC with itself,
2306 	     this flush occurs 8 times for sparc, and 10 times for m88k using
2307 	     the default value of 32.  */
2308 	  flush_pending_lists (deps, insn, false, true);
2309 	}
2310       else
2311 	{
2312 	  rtx pending, pending_mem;
2313 
2314 	  pending = deps->pending_read_insns;
2315 	  pending_mem = deps->pending_read_mems;
2316 	  while (pending)
2317 	    {
2318 	      if (anti_dependence (XEXP (pending_mem, 0), t)
2319 		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2320 		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2321 			      DEP_ANTI);
2322 
2323 	      pending = XEXP (pending, 1);
2324 	      pending_mem = XEXP (pending_mem, 1);
2325 	    }
2326 
2327 	  pending = deps->pending_write_insns;
2328 	  pending_mem = deps->pending_write_mems;
2329 	  while (pending)
2330 	    {
2331 	      if (output_dependence (XEXP (pending_mem, 0), t)
2332 		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2333 		note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2334 			      DEP_OUTPUT);
2335 
2336 	      pending = XEXP (pending, 1);
2337 	      pending_mem = XEXP (pending_mem, 1);
2338 	    }
2339 
2340 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2341 			       REG_DEP_ANTI);
2342 
2343           if (!deps->readonly)
2344             add_insn_mem_dependence (deps, false, insn, dest);
2345 	}
2346       sched_analyze_2 (deps, XEXP (dest, 0), insn);
2347     }
2348 
2349   if (cslr_p && sched_deps_info->finish_lhs)
2350     sched_deps_info->finish_lhs ();
2351 
2352   /* Analyze reads.  */
2353   if (GET_CODE (x) == SET)
2354     {
2355       can_start_lhs_rhs_p = cslr_p;
2356 
2357       sched_analyze_2 (deps, SET_SRC (x), insn);
2358 
2359       can_start_lhs_rhs_p = false;
2360     }
2361 }
2362 
2363 /* Analyze the uses of memory and registers in rtx X in INSN.  */
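/* For instance, when X is a read like (mem:SI (reg:SI 100)), the MEM case
   below checks it against the pending writes to create a true dependence
   (a speculative BEGIN_DATA dependence when generate_spec_deps is set),
   records the read on the pending read list, and then recurses into the
   address.  */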
2364 static void
2365 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2366 {
2367   int i;
2368   int j;
2369   enum rtx_code code;
2370   const char *fmt;
2371   bool cslr_p = can_start_lhs_rhs_p;
2372 
2373   can_start_lhs_rhs_p = false;
2374 
2375   gcc_assert (x);
2376   if (x == 0)
2377     return;
2378 
2379   if (cslr_p && sched_deps_info->start_rhs)
2380     sched_deps_info->start_rhs (x);
2381 
2382   code = GET_CODE (x);
2383 
2384   switch (code)
2385     {
2386     case CONST_INT:
2387     case CONST_DOUBLE:
2388     case CONST_FIXED:
2389     case CONST_VECTOR:
2390     case SYMBOL_REF:
2391     case CONST:
2392     case LABEL_REF:
2393       /* Ignore constants.  */
2394       if (cslr_p && sched_deps_info->finish_rhs)
2395 	sched_deps_info->finish_rhs ();
2396 
2397       return;
2398 
2399 #ifdef HAVE_cc0
2400     case CC0:
2401       /* User of CC0 depends on immediately preceding insn.  */
2402       SCHED_GROUP_P (insn) = 1;
2403        /* Don't move the CC0 setter to another block (it can set up the
2404         same flag for previous CC0 users, which is safe).  */
2405       CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2406 
2407       if (cslr_p && sched_deps_info->finish_rhs)
2408 	sched_deps_info->finish_rhs ();
2409 
2410       return;
2411 #endif
2412 
2413     case REG:
2414       {
2415 	int regno = REGNO (x);
2416 	enum machine_mode mode = GET_MODE (x);
2417 
2418 	sched_analyze_reg (deps, regno, mode, USE, insn);
2419 
2420 #ifdef STACK_REGS
2421       /* Treat all reads of a stack register as modifying the TOS.  */
2422       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2423 	{
2424 	  /* Avoid analyzing the same register twice.  */
2425 	  if (regno != FIRST_STACK_REG)
2426 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2427 	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2428 	}
2429 #endif
2430 
2431 	if (cslr_p && sched_deps_info->finish_rhs)
2432 	  sched_deps_info->finish_rhs ();
2433 
2434 	return;
2435       }
2436 
2437     case MEM:
2438       {
2439 	/* Reading memory.  */
2440 	rtx u;
2441 	rtx pending, pending_mem;
2442 	rtx t = x;
2443 
2444 	if (sched_deps_info->use_cselib)
2445 	  {
2446 	    enum machine_mode address_mode
2447 	      = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2448 
2449 	    t = shallow_copy_rtx (t);
2450 	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2451 	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2452 	  }
2453 
2454 	if (!DEBUG_INSN_P (insn))
2455 	  {
2456 	    t = canon_rtx (t);
2457 	    pending = deps->pending_read_insns;
2458 	    pending_mem = deps->pending_read_mems;
2459 	    while (pending)
2460 	      {
2461 		if (read_dependence (XEXP (pending_mem, 0), t)
2462 		    && ! sched_insns_conditions_mutex_p (insn,
2463 							 XEXP (pending, 0)))
2464 		  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2465 				DEP_ANTI);
2466 
2467 		pending = XEXP (pending, 1);
2468 		pending_mem = XEXP (pending_mem, 1);
2469 	      }
2470 
2471 	    pending = deps->pending_write_insns;
2472 	    pending_mem = deps->pending_write_mems;
2473 	    while (pending)
2474 	      {
2475 		if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2476 				     t, rtx_varies_p)
2477 		    && ! sched_insns_conditions_mutex_p (insn,
2478 							 XEXP (pending, 0)))
2479 		  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2480 				sched_deps_info->generate_spec_deps
2481 				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2482 
2483 		pending = XEXP (pending, 1);
2484 		pending_mem = XEXP (pending_mem, 1);
2485 	      }
2486 
2487 	    for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2488 	      {
2489 		if (! NON_FLUSH_JUMP_P (u))
2490 		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2491 		else if (deps_may_trap_p (x))
2492 		  {
2493 		    if ((sched_deps_info->generate_spec_deps)
2494 			&& sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2495 		      {
2496 			ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2497 						MAX_DEP_WEAK);
2498 
2499 			note_dep (XEXP (u, 0), ds);
2500 		      }
2501 		    else
2502 		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2503 		  }
2504 	      }
2505 	  }
2506 
2507 	/* Always add these dependencies to pending_reads, since
2508 	   this insn may be followed by a write.  */
2509         if (!deps->readonly)
2510           add_insn_mem_dependence (deps, true, insn, x);
2511 
2512 	sched_analyze_2 (deps, XEXP (x, 0), insn);
2513 
2514 	if (cslr_p && sched_deps_info->finish_rhs)
2515 	  sched_deps_info->finish_rhs ();
2516 
2517 	return;
2518       }
2519 
2520     /* Force pending stores to memory in case a trap handler needs them.  */
2521     case TRAP_IF:
2522       flush_pending_lists (deps, insn, true, false);
2523       break;
2524 
2525     case PREFETCH:
2526       if (PREFETCH_SCHEDULE_BARRIER_P (x))
2527 	reg_pending_barrier = TRUE_BARRIER;
2528       break;
2529 
2530     case UNSPEC_VOLATILE:
2531       flush_pending_lists (deps, insn, true, true);
2532       /* FALLTHRU */
2533 
2534     case ASM_OPERANDS:
2535     case ASM_INPUT:
2536       {
2537 	/* Traditional and volatile asm instructions must be considered to use
2538 	   and clobber all hard registers, all pseudo-registers and all of
2539 	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2540 
2541 	   Consider for instance a volatile asm that changes the fpu rounding
2542 	   mode.  An insn should not be moved across this even if it only uses
2543 	   pseudo-regs because it might give an incorrectly rounded result.  */
2544 	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2545 	  reg_pending_barrier = TRUE_BARRIER;
2546 
2547 	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
2548 	   We cannot just fall through here since then we would be confused
2549 	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2550 	   a traditional asm, unlike its normal usage.  */
2551 
2552 	if (code == ASM_OPERANDS)
2553 	  {
2554 	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2555 	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2556 
2557 	    if (cslr_p && sched_deps_info->finish_rhs)
2558 	      sched_deps_info->finish_rhs ();
2559 
2560 	    return;
2561 	  }
2562 	break;
2563       }
2564 
2565     case PRE_DEC:
2566     case POST_DEC:
2567     case PRE_INC:
2568     case POST_INC:
2569       /* These both read and modify the result.  We must handle them as writes
2570          to get proper dependencies for following instructions.  We must handle
2571          them as reads to get proper dependencies from this to previous
2572          instructions.  Thus we need to pass them to both sched_analyze_1
2573          and sched_analyze_2.  We must call sched_analyze_2 first in order
2574          to get the proper antecedent for the read.  */
2575       sched_analyze_2 (deps, XEXP (x, 0), insn);
2576       sched_analyze_1 (deps, x, insn);
2577 
2578       if (cslr_p && sched_deps_info->finish_rhs)
2579 	sched_deps_info->finish_rhs ();
2580 
2581       return;
2582 
2583     case POST_MODIFY:
2584     case PRE_MODIFY:
2585       /* op0 = op0 + op1 */
2586       sched_analyze_2 (deps, XEXP (x, 0), insn);
2587       sched_analyze_2 (deps, XEXP (x, 1), insn);
2588       sched_analyze_1 (deps, x, insn);
2589 
2590       if (cslr_p && sched_deps_info->finish_rhs)
2591 	sched_deps_info->finish_rhs ();
2592 
2593       return;
2594 
2595     default:
2596       break;
2597     }
2598 
2599   /* Other cases: walk the insn.  */
2600   fmt = GET_RTX_FORMAT (code);
2601   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2602     {
2603       if (fmt[i] == 'e')
2604 	sched_analyze_2 (deps, XEXP (x, i), insn);
2605       else if (fmt[i] == 'E')
2606 	for (j = 0; j < XVECLEN (x, i); j++)
2607 	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2608     }
2609 
2610   if (cslr_p && sched_deps_info->finish_rhs)
2611     sched_deps_info->finish_rhs ();
2612 }
2613 
2614 /* Analyze an INSN with pattern X to find all dependencies.  */
2615 static void
2616 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2617 {
2618   RTX_CODE code = GET_CODE (x);
2619   rtx link;
2620   unsigned i;
2621   reg_set_iterator rsi;
2622 
2623   if (! reload_completed)
2624     {
2625       HARD_REG_SET temp;
2626 
2627       extract_insn (insn);
2628       preprocess_constraints ();
2629       ira_implicitly_set_insn_hard_regs (&temp);
2630       AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2631       IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2632     }
2633 
2634   can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2635 			 && code == SET);
2636 
2637   if (may_trap_p (x))
2638     /* Avoid moving trapping instructions across function calls that might
2639        not always return.  */
2640     add_dependence_list (insn, deps->last_function_call_may_noreturn,
2641 			 1, REG_DEP_ANTI);
2642 
2643   if (code == COND_EXEC)
2644     {
2645       sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2646 
2647       /* ??? Should be recording conditions so we reduce the number of
2648 	 false dependencies.  */
2649       x = COND_EXEC_CODE (x);
2650       code = GET_CODE (x);
2651     }
2652   if (code == SET || code == CLOBBER)
2653     {
2654       sched_analyze_1 (deps, x, insn);
2655 
2656       /* Bare clobber insns are used for letting life analysis, reg-stack
2657 	 and others know that a value is dead.  Depend on the last call
2658 	 instruction so that reg-stack won't get confused.  */
2659       if (code == CLOBBER)
2660 	add_dependence_list (insn, deps->last_function_call, 1,
2661 			     REG_DEP_OUTPUT);
2662     }
2663   else if (code == PARALLEL)
2664     {
2665       for (i = XVECLEN (x, 0); i--;)
2666 	{
2667 	  rtx sub = XVECEXP (x, 0, i);
2668 	  code = GET_CODE (sub);
2669 
2670 	  if (code == COND_EXEC)
2671 	    {
2672 	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2673 	      sub = COND_EXEC_CODE (sub);
2674 	      code = GET_CODE (sub);
2675 	    }
2676 	  if (code == SET || code == CLOBBER)
2677 	    sched_analyze_1 (deps, sub, insn);
2678 	  else
2679 	    sched_analyze_2 (deps, sub, insn);
2680 	}
2681     }
2682   else
2683     sched_analyze_2 (deps, x, insn);
2684 
2685   /* Mark registers CLOBBERED or used by called function.  */
2686   if (CALL_P (insn))
2687     {
2688       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2689 	{
2690 	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2691 	    sched_analyze_1 (deps, XEXP (link, 0), insn);
2692 	  else
2693 	    sched_analyze_2 (deps, XEXP (link, 0), insn);
2694 	}
2695       if (find_reg_note (insn, REG_SETJMP, NULL))
2696 	reg_pending_barrier = MOVE_BARRIER;
2697     }
2698 
2699   if (JUMP_P (insn))
2700     {
2701       rtx next;
2702       next = next_nonnote_nondebug_insn (insn);
2703       if (next && BARRIER_P (next))
2704 	reg_pending_barrier = MOVE_BARRIER;
2705       else
2706 	{
2707 	  rtx pending, pending_mem;
2708 
2709           if (sched_deps_info->compute_jump_reg_dependencies)
2710             {
2711               regset_head tmp_uses, tmp_sets;
2712               INIT_REG_SET (&tmp_uses);
2713               INIT_REG_SET (&tmp_sets);
2714 
2715               (*sched_deps_info->compute_jump_reg_dependencies)
2716                 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2717               /* Make latency of jump equal to 0 by using anti-dependence.  */
2718               EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2719                 {
2720                   struct deps_reg *reg_last = &deps->reg_last[i];
2721                   add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2722                   add_dependence_list (insn, reg_last->implicit_sets,
2723 				       0, REG_DEP_ANTI);
2724                   add_dependence_list (insn, reg_last->clobbers, 0,
2725 				       REG_DEP_ANTI);
2726 
2727                   if (!deps->readonly)
2728                     {
2729                       reg_last->uses_length++;
2730                       reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2731                     }
2732                 }
2733               IOR_REG_SET (reg_pending_sets, &tmp_sets);
2734 
2735               CLEAR_REG_SET (&tmp_uses);
2736               CLEAR_REG_SET (&tmp_sets);
2737             }
2738 
2739 	  /* All memory writes and volatile reads must happen before the
2740 	     jump.  Non-volatile reads must happen before the jump if and only
2741 	     if the result is needed by the register use mask computed above.  */
2742 
2743 	  pending = deps->pending_write_insns;
2744 	  pending_mem = deps->pending_write_mems;
2745 	  while (pending)
2746 	    {
2747 	      if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2748 		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2749 	      pending = XEXP (pending, 1);
2750 	      pending_mem = XEXP (pending_mem, 1);
2751 	    }
2752 
2753 	  pending = deps->pending_read_insns;
2754 	  pending_mem = deps->pending_read_mems;
2755 	  while (pending)
2756 	    {
2757 	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2758 		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2759 		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2760 	      pending = XEXP (pending, 1);
2761 	      pending_mem = XEXP (pending_mem, 1);
2762 	    }
2763 
2764 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2765 			       REG_DEP_ANTI);
2766 	}
2767     }
2768 
2769   /* If this instruction can throw an exception, then moving it changes
2770      where block boundaries fall.  This is mighty confusing elsewhere.
2771      Therefore, prevent such an instruction from being moved.  Same for
2772      non-jump instructions that define block boundaries.
2773      ??? Unclear whether this is still necessary in EBB mode.  If not,
2774      add_branch_dependences should be adjusted for RGN mode instead.  */
2775   if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2776       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2777     reg_pending_barrier = MOVE_BARRIER;
2778 
2779   if (sched_pressure_p)
2780     {
2781       setup_insn_reg_uses (deps, insn);
2782       setup_insn_reg_pressure_info (insn);
2783     }
2784 
2785   /* Add register dependencies for insn.  */
2786   if (DEBUG_INSN_P (insn))
2787     {
2788       rtx prev = deps->last_debug_insn;
2789       rtx u;
2790 
2791       if (!deps->readonly)
2792 	deps->last_debug_insn = insn;
2793 
2794       if (prev)
2795 	add_dependence (insn, prev, REG_DEP_ANTI);
2796 
2797       add_dependence_list (insn, deps->last_function_call, 1,
2798 			   REG_DEP_ANTI);
2799 
2800       for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2801 	if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
2802 	  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2803 
2804       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2805 	{
2806 	  struct deps_reg *reg_last = &deps->reg_last[i];
2807 	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2808 	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2809 
2810 	  if (!deps->readonly)
2811 	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2812 	}
2813       CLEAR_REG_SET (reg_pending_uses);
2814 
2815       /* Quite often, a debug insn will refer to stuff in the
2816 	 previous instruction, but the reason we want this
2817 	 dependency here is to make sure the scheduler doesn't
2818 	 gratuitously move a debug insn ahead.  This could dirty
2819 	 DF flags and cause additional analysis that wouldn't have
2820 	 occurred in compilation without debug insns, and such
2821 	 additional analysis can modify the generated code.  */
2822       prev = PREV_INSN (insn);
2823 
2824       if (prev && NONDEBUG_INSN_P (prev))
2825 	add_dependence (insn, prev, REG_DEP_ANTI);
2826     }
2827   else
2828     {
2829       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2830 	{
2831 	  struct deps_reg *reg_last = &deps->reg_last[i];
2832 	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2833 	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2834 	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2835 
2836 	  if (!deps->readonly)
2837 	    {
2838 	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2839 	      reg_last->uses_length++;
2840 	    }
2841 	}
2842 
2843       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2844 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2845 	  {
2846 	    struct deps_reg *reg_last = &deps->reg_last[i];
2847 	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2848 	    add_dependence_list (insn, reg_last->implicit_sets, 0,
2849 				 REG_DEP_ANTI);
2850 	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2851 
2852 	    if (!deps->readonly)
2853 	      {
2854 		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2855 		reg_last->uses_length++;
2856 	      }
2857 	  }
2858 
2859       /* If the current insn is conditional, we can't free any
2860 	 of the lists.  */
2861       if (sched_has_condition_p (insn))
2862 	{
2863 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2864 	    {
2865 	      struct deps_reg *reg_last = &deps->reg_last[i];
2866 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2867 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
2868 				   REG_DEP_ANTI);
2869 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2870 
2871 	      if (!deps->readonly)
2872 		{
2873 		  reg_last->clobbers
2874 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
2875 		  reg_last->clobbers_length++;
2876 		}
2877 	    }
2878 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2879 	    {
2880 	      struct deps_reg *reg_last = &deps->reg_last[i];
2881 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2882 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
2883 				   REG_DEP_ANTI);
2884 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2885 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2886 
2887 	      if (!deps->readonly)
2888 		{
2889 		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2890 		  SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2891 		}
2892 	    }
2893 	}
2894       else
2895 	{
2896 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2897 	    {
2898 	      struct deps_reg *reg_last = &deps->reg_last[i];
2899 	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2900 		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2901 		{
2902 		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2903 						REG_DEP_OUTPUT);
2904 		  add_dependence_list_and_free (deps, insn,
2905 						&reg_last->implicit_sets, 0,
2906 						REG_DEP_ANTI);
2907 		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2908 						REG_DEP_ANTI);
2909 		  add_dependence_list_and_free
2910 		    (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2911 
2912 		  if (!deps->readonly)
2913 		    {
2914 		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2915 		      reg_last->clobbers_length = 0;
2916 		      reg_last->uses_length = 0;
2917 		    }
2918 		}
2919 	      else
2920 		{
2921 		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2922 		  add_dependence_list (insn, reg_last->implicit_sets, 0,
2923 				       REG_DEP_ANTI);
2924 		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2925 		}
2926 
2927 	      if (!deps->readonly)
2928 		{
2929 		  reg_last->clobbers_length++;
2930 		  reg_last->clobbers
2931 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
2932 		}
2933 	    }
2934 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2935 	    {
2936 	      struct deps_reg *reg_last = &deps->reg_last[i];
2937 
2938 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2939 					    REG_DEP_OUTPUT);
2940 	      add_dependence_list_and_free (deps, insn,
2941 					    &reg_last->implicit_sets,
2942 					    0, REG_DEP_ANTI);
2943 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2944 					    REG_DEP_OUTPUT);
2945 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2946 					    REG_DEP_ANTI);
2947 
2948 	      if (!deps->readonly)
2949 		{
2950 		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2951 		  reg_last->uses_length = 0;
2952 		  reg_last->clobbers_length = 0;
2953 		  CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2954 		}
2955 	    }
2956 	}
2957     }
2958 
2959   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2960     if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2961       {
2962 	struct deps_reg *reg_last = &deps->reg_last[i];
2963 	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2964 	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2965 	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2966 
2967 	if (!deps->readonly)
2968 	  reg_last->implicit_sets
2969 	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2970       }
2971 
2972   if (!deps->readonly)
2973     {
2974       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2975       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2976       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2977       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2978 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2979 	    || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2980 	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2981 
2982       /* Set up the pending barrier found.  */
2983       deps->last_reg_pending_barrier = reg_pending_barrier;
2984     }
2985 
2986   CLEAR_REG_SET (reg_pending_uses);
2987   CLEAR_REG_SET (reg_pending_clobbers);
2988   CLEAR_REG_SET (reg_pending_sets);
2989   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2990   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
2991 
2992   /* Add dependencies if a scheduling barrier was found.  */
2993   if (reg_pending_barrier)
2994     {
2995       /* In the case of a barrier, most of the added dependencies are not
2996          real, so we use anti-dependences here.  */
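      /* For example, a volatile asm or an UNSPEC_VOLATILE seen by
         sched_analyze_2 sets reg_pending_barrier to TRUE_BARRIER, and a
         REG_SETJMP call sets MOVE_BARRIER; here every register with an
         entry in reg_last_in_use gets tied to this insn accordingly.  */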
2997       if (sched_has_condition_p (insn))
2998 	{
2999 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3000 	    {
3001 	      struct deps_reg *reg_last = &deps->reg_last[i];
3002 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3003 	      add_dependence_list (insn, reg_last->sets, 0,
3004 				   reg_pending_barrier == TRUE_BARRIER
3005 				   ? REG_DEP_TRUE : REG_DEP_ANTI);
3006 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3007 				   REG_DEP_ANTI);
3008 	      add_dependence_list (insn, reg_last->clobbers, 0,
3009 				   reg_pending_barrier == TRUE_BARRIER
3010 				   ? REG_DEP_TRUE : REG_DEP_ANTI);
3011 	    }
3012 	}
3013       else
3014 	{
3015 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3016 	    {
3017 	      struct deps_reg *reg_last = &deps->reg_last[i];
3018 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3019 					    REG_DEP_ANTI);
3020 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3021 					    reg_pending_barrier == TRUE_BARRIER
3022 					    ? REG_DEP_TRUE : REG_DEP_ANTI);
3023 	      add_dependence_list_and_free (deps, insn,
3024 					    &reg_last->implicit_sets, 0,
3025 					    REG_DEP_ANTI);
3026 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3027 					    reg_pending_barrier == TRUE_BARRIER
3028 					    ? REG_DEP_TRUE : REG_DEP_ANTI);
3029 
3030               if (!deps->readonly)
3031                 {
3032                   reg_last->uses_length = 0;
3033                   reg_last->clobbers_length = 0;
3034                 }
3035 	    }
3036 	}
3037 
3038       if (!deps->readonly)
3039         for (i = 0; i < (unsigned)deps->max_reg; i++)
3040           {
3041             struct deps_reg *reg_last = &deps->reg_last[i];
3042             reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3043             SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3044           }
3045 
3046       /* Flush pending lists on jumps, but not on speculative checks.  */
3047       if (JUMP_P (insn) && !(sel_sched_p ()
3048                              && sel_insn_is_speculation_check (insn)))
3049 	flush_pending_lists (deps, insn, true, true);
3050 
3051       if (!deps->readonly)
3052         CLEAR_REG_SET (&deps->reg_conditional_sets);
3053       reg_pending_barrier = NOT_A_BARRIER;
3054     }
3055 
3056   /* If a post-call group is still open, see if it should remain so.
3057      This insn must be a simple move of a hard reg to a pseudo or
3058      vice-versa.
3059 
3060      We must avoid moving these insns for correctness on
3061      SMALL_REGISTER_CLASS machines, and for special registers like
3062      PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
3063      hard regs for all targets.  */
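  /* Sketch of the kind of insn that keeps the group open: right after a
     call, a copy such as (set (reg:SI 100) (reg:SI 0)) -- assuming hard
     register 0 holds the return value on the target at hand -- involves a
     hard register on one side, so it stays glued to the call; an insn that
     touches only pseudos (or that is not a single_set at all) closes the
     group instead.  */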
3064 
3065   if (deps->in_post_call_group_p)
3066     {
3067       rtx tmp, set = single_set (insn);
3068       int src_regno, dest_regno;
3069 
3070       if (set == NULL)
3071 	{
3072 	  if (DEBUG_INSN_P (insn))
3073 	    /* We don't want to mark debug insns as part of the same
3074 	       sched group.  We know they really aren't, but if we use
3075 	       debug insns to tell that a call group is over, we'll
3076 	       get different code if debug insns are not there and
3077 	       instructions that follow seem like they should be part
3078 	       of the call group.
3079 
3080 	       Also, if we did, fixup_sched_groups() would move the
3081 	       deps of the debug insn to the call insn, modifying
3082 	       non-debug post-dependency counts of the debug insn
3083 	       dependencies and otherwise messing with the scheduling
3084 	       order.
3085 
3086 	       Instead, let such debug insns be scheduled freely, but
3087 	       keep the call group open in case there are insns that
3088 	       should be part of it afterwards.  Since we grant debug
3089 	       insns higher priority than even sched group insns, it
3090 	       will all turn out all right.  */
3091 	    goto debug_dont_end_call_group;
3092 	  else
3093 	    goto end_call_group;
3094 	}
3095 
3096       tmp = SET_DEST (set);
3097       if (GET_CODE (tmp) == SUBREG)
3098 	tmp = SUBREG_REG (tmp);
3099       if (REG_P (tmp))
3100 	dest_regno = REGNO (tmp);
3101       else
3102 	goto end_call_group;
3103 
3104       tmp = SET_SRC (set);
3105       if (GET_CODE (tmp) == SUBREG)
3106 	tmp = SUBREG_REG (tmp);
3107       if ((GET_CODE (tmp) == PLUS
3108 	   || GET_CODE (tmp) == MINUS)
3109 	  && REG_P (XEXP (tmp, 0))
3110 	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3111 	  && dest_regno == STACK_POINTER_REGNUM)
3112 	src_regno = STACK_POINTER_REGNUM;
3113       else if (REG_P (tmp))
3114 	src_regno = REGNO (tmp);
3115       else
3116 	goto end_call_group;
3117 
3118       if (src_regno < FIRST_PSEUDO_REGISTER
3119 	  || dest_regno < FIRST_PSEUDO_REGISTER)
3120 	{
3121 	  if (!deps->readonly
3122               && deps->in_post_call_group_p == post_call_initial)
3123 	    deps->in_post_call_group_p = post_call;
3124 
3125           if (!sel_sched_p () || sched_emulate_haifa_p)
3126             {
3127               SCHED_GROUP_P (insn) = 1;
3128               CANT_MOVE (insn) = 1;
3129             }
3130 	}
3131       else
3132 	{
3133 	end_call_group:
3134           if (!deps->readonly)
3135             deps->in_post_call_group_p = not_post_call;
3136 	}
3137     }
3138 
3139  debug_dont_end_call_group:
3140   if ((current_sched_info->flags & DO_SPECULATION)
3141       && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3142     /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3143        be speculated.  */
3144     {
3145       if (sel_sched_p ())
3146         sel_mark_hard_insn (insn);
3147       else
3148         {
3149           sd_iterator_def sd_it;
3150           dep_t dep;
3151 
3152           for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3153                sd_iterator_cond (&sd_it, &dep);)
3154             change_spec_dep_to_hard (sd_it);
3155         }
3156     }
3157 }
3158 
3159 /* Return TRUE if INSN might not always return normally (e.g. a call to
3160    exit or longjmp, a function that loops forever, ...).  */
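/* The result feeds deps->last_function_call_may_noreturn: trapping insns
   get an anti dependence on that call (see sched_analyze_insn above), so
   they cannot be moved up across a call that might never return.  */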
3161 static bool
3162 call_may_noreturn_p (rtx insn)
3163 {
3164   rtx call;
3165 
3166   /* const or pure calls that aren't looping will always return.  */
3167   if (RTL_CONST_OR_PURE_CALL_P (insn)
3168       && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3169     return false;
3170 
3171   call = PATTERN (insn);
3172   if (GET_CODE (call) == PARALLEL)
3173     call = XVECEXP (call, 0, 0);
3174   if (GET_CODE (call) == SET)
3175     call = SET_SRC (call);
3176   if (GET_CODE (call) == CALL
3177       && MEM_P (XEXP (call, 0))
3178       && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3179     {
3180       rtx symbol = XEXP (XEXP (call, 0), 0);
3181       if (SYMBOL_REF_DECL (symbol)
3182 	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3183 	{
3184 	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3185 	      == BUILT_IN_NORMAL)
3186 	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3187 	      {
3188 	      case BUILT_IN_BCMP:
3189 	      case BUILT_IN_BCOPY:
3190 	      case BUILT_IN_BZERO:
3191 	      case BUILT_IN_INDEX:
3192 	      case BUILT_IN_MEMCHR:
3193 	      case BUILT_IN_MEMCMP:
3194 	      case BUILT_IN_MEMCPY:
3195 	      case BUILT_IN_MEMMOVE:
3196 	      case BUILT_IN_MEMPCPY:
3197 	      case BUILT_IN_MEMSET:
3198 	      case BUILT_IN_RINDEX:
3199 	      case BUILT_IN_STPCPY:
3200 	      case BUILT_IN_STPNCPY:
3201 	      case BUILT_IN_STRCAT:
3202 	      case BUILT_IN_STRCHR:
3203 	      case BUILT_IN_STRCMP:
3204 	      case BUILT_IN_STRCPY:
3205 	      case BUILT_IN_STRCSPN:
3206 	      case BUILT_IN_STRLEN:
3207 	      case BUILT_IN_STRNCAT:
3208 	      case BUILT_IN_STRNCMP:
3209 	      case BUILT_IN_STRNCPY:
3210 	      case BUILT_IN_STRPBRK:
3211 	      case BUILT_IN_STRRCHR:
3212 	      case BUILT_IN_STRSPN:
3213 	      case BUILT_IN_STRSTR:
3214 		/* Assume certain string/memory builtins always return.  */
3215 		return false;
3216 	      default:
3217 		break;
3218 	      }
3219 	}
3220     }
3221 
3222   /* For all other calls assume that they might not always return.  */
3223   return true;
3224 }
3225 
3226 /* Analyze INSN with DEPS as a context.  */
3227 void
3228 deps_analyze_insn (struct deps_desc *deps, rtx insn)
3229 {
3230   if (sched_deps_info->start_insn)
3231     sched_deps_info->start_insn (insn);
3232 
3233   if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3234     {
3235       /* Make each JUMP_INSN (but not a speculative check)
3236          a scheduling barrier for memory references.  */
3237       if (!deps->readonly
3238           && JUMP_P (insn)
3239           && !(sel_sched_p ()
3240                && sel_insn_is_speculation_check (insn)))
3241         {
3242           /* Keep the list a reasonable size.  */
3243           if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3244             flush_pending_lists (deps, insn, true, true);
3245           else
3246 	    {
3247 	      deps->last_pending_memory_flush
3248 		= alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
3249 	      /* Signal to sched_analyze_insn that this jump stands
3250 		 just for itself, not for any other pending memory
3251 		 reads/writes that flush_pending_lists had to flush.  */
3252 	      PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
3253 				 NON_FLUSH_JUMP_KIND);
3254 	    }
3255         }
3256 
3257       sched_analyze_insn (deps, PATTERN (insn), insn);
3258     }
3259   else if (CALL_P (insn))
3260     {
3261       int i;
3262 
3263       CANT_MOVE (insn) = 1;
3264 
3265       if (find_reg_note (insn, REG_SETJMP, NULL))
3266         {
3267           /* This is setjmp.  Assume that all registers, not just
3268              hard registers, may be clobbered by this call.  */
3269           reg_pending_barrier = MOVE_BARRIER;
3270         }
3271       else
3272         {
3273           for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3274             /* A call may read and modify global register variables.  */
3275             if (global_regs[i])
3276               {
3277                 SET_REGNO_REG_SET (reg_pending_sets, i);
3278                 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3279               }
3280           /* Other call-clobbered hard regs may be clobbered.
3281              Since we only have a choice between 'might be clobbered'
3282              and 'definitely not clobbered', we must include all
3283              partly call-clobbered registers here.  */
3284             else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3285                      || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3286               SET_REGNO_REG_SET (reg_pending_clobbers, i);
3287           /* We don't know what set of fixed registers might be used
3288              by the function, but it is certain that the stack pointer
3289              is among them, so be conservative.  */
3290             else if (fixed_regs[i])
3291 	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3292           /* The frame pointer is normally not used by the function
3293              itself, but by the debugger.  */
3294           /* ??? MIPS o32 is an exception.  It uses the frame pointer
3295              in the macro expansion of jal but does not represent this
3296              fact in the call_insn rtl.  */
3297             else if (i == FRAME_POINTER_REGNUM
3298                      || (i == HARD_FRAME_POINTER_REGNUM
3299                          && (! reload_completed || frame_pointer_needed)))
3300 	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3301         }
3302 
3303       /* For each insn which shouldn't cross a call, add a dependence
3304          between that insn and this call insn.  */
3305       add_dependence_list_and_free (deps, insn,
3306                                     &deps->sched_before_next_call, 1,
3307                                     REG_DEP_ANTI);
3308 
3309       sched_analyze_insn (deps, PATTERN (insn), insn);
3310 
3311       /* If the CALL were in a sched group, then this would violate the
3312 	 convention that sched group insns have dependencies only on the
3313 	 previous instruction.
3314 
3315 	 Of course one can say: "Hey!  What about head of the sched group?"
3316 	 And I will answer: "Basic principles (one dep per insn) are always
3317 	 the same."  */
3318       gcc_assert (!SCHED_GROUP_P (insn));
3319 
3320       /* In the absence of interprocedural alias analysis, we must flush
3321          all pending reads and writes, and start new dependencies starting
3322          from here.  But only flush writes for constant calls (which may
3323          be passed a pointer to something we haven't written yet).  */
3324       flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3325 
3326       if (!deps->readonly)
3327         {
3328           /* Remember the last function call for limiting lifetimes.  */
3329           free_INSN_LIST_list (&deps->last_function_call);
3330           deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3331 
3332 	  if (call_may_noreturn_p (insn))
3333 	    {
3334 	      /* Remember the last function call that might not always return
3335 		 normally for limiting moves of trapping insns.  */
3336 	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3337 	      deps->last_function_call_may_noreturn
3338 		= alloc_INSN_LIST (insn, NULL_RTX);
3339 	    }
3340 
3341           /* Before reload, begin a post-call group, so as to keep the
3342              lifetimes of hard registers correct.  */
3343           if (! reload_completed)
3344             deps->in_post_call_group_p = post_call;
3345         }
3346     }
3347 
3348   if (sched_deps_info->use_cselib)
3349     cselib_process_insn (insn);
3350 
3351   /* EH_REGION insn notes cannot appear until well after we complete
3352      scheduling.  */
3353   if (NOTE_P (insn))
3354     gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3355 		&& NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3356 
3357   if (sched_deps_info->finish_insn)
3358     sched_deps_info->finish_insn ();
3359 
3360   /* Fixup the dependencies in the sched group.  */
3361   if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3362       && SCHED_GROUP_P (insn) && !sel_sched_p ())
3363     fixup_sched_groups (insn);
3364 }
3365 
3366 /* Initialize DEPS for the new block beginning with HEAD.  */
3367 void
3368 deps_start_bb (struct deps_desc *deps, rtx head)
3369 {
3370   gcc_assert (!deps->readonly);
3371 
3372   /* Before reload, if the previous block ended in a call, show that
3373      we are inside a post-call group, so as to keep the lifetimes of
3374      hard registers correct.  */
3375   if (! reload_completed && !LABEL_P (head))
3376     {
3377       rtx insn = prev_nonnote_nondebug_insn (head);
3378 
3379       if (insn && CALL_P (insn))
3380 	deps->in_post_call_group_p = post_call_initial;
3381     }
3382 }
3383 
3384 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3385    dependencies for each insn.  */
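/* Minimal usage sketch (simplified; the real callers are the region and
   selective schedulers, and the global scheduler state must already be
   set up):

     struct deps_desc deps;

     init_deps (&deps, false);
     sched_analyze (&deps, head, tail);
     free_deps (&deps);
     ...
     sched_free_deps (head, tail, true);
*/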
3386 void
3387 sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3388 {
3389   rtx insn;
3390 
3391   if (sched_deps_info->use_cselib)
3392     cselib_init (CSELIB_RECORD_MEMORY);
3393 
3394   deps_start_bb (deps, head);
3395 
3396   for (insn = head;; insn = NEXT_INSN (insn))
3397     {
3398 
3399       if (INSN_P (insn))
3400 	{
3401 	  /* And initialize deps_lists.  */
3402 	  sd_init_insn (insn);
3403 	}
3404 
3405       deps_analyze_insn (deps, insn);
3406 
3407       if (insn == tail)
3408 	{
3409 	  if (sched_deps_info->use_cselib)
3410 	    cselib_finish ();
3411 	  return;
3412 	}
3413     }
3414   gcc_unreachable ();
3415 }
3416 
3417 /* Helper for sched_free_deps ().
3418    Delete INSN's (RESOLVED_P) backward dependencies.  */
3419 static void
3420 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3421 {
3422   sd_iterator_def sd_it;
3423   dep_t dep;
3424   sd_list_types_def types;
3425 
3426   if (resolved_p)
3427     types = SD_LIST_RES_BACK;
3428   else
3429     types = SD_LIST_BACK;
3430 
3431   for (sd_it = sd_iterator_start (insn, types);
3432        sd_iterator_cond (&sd_it, &dep);)
3433     {
3434       dep_link_t link = *sd_it.linkp;
3435       dep_node_t node = DEP_LINK_NODE (link);
3436       deps_list_t back_list;
3437       deps_list_t forw_list;
3438 
3439       get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3440       remove_from_deps_list (link, back_list);
3441       delete_dep_node (node);
3442     }
3443 }
3444 
3445 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3446    deps_lists.  */
3447 void
3448 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3449 {
3450   rtx insn;
3451   rtx next_tail = NEXT_INSN (tail);
3452 
3453   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3454     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3455       {
3456 	/* Clear resolved back deps together with its dep_nodes.  */
3457 	/* Clear resolved back deps together with their dep_nodes.  */
3458 
3459 	/* Clear forward deps and leave the dep_nodes to the
3460 	   corresponding back_deps list.  */
3461 	if (resolved_p)
3462 	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3463 	else
3464 	  clear_deps_list (INSN_FORW_DEPS (insn));
3465 
3466 	sd_finish_insn (insn);
3467       }
3468 }
3469 
3470 /* Initialize variables for region data dependence analysis.
3471    When LAZY_REG_LAST is true, do not allocate reg_last array
3472    of struct deps_desc immediately.  */
3473 
3474 void
3475 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3476 {
3477   int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3478 
3479   deps->max_reg = max_reg;
3480   if (lazy_reg_last)
3481     deps->reg_last = NULL;
3482   else
3483     deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3484   INIT_REG_SET (&deps->reg_last_in_use);
3485   INIT_REG_SET (&deps->reg_conditional_sets);
3486 
3487   deps->pending_read_insns = 0;
3488   deps->pending_read_mems = 0;
3489   deps->pending_write_insns = 0;
3490   deps->pending_write_mems = 0;
3491   deps->pending_read_list_length = 0;
3492   deps->pending_write_list_length = 0;
3493   deps->pending_flush_length = 0;
3494   deps->last_pending_memory_flush = 0;
3495   deps->last_function_call = 0;
3496   deps->last_function_call_may_noreturn = 0;
3497   deps->sched_before_next_call = 0;
3498   deps->in_post_call_group_p = not_post_call;
3499   deps->last_debug_insn = 0;
3500   deps->last_reg_pending_barrier = NOT_A_BARRIER;
3501   deps->readonly = 0;
3502 }
3503 
3504 /* Init only the reg_last field of DEPS, which was not allocated before
3505    because DEPS was initialized lazily.  */
3506 void
3507 init_deps_reg_last (struct deps_desc *deps)
3508 {
3509   gcc_assert (deps && deps->max_reg > 0);
3510   gcc_assert (deps->reg_last == NULL);
3511 
3512   deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3513 }
3514 
3515 
3516 /* Free insn lists found in DEPS.  */
3517 
3518 void
3519 free_deps (struct deps_desc *deps)
3520 {
3521   unsigned i;
3522   reg_set_iterator rsi;
3523 
3524   /* A max_reg of 0 means this context has already been freed.  */
3525   if (deps->max_reg == 0)
3526     {
3527       gcc_assert (deps->reg_last == NULL);
3528       return;
3529     }
3530   deps->max_reg = 0;
3531 
3532   free_INSN_LIST_list (&deps->pending_read_insns);
3533   free_EXPR_LIST_list (&deps->pending_read_mems);
3534   free_INSN_LIST_list (&deps->pending_write_insns);
3535   free_EXPR_LIST_list (&deps->pending_write_mems);
3536   free_INSN_LIST_list (&deps->last_pending_memory_flush);
3537 
3538   /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3539      times.  For a testcase with 42000 regs and 8000 small basic blocks,
3540      this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
3541   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3542     {
3543       struct deps_reg *reg_last = &deps->reg_last[i];
3544       if (reg_last->uses)
3545 	free_INSN_LIST_list (&reg_last->uses);
3546       if (reg_last->sets)
3547 	free_INSN_LIST_list (&reg_last->sets);
3548       if (reg_last->implicit_sets)
3549 	free_INSN_LIST_list (&reg_last->implicit_sets);
3550       if (reg_last->clobbers)
3551 	free_INSN_LIST_list (&reg_last->clobbers);
3552     }
3553   CLEAR_REG_SET (&deps->reg_last_in_use);
3554   CLEAR_REG_SET (&deps->reg_conditional_sets);
3555 
3556   /* As we initialize reg_last lazily, it is possible that we didn't allocate
3557      it at all.  */
3558   if (deps->reg_last)
3559     free (deps->reg_last);
3560   deps->reg_last = NULL;
3561 
3562   deps = NULL;
3563 }
3564 
3565 /* Remove INSN from the dependence context DEPS.  Caution: reg_conditional_sets
3566    is not handled.  */
3567 void
3568 remove_from_deps (struct deps_desc *deps, rtx insn)
3569 {
3570   int removed;
3571   unsigned i;
3572   reg_set_iterator rsi;
3573 
3574   removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3575                                                &deps->pending_read_mems);
3576   if (!DEBUG_INSN_P (insn))
3577     deps->pending_read_list_length -= removed;
3578   removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3579                                                &deps->pending_write_mems);
3580   deps->pending_write_list_length -= removed;
3581   removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3582   deps->pending_flush_length -= removed;
3583 
3584   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3585     {
3586       struct deps_reg *reg_last = &deps->reg_last[i];
3587       if (reg_last->uses)
3588 	remove_from_dependence_list (insn, &reg_last->uses);
3589       if (reg_last->sets)
3590 	remove_from_dependence_list (insn, &reg_last->sets);
3591       if (reg_last->implicit_sets)
3592 	remove_from_dependence_list (insn, &reg_last->implicit_sets);
3593       if (reg_last->clobbers)
3594 	remove_from_dependence_list (insn, &reg_last->clobbers);
3595       if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3596 	  && !reg_last->clobbers)
3597         CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3598     }
3599 
3600   if (CALL_P (insn))
3601     {
3602       remove_from_dependence_list (insn, &deps->last_function_call);
3603       remove_from_dependence_list (insn,
3604 				   &deps->last_function_call_may_noreturn);
3605     }
3606   remove_from_dependence_list (insn, &deps->sched_before_next_call);
3607 }
3608 
3609 /* Init deps data vector.  */
3610 static void
3611 init_deps_data_vector (void)
3612 {
3613   int reserve = (sched_max_luid + 1
3614                  - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3615   if (reserve > 0
3616       && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3617     VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3618                            3 * sched_max_luid / 2);
3619 }
3620 
3621 /* Initialize or extend (depending on GLOBAL_P) dependency data, and set up
3622    the dependency caches when it is profitable to use them.  */
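/* "Dependency data" here covers the per-insn haifa_deps_insn_data vector,
   the optional dependency bitmap caches, and (when GLOBAL_P) the allocation
   pools for deps_lists and dep_nodes.  */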
3623 void
3624 sched_deps_init (bool global_p)
3625 {
3626   /* Average number of insns per basic block.
3627      '+ 1' is used to make it nonzero.  */
3628   int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3629 
3630   init_deps_data_vector ();
3631 
3632   /* We use another caching mechanism for selective scheduling, so
3633      we don't use this one.  */
3634   if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3635     {
3636       /* ?!? We could save some memory by computing a per-region luid mapping
3637          which could reduce both the number of vectors in the cache and the
3638          size of each vector.  Instead we just avoid the cache entirely unless
3639          the average number of instructions in a basic block is very high.  See
3640          the comment before the declaration of true_dependency_cache for
3641          what we consider "very high".  */
3642       cache_size = 0;
3643       extend_dependency_caches (sched_max_luid, true);
3644     }
3645 
3646   if (global_p)
3647     {
3648       dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3649                                    /* Allocate lists for one block at a time.  */
3650                                    insns_in_block);
3651       dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3652                                    /* Allocate nodes for one block at a time.
3653                                       We assume that the average insn has
3654                                       5 producers.  */
3655                                    5 * insns_in_block);
3656     }
3657 }
3658 
3659 
3660 /* Create or extend (depending on CREATE_P) dependency caches to
3661    size N.  */
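/* Each cache is an array of bitmap heads indexed by insn luid; the caches
   let the analyzer detect an already-recorded dependence of a given kind
   without walking the dependence lists again.  */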
3662 void
3663 extend_dependency_caches (int n, bool create_p)
3664 {
3665   if (create_p || true_dependency_cache)
3666     {
3667       int i, luid = cache_size + n;
3668 
3669       true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3670 					  luid);
3671       output_dependency_cache = XRESIZEVEC (bitmap_head,
3672 					    output_dependency_cache, luid);
3673       anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3674 					  luid);
3675 
3676       if (current_sched_info->flags & DO_SPECULATION)
3677         spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3678 					    luid);
3679 
3680       for (i = cache_size; i < luid; i++)
3681 	{
3682 	  bitmap_initialize (&true_dependency_cache[i], 0);
3683 	  bitmap_initialize (&output_dependency_cache[i], 0);
3684 	  bitmap_initialize (&anti_dependency_cache[i], 0);
3685 
3686           if (current_sched_info->flags & DO_SPECULATION)
3687             bitmap_initialize (&spec_dependency_cache[i], 0);
3688 	}
3689       cache_size = luid;
3690     }
3691 }
3692 
3693 /* Finalize dependency information for the whole function.  */
3694 void
3695 sched_deps_finish (void)
3696 {
3697   gcc_assert (deps_pools_are_empty_p ());
3698   free_alloc_pool_if_empty (&dn_pool);
3699   free_alloc_pool_if_empty (&dl_pool);
3700   gcc_assert (dn_pool == NULL && dl_pool == NULL);
3701 
3702   VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3704 
3705   if (true_dependency_cache)
3706     {
3707       int i;
3708 
3709       for (i = 0; i < cache_size; i++)
3710 	{
3711 	  bitmap_clear (&true_dependency_cache[i]);
3712 	  bitmap_clear (&output_dependency_cache[i]);
3713 	  bitmap_clear (&anti_dependency_cache[i]);
3714 
3715           if (sched_deps_info->generate_spec_deps)
3716             bitmap_clear (&spec_dependency_cache[i]);
3717 	}
3718       free (true_dependency_cache);
3719       true_dependency_cache = NULL;
3720       free (output_dependency_cache);
3721       output_dependency_cache = NULL;
3722       free (anti_dependency_cache);
3723       anti_dependency_cache = NULL;
3724 
3725       if (sched_deps_info->generate_spec_deps)
3726         {
3727           free (spec_dependency_cache);
3728           spec_dependency_cache = NULL;
3729         }
3730 
3731     }

  /* Reset cache_size only now that the caches above have been cleared.  */
  cache_size = 0;
3732 }
3733 
3734 /* Initialize some global variables needed by the dependency analysis
3735    code.  */
3736 
3737 void
3738 init_deps_global (void)
3739 {
3740   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3741   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3742   reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3743   reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3744   reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3745   reg_pending_barrier = NOT_A_BARRIER;
3746 
3747   if (!sel_sched_p () || sched_emulate_haifa_p)
3748     {
3749       sched_deps_info->start_insn = haifa_start_insn;
3750       sched_deps_info->finish_insn = haifa_finish_insn;
3751 
3752       sched_deps_info->note_reg_set = haifa_note_reg_set;
3753       sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3754       sched_deps_info->note_reg_use = haifa_note_reg_use;
3755 
3756       sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3757       sched_deps_info->note_dep = haifa_note_dep;
3758     }
3759 }
3760 
3761 /* Free everything used by the dependency analysis code.  */
3762 
3763 void
3764 finish_deps_global (void)
3765 {
3766   FREE_REG_SET (reg_pending_sets);
3767   FREE_REG_SET (reg_pending_clobbers);
3768   FREE_REG_SET (reg_pending_uses);
3769 }
3770 
3771 /* Estimate the weakness of dependence between MEM1 and MEM2.  */
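/* The result is on the dep-weakness scale (MIN_DEP_WEAK, UNCERTAIN_DEP_WEAK,
   NO_DEP_WEAK from sched-int.h): MIN_DEP_WEAK means the accesses almost
   certainly conflict (do not speculate), while values near NO_DEP_WEAK mean
   a conflict is unlikely and speculation is attractive.  */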
3772 dw_t
3773 estimate_dep_weak (rtx mem1, rtx mem2)
3774 {
3775   rtx r1, r2;
3776 
3777   if (mem1 == mem2)
3778     /* MEMs are the same - don't speculate.  */
3779     return MIN_DEP_WEAK;
3780 
3781   r1 = XEXP (mem1, 0);
3782   r2 = XEXP (mem2, 0);
3783 
3784   if (r1 == r2
3785       || (REG_P (r1) && REG_P (r2)
3786 	  && REGNO (r1) == REGNO (r2)))
3787     /* Again, MEMs are the same.  */
3788     return MIN_DEP_WEAK;
3789   else if ((REG_P (r1) && !REG_P (r2))
3790 	   || (!REG_P (r1) && REG_P (r2)))
3791     /* Different addressing modes - a reason to be more speculative
3792        than usual.  */
3793     return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3794   else
3795     /* We can't say anything about the dependence.  */
3796     return UNCERTAIN_DEP_WEAK;
3797 }
3798 
3799 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3800    This function can handle INSN and ELEM being the same (INSN == ELEM).
3801    It is a convenience wrapper.  */
3802 void
3803 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3804 {
3805   ds_t ds;
3806   bool internal;
3807 
3808   if (dep_type == REG_DEP_TRUE)
3809     ds = DEP_TRUE;
3810   else if (dep_type == REG_DEP_OUTPUT)
3811     ds = DEP_OUTPUT;
3812   else
3813     {
3814       gcc_assert (dep_type == REG_DEP_ANTI);
3815       ds = DEP_ANTI;
3816     }
3817 
3818   /* When add_dependence is called from inside sched-deps.c, we expect
3819      cur_insn to be non-null.  */
3820   internal = cur_insn != NULL;
3821   if (internal)
3822     gcc_assert (insn == cur_insn);
3823   else
3824     cur_insn = insn;
3825 
3826   note_dep (elem, ds);
3827   if (!internal)
3828     cur_insn = NULL;
3829 }
3830 
3831 /* Return weakness of speculative type TYPE in the dep_status DS.  */
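/* Each speculative type occupies a separate bit field within a ds_t; the
   weakness is recovered by masking DS with TYPE and shifting the field down
   by the corresponding *_BITS_OFFSET.  */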
3832 dw_t
3833 get_dep_weak_1 (ds_t ds, ds_t type)
3834 {
3835   ds = ds & type;
3836 
3837   switch (type)
3838     {
3839     case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3840     case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3841     case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3842     case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3843     default: gcc_unreachable ();
3844     }
3845 
3846   return (dw_t) ds;
3847 }
3848 
3849 dw_t
3850 get_dep_weak (ds_t ds, ds_t type)
3851 {
3852   dw_t dw = get_dep_weak_1 (ds, type);
3853 
3854   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3855   return dw;
3856 }
3857 
3858 /* Return a dep_status that has the same parameters as DS, except that
3859    speculative type TYPE will have weakness DW.  */
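/* E.g. set_dep_weak (ds, BEGIN_DATA, w) first clears the BEGIN_DATA bit
   field of DS and then ORs in the weakness W shifted left by
   BEGIN_DATA_BITS_OFFSET.  */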
3860 ds_t
3861 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3862 {
3863   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3864 
3865   ds &= ~type;
3866   switch (type)
3867     {
3868     case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3869     case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3870     case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3871     case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3872     default: gcc_unreachable ();
3873     }
3874   return ds;
3875 }
3876 
3877 /* Return the join of two dep_statuses DS1 and DS2.
3878    If MAX_P is true then choose the greater probability,
3879    otherwise multiply probabilities.
3880    This function assumes that both DS1 and DS2 contain speculative bits.  */
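/* When MAX_P is false, weaknesses are combined as fixed-point probabilities
   scaled by MAX_DEP_WEAK: the merged value is dw1 * dw2 / MAX_DEP_WEAK, so
   merging two weaknesses of MAX_DEP_WEAK / 2 gives about MAX_DEP_WEAK / 4.  */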
3881 static ds_t
3882 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3883 {
3884   ds_t ds, t;
3885 
3886   gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3887 
3888   ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3889 
3890   t = FIRST_SPEC_TYPE;
3891   do
3892     {
3893       if ((ds1 & t) && !(ds2 & t))
3894 	ds |= ds1 & t;
3895       else if (!(ds1 & t) && (ds2 & t))
3896 	ds |= ds2 & t;
3897       else if ((ds1 & t) && (ds2 & t))
3898 	{
3899 	  dw_t dw1 = get_dep_weak (ds1, t);
3900 	  dw_t dw2 = get_dep_weak (ds2, t);
3901 	  ds_t dw;
3902 
3903 	  if (!max_p)
3904 	    {
3905 	      dw = ((ds_t) dw1) * ((ds_t) dw2);
3906 	      dw /= MAX_DEP_WEAK;
3907 	      if (dw < MIN_DEP_WEAK)
3908 		dw = MIN_DEP_WEAK;
3909 	    }
3910 	  else
3911 	    {
3912 	      if (dw1 >= dw2)
3913 		dw = dw1;
3914 	      else
3915 		dw = dw2;
3916 	    }
3917 
3918 	  ds = set_dep_weak (ds, t, (dw_t) dw);
3919 	}
3920 
3921       if (t == LAST_SPEC_TYPE)
3922 	break;
3923       t <<= SPEC_TYPE_SHIFT;
3924     }
3925   while (1);
3926 
3927   return ds;
3928 }
3929 
3930 /* Return the join of two dep_statuses DS1 and DS2.
3931    This function assumes that both DS1 and DS2 contain speculative bits.  */
3932 ds_t
3933 ds_merge (ds_t ds1, ds_t ds2)
3934 {
3935   return ds_merge_1 (ds1, ds2, false);
3936 }
3937 
3938 /* Return the join of dep_statuses DS and DS2.  MEM1 and MEM2, if nonnull,
   are used to estimate the weakness of a data-speculative dependence.  */
3939 ds_t
3940 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
3941 {
3942   ds_t new_status = ds | ds2;
3943 
3944   if (new_status & SPECULATIVE)
3945     {
3946       if ((ds && !(ds & SPECULATIVE))
3947 	  || (ds2 && !(ds2 & SPECULATIVE)))
3948 	/* Then this dep can't be speculative.  */
3949 	new_status &= ~SPECULATIVE;
3950       else
3951 	{
3952 	  /* Both are speculative.  Merge the probabilities.  */
3953 	  if (mem1)
3954 	    {
3955 	      dw_t dw;
3956 
3957 	      dw = estimate_dep_weak (mem1, mem2);
3958 	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
3959 	    }
3960 
3961 	  if (!ds)
3962 	    new_status = ds2;
3963 	  else if (!ds2)
3964 	    new_status = ds;
3965 	  else
3966 	    new_status = ds_merge (ds2, ds);
3967 	}
3968     }
3969 
3970   return new_status;
3971 }
3972 
3973 /* Return the join of DS1 and DS2.  Use maximum instead of multiplying
3974    probabilities.  */
3975 ds_t
3976 ds_max_merge (ds_t ds1, ds_t ds2)
3977 {
3978   if (ds1 == 0 && ds2 == 0)
3979     return 0;
3980 
3981   if (ds1 == 0 && ds2 != 0)
3982     return ds2;
3983 
3984   if (ds1 != 0 && ds2 == 0)
3985     return ds1;
3986 
3987   return ds_merge_1 (ds1, ds2, true);
3988 }
3989 
3990 /* Return the probability of speculation success for the speculation
3991    status DS.  */
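/* The weaknesses of the individual speculative types are multiplied as
   fixed-point probabilities: the product of the N present fields is divided
   by MAX_DEP_WEAK (N - 1) times, keeping the result on the
   [MIN_DEP_WEAK, MAX_DEP_WEAK] scale.  */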
3992 dw_t
3993 ds_weak (ds_t ds)
3994 {
3995   ds_t res = 1, dt;
3996   int n = 0;
3997 
3998   dt = FIRST_SPEC_TYPE;
3999   do
4000     {
4001       if (ds & dt)
4002 	{
4003 	  res *= (ds_t) get_dep_weak (ds, dt);
4004 	  n++;
4005 	}
4006 
4007       if (dt == LAST_SPEC_TYPE)
4008 	break;
4009       dt <<= SPEC_TYPE_SHIFT;
4010     }
4011   while (1);
4012 
4013   gcc_assert (n);
4014   while (--n)
4015     res /= MAX_DEP_WEAK;
4016 
4017   if (res < MIN_DEP_WEAK)
4018     res = MIN_DEP_WEAK;
4019 
4020   gcc_assert (res <= MAX_DEP_WEAK);
4021 
4022   return (dw_t) res;
4023 }
4024 
4025 /* Return a dep status that contains all speculation types of DS.  */
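/* Each speculation type (e.g. BEGIN_DATA) is a multi-bit mask, so
   "ds |= BEGIN_DATA" saturates the whole weakness field; the final mask with
   SPECULATIVE then keeps only the speculation bits.  */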
4026 ds_t
4027 ds_get_speculation_types (ds_t ds)
4028 {
4029   if (ds & BEGIN_DATA)
4030     ds |= BEGIN_DATA;
4031   if (ds & BE_IN_DATA)
4032     ds |= BE_IN_DATA;
4033   if (ds & BEGIN_CONTROL)
4034     ds |= BEGIN_CONTROL;
4035   if (ds & BE_IN_CONTROL)
4036     ds |= BE_IN_CONTROL;
4037 
4038   return ds & SPECULATIVE;
4039 }
4040 
4041 /* Return a dep status that contains maximal weakness for each speculation
4042    type present in DS.  */
4043 ds_t
4044 ds_get_max_dep_weak (ds_t ds)
4045 {
4046   if (ds & BEGIN_DATA)
4047     ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4048   if (ds & BE_IN_DATA)
4049     ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4050   if (ds & BEGIN_CONTROL)
4051     ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4052   if (ds & BE_IN_CONTROL)
4053     ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4054 
4055   return ds;
4056 }
4057 
4058 /* Dump information about the dependence status S.  */
4059 static void
4060 dump_ds (FILE *f, ds_t s)
4061 {
4062   fprintf (f, "{");
4063 
4064   if (s & BEGIN_DATA)
4065     fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4066   if (s & BE_IN_DATA)
4067     fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4068   if (s & BEGIN_CONTROL)
4069     fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4070   if (s & BE_IN_CONTROL)
4071     fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4072 
4073   if (s & HARD_DEP)
4074     fprintf (f, "HARD_DEP; ");
4075 
4076   if (s & DEP_TRUE)
4077     fprintf (f, "DEP_TRUE; ");
4078   if (s & DEP_ANTI)
4079     fprintf (f, "DEP_ANTI; ");
4080   if (s & DEP_OUTPUT)
4081     fprintf (f, "DEP_OUTPUT; ");
4082 
4083   fprintf (f, "}");
4084 }
4085 
4086 void
4087 debug_ds (ds_t s)
4088 {
4089   dump_ds (stderr, s);
4090   fprintf (stderr, "\n");
4091 }
4092 
4093 #ifdef ENABLE_CHECKING
4094 /* Verify that dependence type and status are consistent.
4095    If RELAXED_P is true, then skip dep_weakness checks.  */
4096 static void
4097 check_dep (dep_t dep, bool relaxed_p)
4098 {
4099   enum reg_note dt = DEP_TYPE (dep);
4100   ds_t ds = DEP_STATUS (dep);
4101 
4102   gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4103 
4104   if (!(current_sched_info->flags & USE_DEPS_LIST))
4105     {
4106       gcc_assert (ds == -1);
4107       return;
4108     }
4109 
4110   /* Check that dependence type contains the same bits as the status.  */
4111   if (dt == REG_DEP_TRUE)
4112     gcc_assert (ds & DEP_TRUE);
4113   else if (dt == REG_DEP_OUTPUT)
4114     gcc_assert ((ds & DEP_OUTPUT)
4115 		&& !(ds & DEP_TRUE));
4116   else
4117     gcc_assert ((dt == REG_DEP_ANTI)
4118 		&& (ds & DEP_ANTI)
4119 		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
4120 
4121   /* HARD_DEP cannot appear in the dep_status of a link.  */
4122   gcc_assert (!(ds & HARD_DEP));
4123 
4124   /* Check that dependence status is set correctly when speculation is not
4125      supported.  */
4126   if (!sched_deps_info->generate_spec_deps)
4127     gcc_assert (!(ds & SPECULATIVE));
4128   else if (ds & SPECULATIVE)
4129     {
4130       if (!relaxed_p)
4131 	{
4132 	  ds_t type = FIRST_SPEC_TYPE;
4133 
4134 	  /* Check that dependence weakness is in proper range.  */
4135 	  do
4136 	    {
4137 	      if (ds & type)
4138 		get_dep_weak (ds, type);
4139 
4140 	      if (type == LAST_SPEC_TYPE)
4141 		break;
4142 	      type <<= SPEC_TYPE_SHIFT;
4143 	    }
4144 	  while (1);
4145 	}
4146 
4147       if (ds & BEGIN_SPEC)
4148 	{
4149 	  /* Only a true dependence can be data speculative.  */
4150 	  if (ds & BEGIN_DATA)
4151 	    gcc_assert (ds & DEP_TRUE);
4152 
4153 	  /* Control dependencies in the insn scheduler are represented by
4154 	     anti-dependencies, therefore only an anti dependence can be
4155 	     control speculative.  */
4156 	  if (ds & BEGIN_CONTROL)
4157 	    gcc_assert (ds & DEP_ANTI);
4158 	}
4159       else
4160 	{
4161 	  /* Subsequent speculations should resolve true dependencies.  */
4162 	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4163 	}
4164 
4165       /* Check that true and anti dependencies can't have other speculative
4166 	 statuses.  */
4167       if (ds & DEP_TRUE)
4168 	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4169       /* An output dependence can't be speculative at all.  */
4170       gcc_assert (!(ds & DEP_OUTPUT));
4171       if (ds & DEP_ANTI)
4172 	gcc_assert (ds & BEGIN_CONTROL);
4173     }
4174 }
4175 #endif /* ENABLE_CHECKING */
4176 
4177 #endif /* INSN_SCHEDULING */
4178