xref: /dflybsd-src/contrib/gcc-4.7/gcc/sched-ebb.c (revision 04febcfb30580676d3e95f58a16c5137ee478b32)
1*e4b17023SJohn Marino /* Instruction scheduling pass.
2*e4b17023SJohn Marino    Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3*e4b17023SJohn Marino    2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4*e4b17023SJohn Marino    Free Software Foundation, Inc.
5*e4b17023SJohn Marino    Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6*e4b17023SJohn Marino    and currently maintained by, Jim Wilson (wilson@cygnus.com)
7*e4b17023SJohn Marino 
8*e4b17023SJohn Marino This file is part of GCC.
9*e4b17023SJohn Marino 
10*e4b17023SJohn Marino GCC is free software; you can redistribute it and/or modify it under
11*e4b17023SJohn Marino the terms of the GNU General Public License as published by the Free
12*e4b17023SJohn Marino Software Foundation; either version 3, or (at your option) any later
13*e4b17023SJohn Marino version.
14*e4b17023SJohn Marino 
15*e4b17023SJohn Marino GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16*e4b17023SJohn Marino WARRANTY; without even the implied warranty of MERCHANTABILITY or
17*e4b17023SJohn Marino FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18*e4b17023SJohn Marino for more details.
19*e4b17023SJohn Marino 
20*e4b17023SJohn Marino You should have received a copy of the GNU General Public License
21*e4b17023SJohn Marino along with GCC; see the file COPYING3.  If not see
22*e4b17023SJohn Marino <http://www.gnu.org/licenses/>.  */
23*e4b17023SJohn Marino 
24*e4b17023SJohn Marino #include "config.h"
25*e4b17023SJohn Marino #include "system.h"
26*e4b17023SJohn Marino #include "coretypes.h"
27*e4b17023SJohn Marino #include "tm.h"
28*e4b17023SJohn Marino #include "diagnostic-core.h"
29*e4b17023SJohn Marino #include "rtl.h"
30*e4b17023SJohn Marino #include "tm_p.h"
31*e4b17023SJohn Marino #include "hard-reg-set.h"
32*e4b17023SJohn Marino #include "regs.h"
33*e4b17023SJohn Marino #include "function.h"
34*e4b17023SJohn Marino #include "flags.h"
35*e4b17023SJohn Marino #include "insn-config.h"
36*e4b17023SJohn Marino #include "insn-attr.h"
37*e4b17023SJohn Marino #include "except.h"
38*e4b17023SJohn Marino #include "recog.h"
39*e4b17023SJohn Marino #include "cfglayout.h"
40*e4b17023SJohn Marino #include "params.h"
41*e4b17023SJohn Marino #include "sched-int.h"
42*e4b17023SJohn Marino #include "target.h"
43*e4b17023SJohn Marino #include "output.h"
44*e4b17023SJohn Marino 
45*e4b17023SJohn Marino 
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks, that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  Updated as scheduling may append a
   freshly created block to the ebb (see begin_move_insn).  */
static basic_block last_bb;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (const_rtx, int);
static int rank (rtx, rtx);
static int ebb_contributes_to_priority (rtx, rtx);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static void debug_ebb_dependencies (rtx, rtx);

/* Hooks installed into common_sched_info / haifa_sched_info tables.  */
static void ebb_add_remove_insn (rtx, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void ebb_fix_recovery_cfg (int, int, int);
75*e4b17023SJohn Marino 
76*e4b17023SJohn Marino /* Allocate memory and store the state of the frontend.  Return the allocated
77*e4b17023SJohn Marino    memory.  */
78*e4b17023SJohn Marino static void *
save_ebb_state(void)79*e4b17023SJohn Marino save_ebb_state (void)
80*e4b17023SJohn Marino {
81*e4b17023SJohn Marino   int *p = XNEW (int);
82*e4b17023SJohn Marino   *p = sched_rgn_n_insns;
83*e4b17023SJohn Marino   return p;
84*e4b17023SJohn Marino }
85*e4b17023SJohn Marino 
86*e4b17023SJohn Marino /* Restore the state of the frontend from P_, then free it.  */
87*e4b17023SJohn Marino static void
restore_ebb_state(void * p_)88*e4b17023SJohn Marino restore_ebb_state (void *p_)
89*e4b17023SJohn Marino {
90*e4b17023SJohn Marino   int *p = (int *)p_;
91*e4b17023SJohn Marino   sched_rgn_n_insns = *p;
92*e4b17023SJohn Marino   free (p_);
93*e4b17023SJohn Marino }
94*e4b17023SJohn Marino 
95*e4b17023SJohn Marino /* Return nonzero if there are more insns that should be scheduled.  */
96*e4b17023SJohn Marino 
97*e4b17023SJohn Marino static int
schedule_more_p(void)98*e4b17023SJohn Marino schedule_more_p (void)
99*e4b17023SJohn Marino {
100*e4b17023SJohn Marino   return sched_rgn_n_insns < rgn_n_insns;
101*e4b17023SJohn Marino }
102*e4b17023SJohn Marino 
103*e4b17023SJohn Marino /* Print dependency information about ebb between HEAD and TAIL.  */
104*e4b17023SJohn Marino static void
debug_ebb_dependencies(rtx head,rtx tail)105*e4b17023SJohn Marino debug_ebb_dependencies (rtx head, rtx tail)
106*e4b17023SJohn Marino {
107*e4b17023SJohn Marino   fprintf (sched_dump,
108*e4b17023SJohn Marino 	   ";;   --------------- forward dependences: ------------ \n");
109*e4b17023SJohn Marino 
110*e4b17023SJohn Marino   fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
111*e4b17023SJohn Marino 	   BLOCK_NUM (head), BLOCK_NUM (tail));
112*e4b17023SJohn Marino 
113*e4b17023SJohn Marino   debug_dependencies (head, tail);
114*e4b17023SJohn Marino }
115*e4b17023SJohn Marino 
116*e4b17023SJohn Marino /* Add all insns that are initially ready to the ready list READY.  Called
117*e4b17023SJohn Marino    once before scheduling a set of insns.  */
118*e4b17023SJohn Marino 
119*e4b17023SJohn Marino static void
init_ready_list(void)120*e4b17023SJohn Marino init_ready_list (void)
121*e4b17023SJohn Marino {
122*e4b17023SJohn Marino   int n = 0;
123*e4b17023SJohn Marino   rtx prev_head = current_sched_info->prev_head;
124*e4b17023SJohn Marino   rtx next_tail = current_sched_info->next_tail;
125*e4b17023SJohn Marino   rtx insn;
126*e4b17023SJohn Marino 
127*e4b17023SJohn Marino   sched_rgn_n_insns = 0;
128*e4b17023SJohn Marino 
129*e4b17023SJohn Marino   /* Print debugging information.  */
130*e4b17023SJohn Marino   if (sched_verbose >= 5)
131*e4b17023SJohn Marino     debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));
132*e4b17023SJohn Marino 
133*e4b17023SJohn Marino   /* Initialize ready list with all 'ready' insns in target block.
134*e4b17023SJohn Marino      Count number of insns in the target block being scheduled.  */
135*e4b17023SJohn Marino   for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
136*e4b17023SJohn Marino     {
137*e4b17023SJohn Marino       try_ready (insn);
138*e4b17023SJohn Marino       n++;
139*e4b17023SJohn Marino     }
140*e4b17023SJohn Marino 
141*e4b17023SJohn Marino   gcc_assert (n == rgn_n_insns);
142*e4b17023SJohn Marino }
143*e4b17023SJohn Marino 
144*e4b17023SJohn Marino /* INSN is being scheduled after LAST.  Update counters.  */
145*e4b17023SJohn Marino static void
begin_schedule_ready(rtx insn ATTRIBUTE_UNUSED)146*e4b17023SJohn Marino begin_schedule_ready (rtx insn ATTRIBUTE_UNUSED)
147*e4b17023SJohn Marino {
148*e4b17023SJohn Marino   sched_rgn_n_insns++;
149*e4b17023SJohn Marino }
150*e4b17023SJohn Marino 
/* INSN is being moved to its place in the schedule, after LAST.

   Normally nothing needs to happen here; but when INSN is the jump
   ending the ebb and it is being scheduled above other insns, those
   insns become "partially dead" code placed after the control-flow
   insn.  In that case we must grow the CFG: create (or split an edge
   to obtain) a new basic block to hold them, and extend the current
   scheduling region accordingly.  */
static void
begin_move_insn (rtx insn, rtx last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we do have partially dead
	 instruction scheduled after last control flow instruction.
	 In this case we can create new basic block.  It is
	 always exactly one basic block last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      /* INSN must really be the jump ending LAST_BB (and not, e.g., a
	 speculation check or the sole insn of the block).  */
      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
			   && !IS_SPECULATION_CHECK_P (insn)
			   && BB_HEAD (last_bb) != insn
			   && BB_END (last_bb) == insn);

      {
	rtx x;

	/* With a fallthru edge the jump is followed by a note/label;
	   without one it must be followed by a barrier.  */
	x = NEXT_INSN (insn);
	if (e)
	  gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
	else
	  gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
	{
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	{
	  /* Create an empty unreachable block after the INSN.  */
	  rtx next = NEXT_INSN (insn);
	  if (next && BARRIER_P (next))
	    next = NEXT_INSN (next);
	  bb = create_basic_block (next, NULL_RTX, last_bb);
	}

      /* split_edge () creates BB before E->DEST.  Keep in mind, that
	 this operation extends scheduling region till the end of BB.
	 Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
	 of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.
	 sched_init_only_bb is expected to update LAST_BB.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
214*e4b17023SJohn Marino 
215*e4b17023SJohn Marino /* Return a string that contains the insn uid and optionally anything else
216*e4b17023SJohn Marino    necessary to identify this insn in an output.  It's valid to use a
217*e4b17023SJohn Marino    static buffer for this.  The ALIGNED parameter should cause the string
218*e4b17023SJohn Marino    to be formatted so that multiple output lines will line up nicely.  */
219*e4b17023SJohn Marino 
220*e4b17023SJohn Marino static const char *
ebb_print_insn(const_rtx insn,int aligned ATTRIBUTE_UNUSED)221*e4b17023SJohn Marino ebb_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
222*e4b17023SJohn Marino {
223*e4b17023SJohn Marino   static char tmp[80];
224*e4b17023SJohn Marino 
225*e4b17023SJohn Marino   /* '+' before insn means it is a new cycle start.  */
226*e4b17023SJohn Marino   if (GET_MODE (insn) == TImode)
227*e4b17023SJohn Marino     sprintf (tmp, "+ %4d", INSN_UID (insn));
228*e4b17023SJohn Marino   else
229*e4b17023SJohn Marino     sprintf (tmp, "  %4d", INSN_UID (insn));
230*e4b17023SJohn Marino 
231*e4b17023SJohn Marino   return tmp;
232*e4b17023SJohn Marino }
233*e4b17023SJohn Marino 
234*e4b17023SJohn Marino /* Compare priority of two insns.  Return a positive number if the second
235*e4b17023SJohn Marino    insn is to be preferred for scheduling, and a negative one if the first
236*e4b17023SJohn Marino    is to be preferred.  Zero if they are equally good.  */
237*e4b17023SJohn Marino 
238*e4b17023SJohn Marino static int
rank(rtx insn1,rtx insn2)239*e4b17023SJohn Marino rank (rtx insn1, rtx insn2)
240*e4b17023SJohn Marino {
241*e4b17023SJohn Marino   basic_block bb1 = BLOCK_FOR_INSN (insn1);
242*e4b17023SJohn Marino   basic_block bb2 = BLOCK_FOR_INSN (insn2);
243*e4b17023SJohn Marino 
244*e4b17023SJohn Marino   if (bb1->count > bb2->count
245*e4b17023SJohn Marino       || bb1->frequency > bb2->frequency)
246*e4b17023SJohn Marino     return -1;
247*e4b17023SJohn Marino   if (bb1->count < bb2->count
248*e4b17023SJohn Marino       || bb1->frequency < bb2->frequency)
249*e4b17023SJohn Marino     return 1;
250*e4b17023SJohn Marino   return 0;
251*e4b17023SJohn Marino }
252*e4b17023SJohn Marino 
253*e4b17023SJohn Marino /* NEXT is an instruction that depends on INSN (a backward dependence);
254*e4b17023SJohn Marino    return nonzero if we should include this dependence in priority
255*e4b17023SJohn Marino    calculations.  */
256*e4b17023SJohn Marino 
257*e4b17023SJohn Marino static int
ebb_contributes_to_priority(rtx next ATTRIBUTE_UNUSED,rtx insn ATTRIBUTE_UNUSED)258*e4b17023SJohn Marino ebb_contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
259*e4b17023SJohn Marino                              rtx insn ATTRIBUTE_UNUSED)
260*e4b17023SJohn Marino {
261*e4b17023SJohn Marino   return 1;
262*e4b17023SJohn Marino }
263*e4b17023SJohn Marino 
264*e4b17023SJohn Marino  /* INSN is a JUMP_INSN.  Store the set of registers that
265*e4b17023SJohn Marino     must be considered as used by this jump in USED.  */
266*e4b17023SJohn Marino 
267*e4b17023SJohn Marino void
ebb_compute_jump_reg_dependencies(rtx insn,regset used)268*e4b17023SJohn Marino ebb_compute_jump_reg_dependencies (rtx insn, regset used)
269*e4b17023SJohn Marino {
270*e4b17023SJohn Marino   basic_block b = BLOCK_FOR_INSN (insn);
271*e4b17023SJohn Marino   edge e;
272*e4b17023SJohn Marino   edge_iterator ei;
273*e4b17023SJohn Marino 
274*e4b17023SJohn Marino   FOR_EACH_EDGE (e, ei, b->succs)
275*e4b17023SJohn Marino     if ((e->flags & EDGE_FALLTHRU) == 0)
276*e4b17023SJohn Marino       bitmap_ior_into (used, df_get_live_in (e->dest));
277*e4b17023SJohn Marino }
278*e4b17023SJohn Marino 
/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

/* Filled from haifa_common_sched_info in schedule_ebbs_init, then a few
   hooks are overridden with the ebb_* variants.  */
static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    /* The remaining analysis callbacks are unused by the ebb scheduler.  */
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    /* NOTE(review): positional flag fields; confirm their names against
       struct sched_deps_info_def in sched-int.h before changing.  */
    1, 0, 0
  };
291*e4b17023SJohn Marino 
/* Haifa scheduler hooks for extended-basic-block scheduling.  The
   initializer is positional; field meanings follow the layout of
   struct haifa_sched_info in sched-int.h.  */
static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,				/* can_schedule_ready_p -- presumably; verify */
  schedule_more_p,
  NULL,				/* new_ready -- presumably; verify */
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,			/* prev_head, next_tail (set per region) */
  NULL, NULL,			/* head, tail (set per region) */
  1, 0,				/* queue_must_finish_empty, flags seed */

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
319*e4b17023SJohn Marino 
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns';
   blocks are chained through their aux fields.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  /* Walk the producers of LOAD_INSN looking for true dependences.  */
  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
	/* Found a DEF-USE dependence (insn1, load_insn).  */
	{
	  sd_iterator_def fore_sd_it;
	  dep_t fore_dep;

	  /* Now walk the consumers of INSN1, looking for another load
	     that depends on the same producer.  */
	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
	    {
	      rtx insn2 = DEP_CON (fore_dep);
	      basic_block insn2_block = BLOCK_FOR_INSN (insn2);

	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
		{
		  /* Keep only the earliest (lowest-index) candidate.  */
		  if (earliest_block != NULL
		      && earliest_block->index < insn2_block->index)
		    continue;

		  /* Found a DEF-USE dependence (insn1, insn2).  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    /* insn2 not guaranteed to be a 1 base reg load.  */
		    continue;

		  /* INSN2 must not live in an already-processed block
		     (those are chained from LAST_BLOCK via bb->aux).  */
		  for (bb = last_block; bb; bb = (basic_block) bb->aux)
		    if (insn2_block == bb)
		      break;

		  if (!bb)
		    /* insn2 is the similar load.  */
		    earliest_block = insn2_block;
		}
	    }
	}
    }

  return earliest_block;
}
383*e4b17023SJohn Marino 
/* The following function adds dependencies between jumps and risky
   insns in given ebb, so that insns that may trap are not moved
   speculatively above a jump unless that is provably safe (or is
   recorded as a control-speculative dependence).  Walks insns from
   HEAD to TAIL inclusive.  */

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int classification;
  rtx last_jump = NULL_RTX;		/* Most recent jump seen so far.  */
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;	/* Chain of processed blocks (via aux).  */

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
	{
	  /* Record this block on the aux-chain for
	     earliest_block_with_similiar_load.  */
	  bb = BLOCK_FOR_INSN (insn);
	  bb->aux = last_block;
	  last_block = bb;
	  /* Ensure blocks stay in the same order.  */
	  if (last_jump)
	    add_dependence (insn, last_jump, REG_DEP_ANTI);
	  last_jump = insn;
	}
      else if (INSN_P (insn) && last_jump != NULL_RTX)
	{
	  classification = haifa_classify_insn (insn);
	  prev = last_jump;

	  switch (classification)
	    {
	    case PFREE_CANDIDATE:
	      if (flag_schedule_speculative_load)
		{
		  /* A similar load in an earlier block makes this load
		     exception-free up to that block; only depend on the
		     jump preceding it (if any).  */
		  bb = earliest_block_with_similiar_load (last_block, insn);
		  if (bb)
		    {
		      bb = (basic_block) bb->aux;
		      if (!bb)
			break;
		      prev = BB_END (bb);
		    }
		}
	      /* Fall through.  */
	    case TRAP_RISKY:
	    case IRISKY:
	    case PRISKY_CANDIDATE:
	      /* ??? We could implement better checking PRISKY_CANDIDATEs
		 analogous to sched-rgn.c.  */
	      /* We can not change the mode of the backward
		 dependency because REG_DEP_ANTI has the lowest
		 rank.  */
	      if (! sched_insns_conditions_mutex_p (insn, prev))
		{
		  if ((current_sched_info->flags & DO_SPECULATION)
		      && (spec_info->mask & BEGIN_CONTROL))
		    {
		      /* Record a (possibly weak) control-speculative
			 dependence rather than a hard one.  */
		      dep_def _dep, *dep = &_dep;

		      init_dep (dep, prev, insn, REG_DEP_ANTI);

		      if (current_sched_info->flags & USE_DEPS_LIST)
			{
			  DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
							   MAX_DEP_WEAK);

			}
		      sd_add_or_update_dep (dep, false);
		    }
		  else
		    add_dependence (insn, prev, REG_DEP_CONTROL);
		}

	      break;

	    default:
	      break;
	    }
	}
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}
473*e4b17023SJohn Marino 
/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behaviour depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL_RTX.

   Returns the last basic block of the scheduled ebb on success.  */

basic_block
schedule_ebb (rtx head, rtx tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  Shrink [HEAD, TAIL] past leading/trailing notes,
     debug insns and labels.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
	tail = PREV_INSN (tail);
      else if (LABEL_P (head))
	head = NEXT_INSN (head);
      else
	break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they always are single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
	      || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
		  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}
578*e4b17023SJohn Marino 
579*e4b17023SJohn Marino /* Perform initializations before running schedule_ebbs or a single
580*e4b17023SJohn Marino    schedule_ebb.  */
581*e4b17023SJohn Marino void
schedule_ebbs_init(void)582*e4b17023SJohn Marino schedule_ebbs_init (void)
583*e4b17023SJohn Marino {
584*e4b17023SJohn Marino   /* Setup infos.  */
585*e4b17023SJohn Marino   {
586*e4b17023SJohn Marino     memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
587*e4b17023SJohn Marino 	    sizeof (ebb_common_sched_info));
588*e4b17023SJohn Marino 
589*e4b17023SJohn Marino     ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
590*e4b17023SJohn Marino     ebb_common_sched_info.add_block = ebb_add_block;
591*e4b17023SJohn Marino     ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;
592*e4b17023SJohn Marino 
593*e4b17023SJohn Marino     common_sched_info = &ebb_common_sched_info;
594*e4b17023SJohn Marino     sched_deps_info = &ebb_sched_deps_info;
595*e4b17023SJohn Marino     current_sched_info = &ebb_sched_info;
596*e4b17023SJohn Marino   }
597*e4b17023SJohn Marino 
598*e4b17023SJohn Marino   haifa_sched_init ();
599*e4b17023SJohn Marino 
600*e4b17023SJohn Marino   compute_bb_for_insn ();
601*e4b17023SJohn Marino 
602*e4b17023SJohn Marino   /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
603*e4b17023SJohn Marino   bitmap_initialize (&dont_calc_deps, 0);
604*e4b17023SJohn Marino   bitmap_clear (&dont_calc_deps);
605*e4b17023SJohn Marino }
606*e4b17023SJohn Marino 
607*e4b17023SJohn Marino /* Perform cleanups after scheduling using schedules_ebbs or schedule_ebb.  */
608*e4b17023SJohn Marino void
schedule_ebbs_finish(void)609*e4b17023SJohn Marino schedule_ebbs_finish (void)
610*e4b17023SJohn Marino {
611*e4b17023SJohn Marino   bitmap_clear (&dont_calc_deps);
612*e4b17023SJohn Marino 
613*e4b17023SJohn Marino   /* Reposition the prologue and epilogue notes in case we moved the
614*e4b17023SJohn Marino      prologue/epilogue insns.  */
615*e4b17023SJohn Marino   if (reload_completed)
616*e4b17023SJohn Marino     reposition_prologue_and_epilogue_notes ();
617*e4b17023SJohn Marino 
618*e4b17023SJohn Marino   haifa_sched_finish ();
619*e4b17023SJohn Marino }
620*e4b17023SJohn Marino 
621*e4b17023SJohn Marino /* The main entry point in this file.  */
622*e4b17023SJohn Marino 
623*e4b17023SJohn Marino void
schedule_ebbs(void)624*e4b17023SJohn Marino schedule_ebbs (void)
625*e4b17023SJohn Marino {
626*e4b17023SJohn Marino   basic_block bb;
627*e4b17023SJohn Marino   int probability_cutoff;
628*e4b17023SJohn Marino   rtx tail;
629*e4b17023SJohn Marino 
630*e4b17023SJohn Marino   /* Taking care of this degenerate case makes the rest of
631*e4b17023SJohn Marino      this code simpler.  */
632*e4b17023SJohn Marino   if (n_basic_blocks == NUM_FIXED_BLOCKS)
633*e4b17023SJohn Marino     return;
634*e4b17023SJohn Marino 
635*e4b17023SJohn Marino   if (profile_info && flag_branch_probabilities)
636*e4b17023SJohn Marino     probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
637*e4b17023SJohn Marino   else
638*e4b17023SJohn Marino     probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
639*e4b17023SJohn Marino   probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
640*e4b17023SJohn Marino 
641*e4b17023SJohn Marino   schedule_ebbs_init ();
642*e4b17023SJohn Marino 
643*e4b17023SJohn Marino   /* Schedule every region in the subroutine.  */
644*e4b17023SJohn Marino   FOR_EACH_BB (bb)
645*e4b17023SJohn Marino     {
646*e4b17023SJohn Marino       rtx head = BB_HEAD (bb);
647*e4b17023SJohn Marino 
648*e4b17023SJohn Marino       if (bb->flags & BB_DISABLE_SCHEDULE)
649*e4b17023SJohn Marino 	continue;
650*e4b17023SJohn Marino 
651*e4b17023SJohn Marino       for (;;)
652*e4b17023SJohn Marino 	{
653*e4b17023SJohn Marino 	  edge e;
654*e4b17023SJohn Marino 	  tail = BB_END (bb);
655*e4b17023SJohn Marino 	  if (bb->next_bb == EXIT_BLOCK_PTR
656*e4b17023SJohn Marino 	      || LABEL_P (BB_HEAD (bb->next_bb)))
657*e4b17023SJohn Marino 	    break;
658*e4b17023SJohn Marino 	  e = find_fallthru_edge (bb->succs);
659*e4b17023SJohn Marino 	  if (! e)
660*e4b17023SJohn Marino 	    break;
661*e4b17023SJohn Marino 	  if (e->probability <= probability_cutoff)
662*e4b17023SJohn Marino 	    break;
663*e4b17023SJohn Marino 	  if (e->dest->flags & BB_DISABLE_SCHEDULE)
664*e4b17023SJohn Marino  	    break;
665*e4b17023SJohn Marino 	  bb = bb->next_bb;
666*e4b17023SJohn Marino 	}
667*e4b17023SJohn Marino 
668*e4b17023SJohn Marino       bb = schedule_ebb (head, tail, false);
669*e4b17023SJohn Marino     }
670*e4b17023SJohn Marino   schedule_ebbs_finish ();
671*e4b17023SJohn Marino }
672*e4b17023SJohn Marino 
673*e4b17023SJohn Marino /* INSN has been added to/removed from current ebb.  */
674*e4b17023SJohn Marino static void
ebb_add_remove_insn(rtx insn ATTRIBUTE_UNUSED,int remove_p)675*e4b17023SJohn Marino ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
676*e4b17023SJohn Marino {
677*e4b17023SJohn Marino   if (!remove_p)
678*e4b17023SJohn Marino     rgn_n_insns++;
679*e4b17023SJohn Marino   else
680*e4b17023SJohn Marino     rgn_n_insns--;
681*e4b17023SJohn Marino }
682*e4b17023SJohn Marino 
683*e4b17023SJohn Marino /* BB was added to ebb after AFTER.  */
684*e4b17023SJohn Marino static void
ebb_add_block(basic_block bb,basic_block after)685*e4b17023SJohn Marino ebb_add_block (basic_block bb, basic_block after)
686*e4b17023SJohn Marino {
687*e4b17023SJohn Marino   /* Recovery blocks are always bounded by BARRIERS,
688*e4b17023SJohn Marino      therefore, they always form single block EBB,
689*e4b17023SJohn Marino      therefore, we can use rec->index to identify such EBBs.  */
690*e4b17023SJohn Marino   if (after == EXIT_BLOCK_PTR)
691*e4b17023SJohn Marino     bitmap_set_bit (&dont_calc_deps, bb->index);
692*e4b17023SJohn Marino   else if (after == last_bb)
693*e4b17023SJohn Marino     last_bb = bb;
694*e4b17023SJohn Marino }
695*e4b17023SJohn Marino 
696*e4b17023SJohn Marino /* Return next block in ebb chain.  For parameter meaning please refer to
697*e4b17023SJohn Marino    sched-int.h: struct sched_info: advance_target_bb.  */
698*e4b17023SJohn Marino static basic_block
advance_target_bb(basic_block bb,rtx insn)699*e4b17023SJohn Marino advance_target_bb (basic_block bb, rtx insn)
700*e4b17023SJohn Marino {
701*e4b17023SJohn Marino   if (insn)
702*e4b17023SJohn Marino     {
703*e4b17023SJohn Marino       if (BLOCK_FOR_INSN (insn) != bb
704*e4b17023SJohn Marino 	  && control_flow_insn_p (insn)
705*e4b17023SJohn Marino 	  /* We handle interblock movement of the speculation check
706*e4b17023SJohn Marino 	     or over a speculation check in
707*e4b17023SJohn Marino 	     haifa-sched.c: move_block_after_check ().  */
708*e4b17023SJohn Marino 	  && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
709*e4b17023SJohn Marino 	  && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
710*e4b17023SJohn Marino 	{
711*e4b17023SJohn Marino 	  /* Assert that we don't move jumps across blocks.  */
712*e4b17023SJohn Marino 	  gcc_assert (!control_flow_insn_p (BB_END (bb))
713*e4b17023SJohn Marino 		      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
714*e4b17023SJohn Marino 	  return bb;
715*e4b17023SJohn Marino 	}
716*e4b17023SJohn Marino       else
717*e4b17023SJohn Marino 	return 0;
718*e4b17023SJohn Marino     }
719*e4b17023SJohn Marino   else
720*e4b17023SJohn Marino     /* Return next non empty block.  */
721*e4b17023SJohn Marino     {
722*e4b17023SJohn Marino       do
723*e4b17023SJohn Marino 	{
724*e4b17023SJohn Marino 	  gcc_assert (bb != last_bb);
725*e4b17023SJohn Marino 
726*e4b17023SJohn Marino 	  bb = bb->next_bb;
727*e4b17023SJohn Marino 	}
728*e4b17023SJohn Marino       while (bb_note (bb) == BB_END (bb));
729*e4b17023SJohn Marino 
730*e4b17023SJohn Marino       return bb;
731*e4b17023SJohn Marino     }
732*e4b17023SJohn Marino }
733*e4b17023SJohn Marino 
734*e4b17023SJohn Marino /* Fix internal data after interblock movement of jump instruction.
735*e4b17023SJohn Marino    For parameter meaning please refer to
736*e4b17023SJohn Marino    sched-int.h: struct sched_info: fix_recovery_cfg.  */
737*e4b17023SJohn Marino static void
ebb_fix_recovery_cfg(int bbi ATTRIBUTE_UNUSED,int jump_bbi,int jump_bb_nexti)738*e4b17023SJohn Marino ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
739*e4b17023SJohn Marino 		      int jump_bb_nexti)
740*e4b17023SJohn Marino {
741*e4b17023SJohn Marino   gcc_assert (last_bb->index != bbi);
742*e4b17023SJohn Marino 
743*e4b17023SJohn Marino   if (jump_bb_nexti == last_bb->index)
744*e4b17023SJohn Marino     last_bb = BASIC_BLOCK (jump_bbi);
745*e4b17023SJohn Marino }
746*e4b17023SJohn Marino 
747*e4b17023SJohn Marino #endif /* INSN_SCHEDULING */
748