/* Perform instruction reorganizations for delay slot filling.
   Copyright (C) 1992-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
   Hacked by Michael Tiemann (tiemann@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Instruction reorganization pass.

   This pass runs after register allocation and final jump
   optimization.  It should be the last pass to run before peephole.
   It serves primarily to fill delay slots of insns, typically branch
   and call insns.  Other insns typically involve more complicated
   interactions of data dependencies and resource constraints, and
   are better handled by scheduling before register allocation (by the
   function `schedule_insns').

   The Branch Penalty is the number of extra cycles that are needed to
   execute a branch insn.  On an ideal machine, branches take a single
   cycle, and the Branch Penalty is 0.  Several RISC machines approach
   branch delays differently:

   The MIPS has a single branch delay slot.  Most insns
   (except other branches) can be used to fill this slot.  When the
   slot is filled, two insns execute in two cycles, reducing the
   branch penalty to zero.
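
   As an illustrative example (hypothetical MIPS-style assembly, not taken
   from any particular port), an insn the branch does not depend on can be
   moved from before the branch into the slot:

	(unfilled)			(filled)
	addiu  $4,$4,1			beq    $2,$0,L2
	beq    $2,$0,L2			addiu  $4,$4,1	# delay slot
	nop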

   The SPARC always has a branch delay slot, but its effects can be
   annulled when the branch is not taken.  This means that, failing to
   find other sources of insns, we can hoist an insn from the branch
   target that would only be safe to execute knowing that the branch
   is taken.
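
   For illustration (hypothetical SPARC-style assembly): with an annulling
   branch, the insn stolen from the target is cancelled whenever the branch
   falls through, so it is safe even though it belongs to the taken path:

	be,a   L1		! annul the slot if the branch is not taken
	 add   %o0,1,%o0	! copied from the first insn at L1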

   The HP-PA always has a branch delay slot.  For unconditional branches
   its effects can be annulled when the branch is taken.  The effects
   of the delay slot in a conditional branch can be nullified for forward
   taken branches, or for untaken backward branches.  This means
   we can hoist insns from the fall-through path for forward branches or
   steal insns from the target of backward branches.

   The TMS320C3x and C4x have three branch delay slots.  When the three
   slots are filled, the branch penalty is zero.  Most insns can fill the
   delay slots except jump insns.

   Three techniques for filling delay slots have been implemented so far:

   (1) `fill_simple_delay_slots' is the simplest, most efficient way
   to fill delay slots.  This pass first looks for insns which come
   from before the branch and which are safe to execute after the
   branch.  Then it searches after the insn requiring delay slots or,
   in the case of a branch, for insns that are after the point at
   which the branch merges into the fallthrough code, if such a point
   exists.  When such insns are found, the branch penalty decreases
   and no code expansion takes place.
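
   A minimal sketch of (1), in hypothetical assembly: an insn with no
   dependence on the branch is simply moved into the slot, so the same
   insns execute in fewer cycles:

	(before)		(after)
	mov    r3,r4		b      L5
	b      L5		mov    r3,r4	# delay slot
	nop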

   (2) `fill_eager_delay_slots' is more complicated: it is used for
   scheduling conditional jumps, or for scheduling jumps which cannot
   be filled using (1).  A machine need not have annulled jumps to use
   this strategy, but it helps (by keeping more options open).
   `fill_eager_delay_slots' tries to guess the direction the branch
   will go; if it guesses right 100% of the time, it can reduce the
   branch penalty as much as `fill_simple_delay_slots' does.  If it
   guesses wrong 100% of the time, it might as well schedule nops.  When
   `fill_eager_delay_slots' takes insns from the fall-through path of
   the jump, usually there is no code expansion; when it takes insns
   from the branch target, there is code expansion if it is not the
   only way to reach that target.
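
   Sketch of (2), again hypothetical assembly, with the branch predicted
   taken: the first insn at the target is copied into an annulled slot and
   the branch is redirected past it, so the copy runs only on the taken
   path.  The copy is extra code whenever L3 is reachable some other way:

	(before)		(after)
	bne    L3		bne,a  L3'
	nop			 ld    [r2],r5	# annulled copy of insn at L3
	...			...
    L3:	ld     [r2],r5	    L3:	ld     [r2],r5
	st     r5,[r6]	   L3':	st     r5,[r6]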

   (3) `relax_delay_slots' uses a set of rules to simplify code that
   has been reorganized by (1) and (2).  It finds cases where a
   conditional test can be eliminated, jumps can be threaded, extra
   insns can be eliminated, etc.  It is the job of (1) and (2) to do a
   good job of scheduling locally; `relax_delay_slots' takes care of
   making the various individual schedules work well together.  It is
   especially tuned to handle the control flow interactions of branch
   insns.  It does nothing for insns with delay slots that do not
   branch.

   On machines that use CC0, we are very conservative.  We will not make
   a copy of an insn involving CC0 since we want to maintain a 1-1
   correspondence between the insn that sets CC0 and the insn that uses it.
   The insns are allowed to be separated by placing an insn that sets CC0
   (but not an insn that uses CC0; we could do this, but it doesn't seem
   worthwhile) in a delay slot.  In that case, we point each insn at the
   other with REG_CC_USER and REG_CC_SETTER notes.  Note that these
   restrictions affect very few machines because most RISC machines with
   delay slots will not use CC0 (the RT is the only known exception at
   this point).  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "expmed.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "recog.h"
#include "insn-attr.h"
#include "resource.h"
#include "params.h"
#include "tree-pass.h"


/* First, some functions that were used before GCC got a control flow graph.
   These functions are now only used here in reorg.c, and have therefore
   been moved here to avoid inadvertent misuse elsewhere in the compiler.  */

/* Return the last label to mark the same position as LABEL.  Return LABEL
   itself if it is null or any return rtx.  */

static rtx
skip_consecutive_labels (rtx label_or_return)
{
  rtx_insn *insn;

  if (label_or_return && ANY_RETURN_P (label_or_return))
    return label_or_return;

  rtx_insn *label = as_a <rtx_insn *> (label_or_return);

  for (insn = label; insn != 0 && !INSN_P (insn); insn = NEXT_INSN (insn))
    if (LABEL_P (insn))
      label = insn;

  return label;
}

/* INSN uses CC0 and is being moved into a delay slot.  Set up REG_CC_SETTER
   and REG_CC_USER notes so we can find it.  */

static void
link_cc0_insns (rtx_insn *insn)
{
  rtx user = next_nonnote_insn (insn);

  if (NONJUMP_INSN_P (user) && GET_CODE (PATTERN (user)) == SEQUENCE)
    user = XVECEXP (PATTERN (user), 0, 0);

  add_reg_note (user, REG_CC_SETTER, insn);
  add_reg_note (insn, REG_CC_USER, user);
}

/* Insns which have delay slots that have not yet been filled.  */

static struct obstack unfilled_slots_obstack;
static rtx *unfilled_firstobj;

/* Define macros to refer to the first and last slot containing unfilled
   insns.  These are used because the list may move and its address
   should be recomputed at each use.  */

#define unfilled_slots_base \
  ((rtx_insn **) obstack_base (&unfilled_slots_obstack))

#define unfilled_slots_next \
  ((rtx_insn **) obstack_next_free (&unfilled_slots_obstack))
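
/* A sketch, for illustration only, of how the list of unfilled insns is
   typically walked; the base address is re-read through the macros on each
   iteration because growing the obstack may move its contents:

     for (int i = 0; unfilled_slots_base + i < unfilled_slots_next; i++)
       {
         rtx_insn *insn = unfilled_slots_base[i];
         ...
       }  */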

/* Points to the label before the end of the function, or before a
   return insn.  */
static rtx_code_label *function_return_label;
/* Likewise for a simple_return.  */
static rtx_code_label *function_simple_return_label;

/* Mapping between INSN_UID's and position in the code since INSN_UID's do
   not always monotonically increase.  */
static int *uid_to_ruid;

/* Highest valid index in `uid_to_ruid'.  */
static int max_uid;

static int stop_search_p (rtx_insn *, int);
static int resource_conflicts_p (struct resources *, struct resources *);
static int insn_references_resource_p (rtx, struct resources *, bool);
static int insn_sets_resource_p (rtx, struct resources *, bool);
static rtx_code_label *find_end_label (rtx);
static rtx_insn *emit_delay_sequence (rtx_insn *, const vec<rtx_insn *> &,
				      int);
static void add_to_delay_list (rtx_insn *, vec<rtx_insn *> *);
static rtx_insn *delete_from_delay_slot (rtx_insn *);
static void delete_scheduled_jump (rtx_insn *);
static void note_delay_statistics (int, int);
static int get_jump_flags (const rtx_insn *, rtx);
static int mostly_true_jump (rtx);
static rtx get_branch_condition (const rtx_insn *, rtx);
static int condition_dominates_p (rtx, const rtx_insn *);
static int redirect_with_delay_slots_safe_p (rtx_insn *, rtx, rtx);
static int redirect_with_delay_list_safe_p (rtx_insn *, rtx,
					    const vec<rtx_insn *> &);
static int check_annul_list_true_false (int, const vec<rtx_insn *> &);
static void steal_delay_list_from_target (rtx_insn *, rtx, rtx_sequence *,
					  vec<rtx_insn *> *,
					  struct resources *,
					  struct resources *,
					  struct resources *,
					  int, int *, int *,
					  rtx *);
static void steal_delay_list_from_fallthrough (rtx_insn *, rtx, rtx_sequence *,
					       vec<rtx_insn *> *,
					       struct resources *,
					       struct resources *,
					       struct resources *,
					       int, int *, int *);
static void try_merge_delay_insns (rtx_insn *, rtx_insn *);
static rtx_insn *redundant_insn (rtx, rtx_insn *, const vec<rtx_insn *> &);
static int own_thread_p (rtx, rtx, int);
static void update_block (rtx_insn *, rtx_insn *);
static int reorg_redirect_jump (rtx_jump_insn *, rtx);
static void update_reg_dead_notes (rtx_insn *, rtx_insn *);
static void fix_reg_dead_note (rtx_insn *, rtx);
static void update_reg_unused_notes (rtx_insn *, rtx);
static void fill_simple_delay_slots (int);
static void fill_slots_from_thread (rtx_jump_insn *, rtx, rtx, rtx,
				    int, int, int, int,
				    int *, vec<rtx_insn *> *);
static void fill_eager_delay_slots (void);
static void relax_delay_slots (rtx_insn *);
static void make_return_insns (rtx_insn *);

/* A wrapper around next_active_insn which takes care to return ret_rtx
   unchanged.  */

static rtx
first_active_target_insn (rtx insn)
{
  if (ANY_RETURN_P (insn))
    return insn;
  return next_active_insn (as_a <rtx_insn *> (insn));
}

/* Return true iff INSN is a simplejump, or any kind of return insn.  */

static bool
simplejump_or_return_p (rtx insn)
{
  return (JUMP_P (insn)
	  && (simplejump_p (as_a <rtx_insn *> (insn))
	      || ANY_RETURN_P (PATTERN (insn))));
}

/* Return TRUE if this insn should stop the search for insns to fill delay
   slots.  LABELS_P indicates that labels should terminate the search.
   In all cases, jumps terminate the search.  */

static int
stop_search_p (rtx_insn *insn, int labels_p)
{
  if (insn == 0)
    return 1;

  /* If the insn can throw an exception that is caught within the function,
     it may effectively perform a jump from the viewpoint of the function.
     Therefore act like for a jump.  */
  if (can_throw_internal (insn))
    return 1;

  switch (GET_CODE (insn))
    {
    case NOTE:
    case CALL_INSN:
    case DEBUG_INSN:
      return 0;

    case CODE_LABEL:
      return labels_p;

    case JUMP_INSN:
    case BARRIER:
      return 1;

    case INSN:
      /* OK unless it contains a delay slot or is an `asm' insn of some type.
	 We don't know anything about these.  */
      return (GET_CODE (PATTERN (insn)) == SEQUENCE
	      || GET_CODE (PATTERN (insn)) == ASM_INPUT
	      || asm_noperands (PATTERN (insn)) >= 0);

    default:
      gcc_unreachable ();
    }
}

/* Return TRUE if any resources are marked in both RES1 and RES2 or if either
   resource set contains a volatile memory reference.  Otherwise, return FALSE.  */

static int
resource_conflicts_p (struct resources *res1, struct resources *res2)
{
  if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
      || res1->volatil || res2->volatil)
    return 1;

  return hard_reg_set_intersect_p (res1->regs, res2->regs);
}

/* Return TRUE if any resource marked in RES, a `struct resources', is
   referenced by INSN.  If INCLUDE_DELAYED_EFFECTS is set, also return TRUE
   if the routine called by INSN uses those resources.

   We compute this by computing all the resources referenced by INSN and
   seeing if this conflicts with RES.  It might be faster to directly check
   ourselves, and this is the way it used to work, but it means duplicating
   a large block of complex code.  */

static int
insn_references_resource_p (rtx insn, struct resources *res,
			    bool include_delayed_effects)
{
  struct resources insn_res;

  CLEAR_RESOURCE (&insn_res);
  mark_referenced_resources (insn, &insn_res, include_delayed_effects);
  return resource_conflicts_p (&insn_res, res);
}

/* Return TRUE if INSN modifies resources that are marked in RES.
   INCLUDE_DELAYED_EFFECTS is set if the actions of that routine should be
   included.  CC0 is only modified if it is explicitly set; see comments
   in front of mark_set_resources for details.  */

static int
insn_sets_resource_p (rtx insn, struct resources *res,
		      bool include_delayed_effects)
{
  struct resources insn_sets;

  CLEAR_RESOURCE (&insn_sets);
  mark_set_resources (insn, &insn_sets, 0,
		      (include_delayed_effects
		       ? MARK_SRC_DEST_CALL
		       : MARK_SRC_DEST));
  return resource_conflicts_p (&insn_sets, res);
}

/* Find a label at the end of the function or before a RETURN.  If there
   is none, try to make one.  If that fails, return 0.

   The property of such a label is that it is placed just before the
   epilogue or a bare RETURN insn, so that another bare RETURN can be
   turned into a jump to the label unconditionally.  In particular, the
   label cannot be placed before a RETURN insn with a filled delay slot.

   ??? There may be a problem with the current implementation.  Suppose
   we start with a bare RETURN insn and call find_end_label.  It may set
   function_return_label just before the RETURN.  Suppose the machinery
   is able to fill the delay slot of the RETURN insn afterwards.  Then
   function_return_label is no longer valid according to the property
   described above and find_end_label will still return it unmodified.
   Note that this is probably mitigated by the following observation:
   once function_return_label is made, it is very likely the target of
   a jump, so filling the delay slot of the RETURN will be much more
   difficult.
   KIND is either simple_return_rtx or ret_rtx, indicating which type of
   return we're looking for.  */

static rtx_code_label *
find_end_label (rtx kind)
{
  rtx_insn *insn;
  rtx_code_label **plabel;

  if (kind == ret_rtx)
    plabel = &function_return_label;
  else
    {
      gcc_assert (kind == simple_return_rtx);
      plabel = &function_simple_return_label;
    }

  /* If we found one previously, return it.  */
  if (*plabel)
    return *plabel;

  /* Otherwise, see if there is a label at the end of the function.  If there
     is, it must be that RETURN insns aren't needed, so that is our return
     label and we don't have to do anything else.  */

  insn = get_last_insn ();
  while (NOTE_P (insn)
	 || (NONJUMP_INSN_P (insn)
	     && (GET_CODE (PATTERN (insn)) == USE
		 || GET_CODE (PATTERN (insn)) == CLOBBER)))
    insn = PREV_INSN (insn);

  /* When a target threads its epilogue we might already have a
     suitable return insn.  If so put a label before it for the
     function_return_label.  */
  if (BARRIER_P (insn)
      && JUMP_P (PREV_INSN (insn))
      && PATTERN (PREV_INSN (insn)) == kind)
    {
      rtx_insn *temp = PREV_INSN (PREV_INSN (insn));
      rtx_code_label *label = gen_label_rtx ();
      LABEL_NUSES (label) = 0;

      /* Put the label before any USE insns that may precede the RETURN
	 insn.  */
      while (GET_CODE (temp) == USE)
	temp = PREV_INSN (temp);

      emit_label_after (label, temp);
      *plabel = label;
    }

  else if (LABEL_P (insn))
    *plabel = as_a <rtx_code_label *> (insn);
  else
    {
      rtx_code_label *label = gen_label_rtx ();
      LABEL_NUSES (label) = 0;
      /* If the basic block reorder pass moves the return insn to
	 some other place try to locate it again and put our
	 function_return_label there.  */
      while (insn && ! (JUMP_P (insn) && (PATTERN (insn) == kind)))
	insn = PREV_INSN (insn);
      if (insn)
	{
	  insn = PREV_INSN (insn);

	  /* Put the label before any USE insns that may precede the
	     RETURN insn.  */
	  while (GET_CODE (insn) == USE)
	    insn = PREV_INSN (insn);

	  emit_label_after (label, insn);
	}
      else
	{
	  if (targetm.have_epilogue () && ! targetm.have_return ())
	    /* The RETURN insn has its delay slot filled so we cannot
	       emit the label just before it.  Since we already have
	       an epilogue and cannot emit a new RETURN, we cannot
	       emit the label at all.  */
	    return NULL;

	  /* Otherwise, make a new label and emit a RETURN and BARRIER,
	     if needed.  */
	  emit_label (label);
	  if (targetm.have_return ())
	    {
	      /* The return we make may have delay slots too.  */
	      rtx_insn *pat = targetm.gen_return ();
	      rtx_insn *insn = emit_jump_insn (pat);
	      set_return_jump_label (insn);
	      emit_barrier ();
	      if (num_delay_slots (insn) > 0)
		obstack_ptr_grow (&unfilled_slots_obstack, insn);
	    }
	}
      *plabel = label;
    }

  /* Show one additional use for this label so it won't go away until
     we are done.  */
  ++LABEL_NUSES (*plabel);

  return *plabel;
}

/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
   the pattern of INSN with the SEQUENCE.

   Returns the insn containing the SEQUENCE that replaces INSN.  */

static rtx_insn *
emit_delay_sequence (rtx_insn *insn, const vec<rtx_insn *> &list, int length)
{
  /* Allocate the rtvec to hold the insns and the SEQUENCE.  */
  rtvec seqv = rtvec_alloc (length + 1);
  rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv);
  rtx_insn *seq_insn = make_insn_raw (seq);

  /* If DELAY_INSN has a location, use it for SEQ_INSN.  If DELAY_INSN does
     not have a location, but one of the delayed insns does, we pick up a
     location from there later.  */
  INSN_LOCATION (seq_insn) = INSN_LOCATION (insn);

  /* Unlink INSN from the insn chain, so that we can put it into
     the SEQUENCE.  Remember where we want to emit SEQUENCE in AFTER.  */
  rtx_insn *after = PREV_INSN (insn);
  remove_insn (insn);
  SET_NEXT_INSN (insn) = SET_PREV_INSN (insn) = NULL;

  /* Build our SEQUENCE and rebuild the insn chain.  */
  start_sequence ();
  XVECEXP (seq, 0, 0) = emit_insn (insn);

  unsigned int delay_insns = list.length ();
  gcc_assert (delay_insns == (unsigned int) length);
  for (unsigned int i = 0; i < delay_insns; i++)
    {
      rtx_insn *tem = list[i];
      rtx note, next;

      /* Show that this copy of the insn isn't deleted.  */
      tem->set_undeleted ();

      /* Unlink insn from its original place, and re-emit it into
	 the sequence.  */
      SET_NEXT_INSN (tem) = SET_PREV_INSN (tem) = NULL;
      XVECEXP (seq, 0, i + 1) = emit_insn (tem);

      /* The SPARC assembler, for instance, emits a warning when debug info
	 is output into the delay slot.  */
      if (INSN_LOCATION (tem) && !INSN_LOCATION (seq_insn))
	INSN_LOCATION (seq_insn) = INSN_LOCATION (tem);
      INSN_LOCATION (tem) = 0;

      for (note = REG_NOTES (tem); note; note = next)
	{
	  next = XEXP (note, 1);
	  switch (REG_NOTE_KIND (note))
	    {
	    case REG_DEAD:
	      /* Remove any REG_DEAD notes because we can't rely on them now
		 that the insn has been moved.  */
	      remove_note (tem, note);
	      break;

	    case REG_LABEL_OPERAND:
	    case REG_LABEL_TARGET:
	      /* Keep the label reference count up to date.  */
	      if (LABEL_P (XEXP (note, 0)))
		LABEL_NUSES (XEXP (note, 0)) ++;
	      break;

	    default:
	      break;
	    }
	}
    }
  end_sequence ();

  /* Splice our SEQUENCE into the insn stream where INSN used to be.  */
  add_insn_after (seq_insn, after, NULL);

  return seq_insn;
}

/* Add INSN to DELAY_LIST.  The list must be in the order in which the
   insns are to be executed.  */

static void
add_to_delay_list (rtx_insn *insn, vec<rtx_insn *> *delay_list)
{
  /* If INSN has its block number recorded, clear it since we may
     be moving the insn to a new block.  */
  clear_hashed_info_for_insn (insn);
  delay_list->safe_push (insn);
}

/* Delete INSN from the delay slot of the insn that it is in, which may
   produce an insn with no delay slots.  Return the new insn.  */

static rtx_insn *
delete_from_delay_slot (rtx_insn *insn)
{
  rtx_insn *trial, *seq_insn, *prev;
  rtx_sequence *seq;
  int i;
  int had_barrier = 0;

  /* We first must find the insn containing the SEQUENCE with INSN in its
     delay slot.  Do this by finding an insn, TRIAL, where
     PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL.  */

  for (trial = insn;
       PREV_INSN (NEXT_INSN (trial)) == trial;
       trial = NEXT_INSN (trial))
    ;

  seq_insn = PREV_INSN (NEXT_INSN (trial));
  seq = as_a <rtx_sequence *> (PATTERN (seq_insn));

  if (NEXT_INSN (seq_insn) && BARRIER_P (NEXT_INSN (seq_insn)))
    had_barrier = 1;

  /* Create a delay list consisting of all the insns other than the one
     we are deleting (unless we were the only one).  */
  auto_vec<rtx_insn *, 5> delay_list;
  if (seq->len () > 2)
    for (i = 1; i < seq->len (); i++)
      if (seq->insn (i) != insn)
	add_to_delay_list (seq->insn (i), &delay_list);

  /* Delete the old SEQUENCE, re-emit the insn that used to have the delay
     list, and rebuild the delay list if non-empty.  */
  prev = PREV_INSN (seq_insn);
  trial = seq->insn (0);
  delete_related_insns (seq_insn);
  add_insn_after (trial, prev, NULL);

  /* If there was a barrier after the old SEQUENCE, re-emit it.  */
  if (had_barrier)
    emit_barrier_after (trial);

  /* If there are any delay insns, re-emit them.  Otherwise clear the
     annul flag.  */
  if (!delay_list.is_empty ())
    trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2);
  else if (JUMP_P (trial))
    INSN_ANNULLED_BRANCH_P (trial) = 0;

  INSN_FROM_TARGET_P (insn) = 0;

  /* Show we need to fill this insn again.  */
  obstack_ptr_grow (&unfilled_slots_obstack, trial);

  return trial;
}

/* Delete INSN, a JUMP_INSN.  If it is a conditional jump, we must track down
   the insn that sets CC0 for it and delete it too.  */

static void
delete_scheduled_jump (rtx_insn *insn)
{
  /* Delete the insn that sets cc0 for us.  On machines without cc0, we could
     delete the insn that sets the condition code, but it is hard to find it.
     Since this case is rare anyway, don't bother trying; there would likely
     be other insns that became dead anyway, which we wouldn't know to
     delete.  */

  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, insn))
    {
      rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);

      /* If a reg-note was found, it points to an insn to set CC0.  This
	 insn is in the delay list of some other insn.  So delete it from
	 the delay list it was in.  */
      if (note)
	{
	  if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
	      && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
	    delete_from_delay_slot (as_a <rtx_insn *> (XEXP (note, 0)));
	}
      else
	{
	  /* The insn setting CC0 is our previous insn, but it may be in
	     a delay slot.  It will be the last insn in the delay slot, if
	     it is.  */
	  rtx_insn *trial = previous_insn (insn);
	  if (NOTE_P (trial))
	    trial = prev_nonnote_insn (trial);
	  if (sets_cc0_p (PATTERN (trial)) != 1
	      || FIND_REG_INC_NOTE (trial, NULL_RTX))
	    return;
	  if (PREV_INSN (NEXT_INSN (trial)) == trial)
	    delete_related_insns (trial);
	  else
	    delete_from_delay_slot (trial);
	}
    }

  delete_related_insns (insn);
}

/* Counters for delay-slot filling.  */

#define NUM_REORG_FUNCTIONS 2
#define MAX_DELAY_HISTOGRAM 3
#define MAX_REORG_PASSES 2

static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];

static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];

static int reorg_pass_number;

static void
note_delay_statistics (int slots_filled, int index)
{
  num_insns_needing_delays[index][reorg_pass_number]++;
  if (slots_filled > MAX_DELAY_HISTOGRAM)
    slots_filled = MAX_DELAY_HISTOGRAM;
  num_filled_delays[index][slots_filled][reorg_pass_number]++;
}

/* Optimize the following cases:

   1.  When a conditional branch skips over only one instruction,
       use an annulling branch and put that insn in the delay slot.
       Use either a branch that annuls when the condition is true or
       invert the test with a branch that annuls when the condition is
       false.  This saves insns, since otherwise we must copy an insn
       from the L1 target.

	(orig)		(skip)		(otherwise)
	Bcc.n L1	Bcc',a L1	Bcc,a L1'
	insn		insn		insn2
      L1:	      L1:	      L1:
	insn2		insn2		insn2
	insn3		insn3	      L1':
					insn3

   2.  When a conditional branch skips over only one instruction,
       and after that, it unconditionally branches somewhere else,
       perform a similar optimization.  This saves executing the
       second branch in the case where the inverted condition is true.

	Bcc.n L1	Bcc',a L2
	insn		insn
      L1:	      L1:
	Bra L2		Bra L2

   INSN is a JUMP_INSN.

   This should be expanded to skip over N insns, where N is the number
   of delay slots required.  */

static void
optimize_skip (rtx_jump_insn *insn, vec<rtx_insn *> *delay_list)
{
  rtx_insn *trial = next_nonnote_insn (insn);
  rtx_insn *next_trial = next_active_insn (trial);
  int flags;

  flags = get_jump_flags (insn, JUMP_LABEL (insn));

  if (trial == 0
      || !NONJUMP_INSN_P (trial)
      || GET_CODE (PATTERN (trial)) == SEQUENCE
      || recog_memoized (trial) < 0
      || (! eligible_for_annul_false (insn, 0, trial, flags)
	  && ! eligible_for_annul_true (insn, 0, trial, flags))
      || RTX_FRAME_RELATED_P (trial)
      || can_throw_internal (trial))
    return;

  /* There are two cases where we are just executing one insn (we assume
     here that a branch requires only one insn; this should be generalized
     at some point): Where the branch goes around a single insn or where
     we have one insn followed by a branch to the same label we branch to.
     In both of these cases, inverting the jump and annulling the delay
     slot give the same effect in fewer insns.  */
  if (next_trial == next_active_insn (JUMP_LABEL_AS_INSN (insn))
      || (next_trial != 0
	  && simplejump_or_return_p (next_trial)
	  && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)))
    {
      if (eligible_for_annul_false (insn, 0, trial, flags))
	{
	  if (invert_jump (insn, JUMP_LABEL (insn), 1))
	    INSN_FROM_TARGET_P (trial) = 1;
	  else if (! eligible_for_annul_true (insn, 0, trial, flags))
	    return;
	}

      add_to_delay_list (trial, delay_list);
      next_trial = next_active_insn (trial);
      update_block (trial, trial);
      delete_related_insns (trial);

      /* Also, if we are targeting an unconditional
	 branch, thread our jump to the target of that branch.  Don't
	 change this into a RETURN here, because it may not accept what
	 we have in the delay slot.  We'll fix this up later.  */
      if (next_trial && simplejump_or_return_p (next_trial))
	{
	  rtx target_label = JUMP_LABEL (next_trial);
	  if (ANY_RETURN_P (target_label))
	    target_label = find_end_label (target_label);

	  if (target_label)
	    {
	      /* Recompute the flags based on TARGET_LABEL since threading
		 the jump to TARGET_LABEL may change the direction of the
		 jump (which may change the circumstances in which the
		 delay slot is nullified).  */
	      flags = get_jump_flags (insn, target_label);
	      if (eligible_for_annul_true (insn, 0, trial, flags))
		reorg_redirect_jump (insn, target_label);
	    }
	}

      INSN_ANNULLED_BRANCH_P (insn) = 1;
    }
}

/* Encode and return branch direction and prediction information for
   INSN assuming it will jump to LABEL.

   Unconditional branches return no direction information and
   are predicted as very likely taken.  */

static int
get_jump_flags (const rtx_insn *insn, rtx label)
{
  int flags;

  /* get_jump_flags can be passed any insn with delay slots, these may
     be INSNs, CALL_INSNs, or JUMP_INSNs.  Only JUMP_INSNs have branch
     direction information, and only if they are conditional jumps.

     If LABEL is a return, then there is no way to determine the branch
     direction.  */
  if (JUMP_P (insn)
      && (condjump_p (insn) || condjump_in_parallel_p (insn))
      && !ANY_RETURN_P (label)
      && INSN_UID (insn) <= max_uid
      && INSN_UID (label) <= max_uid)
    flags
      = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
      ? ATTR_FLAG_forward : ATTR_FLAG_backward;
  /* No valid direction information.  */
  else
    flags = 0;

  return flags;
}

/* Return truth value of the statement that this branch
   is mostly taken.  If we think that the branch is extremely likely
   to be taken, we return 2.  If the branch is slightly more likely to be
   taken, return 1.  If the branch is slightly less likely to be taken,
   return 0, and if the branch is highly unlikely to be taken, return -1.  */
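
/* An illustrative mapping of the thresholds used below (assuming the usual
   definition REG_BR_PROB_BASE == 10000): a branch with a recorded
   probability of 95% of being taken yields 2, 60% yields 1, 30% yields 0,
   and 5% yields -1.  */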

static int
mostly_true_jump (rtx jump_insn)
{
  /* If branch probabilities are available, then use that number since it
     always gives a correct answer.  */
  rtx note = find_reg_note (jump_insn, REG_BR_PROB, 0);
  if (note)
    {
      int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
		   .to_reg_br_prob_base ();

      if (prob >= REG_BR_PROB_BASE * 9 / 10)
	return 2;
      else if (prob >= REG_BR_PROB_BASE / 2)
	return 1;
      else if (prob >= REG_BR_PROB_BASE / 10)
	return 0;
      else
	return -1;
    }

  /* If there is no note, assume branches are not taken.
     This should be rare.  */
  return 0;
}

/* Return the condition under which INSN will branch to TARGET.  If TARGET
   is zero, return the condition under which INSN will return.  If INSN is
   an unconditional branch, return const_true_rtx.  If INSN isn't a simple
   type of jump, or it doesn't go to TARGET, return 0.  */

static rtx
get_branch_condition (const rtx_insn *insn, rtx target)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (condjump_in_parallel_p (insn))
    pat = XVECEXP (pat, 0, 0);

  if (ANY_RETURN_P (pat) && pat == target)
    return const_true_rtx;

  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;

  src = SET_SRC (pat);
  if (GET_CODE (src) == LABEL_REF && label_ref_label (src) == target)
    return const_true_rtx;

  else if (GET_CODE (src) == IF_THEN_ELSE
	   && XEXP (src, 2) == pc_rtx
	   && ((GET_CODE (XEXP (src, 1)) == LABEL_REF
		&& label_ref_label (XEXP (src, 1)) == target)
	       || (ANY_RETURN_P (XEXP (src, 1)) && XEXP (src, 1) == target)))
    return XEXP (src, 0);

  else if (GET_CODE (src) == IF_THEN_ELSE
	   && XEXP (src, 1) == pc_rtx
	   && ((GET_CODE (XEXP (src, 2)) == LABEL_REF
		&& label_ref_label (XEXP (src, 2)) == target)
	       || (ANY_RETURN_P (XEXP (src, 2)) && XEXP (src, 2) == target)))
    {
      enum rtx_code rev;
      rev = reversed_comparison_code (XEXP (src, 0), insn);
      if (rev != UNKNOWN)
	return gen_rtx_fmt_ee (rev, GET_MODE (XEXP (src, 0)),
			       XEXP (XEXP (src, 0), 0),
			       XEXP (XEXP (src, 0), 1));
    }

  return 0;
}

/* Return nonzero if CONDITION is more strict than the condition of
   INSN, i.e., if INSN will always branch if CONDITION is true.  */
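
/* For instance (an illustrative case only): if CONDITION is
   (eq (reg 1) (const_int 0)) and INSN branches when
   (le (reg 1) (const_int 0)) holds, CONDITION dominates, because whenever
   the EQ is true the LE is true as well, so INSN is certain to branch.  */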

static int
condition_dominates_p (rtx condition, const rtx_insn *insn)
{
  rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
  enum rtx_code code = GET_CODE (condition);
  enum rtx_code other_code;

  if (rtx_equal_p (condition, other_condition)
      || other_condition == const_true_rtx)
    return 1;

  else if (condition == const_true_rtx || other_condition == 0)
    return 0;

  other_code = GET_CODE (other_condition);
  if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
      || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
      || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
    return 0;

  return comparison_dominates_p (code, other_code);
}

/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
   any insns already in the delay slot of JUMP.  */

static int
redirect_with_delay_slots_safe_p (rtx_insn *jump, rtx newlabel, rtx seq)
{
  int flags, i;
  rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (seq));

  /* Make sure all the delay slots of this jump would still
     be valid after threading the jump.  If they are still
     valid, then return nonzero.  */

  flags = get_jump_flags (jump, newlabel);
  for (i = 1; i < pat->len (); i++)
    if (! (
#if ANNUL_IFFALSE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && INSN_FROM_TARGET_P (pat->insn (i)))
	   ? eligible_for_annul_false (jump, i - 1, pat->insn (i), flags) :
#endif
#if ANNUL_IFTRUE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
	   ? eligible_for_annul_true (jump, i - 1, pat->insn (i), flags) :
#endif
	   eligible_for_delay (jump, i - 1, pat->insn (i), flags)))
      break;

  return (i == pat->len ());
}

/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
   any insns we wish to place in the delay slot of JUMP.  */

static int
redirect_with_delay_list_safe_p (rtx_insn *jump, rtx newlabel,
				 const vec<rtx_insn *> &delay_list)
{
  /* Make sure all the insns in DELAY_LIST would still be
     valid after threading the jump.  If they are still
     valid, then return nonzero.  */

  int flags = get_jump_flags (jump, newlabel);
  unsigned int delay_insns = delay_list.length ();
  unsigned int i = 0;
  for (; i < delay_insns; i++)
    if (! (
#if ANNUL_IFFALSE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && INSN_FROM_TARGET_P (delay_list[i]))
	   ? eligible_for_annul_false (jump, i, delay_list[i], flags) :
#endif
#if ANNUL_IFTRUE_SLOTS
	   (INSN_ANNULLED_BRANCH_P (jump)
	    && ! INSN_FROM_TARGET_P (delay_list[i]))
	   ? eligible_for_annul_true (jump, i, delay_list[i], flags) :
#endif
	   eligible_for_delay (jump, i, delay_list[i], flags)))
      break;

  return i == delay_insns;
}

1000*38fd1498Szrj /* DELAY_LIST is a list of insns that have already been placed into delay
1001*38fd1498Szrj slots. See if all of them have the same annulling status as ANNUL_TRUE_P.
1002*38fd1498Szrj If not, return 0; otherwise return 1. */
1003*38fd1498Szrj
1004*38fd1498Szrj static int
check_annul_list_true_false(int annul_true_p,const vec<rtx_insn * > & delay_list)1005*38fd1498Szrj check_annul_list_true_false (int annul_true_p,
1006*38fd1498Szrj const vec<rtx_insn *> &delay_list)
1007*38fd1498Szrj {
1008*38fd1498Szrj rtx_insn *trial;
1009*38fd1498Szrj unsigned int i;
1010*38fd1498Szrj FOR_EACH_VEC_ELT (delay_list, i, trial)
1011*38fd1498Szrj if ((annul_true_p && INSN_FROM_TARGET_P (trial))
1012*38fd1498Szrj || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
1013*38fd1498Szrj return 0;
1014*38fd1498Szrj
1015*38fd1498Szrj return 1;
1016*38fd1498Szrj }
1017*38fd1498Szrj
1018*38fd1498Szrj /* INSN branches to an insn whose pattern SEQ is a SEQUENCE. Given that
1019*38fd1498Szrj the condition tested by INSN is CONDITION and the resources shown in
1020*38fd1498Szrj OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
1021*38fd1498Szrj from SEQ's delay list, in addition to whatever insns it may execute
1022*38fd1498Szrj (in DELAY_LIST). SETS and NEEDED are denote resources already set and
1023*38fd1498Szrj needed while searching for delay slot insns. Return the concatenated
1024*38fd1498Szrj delay list if possible, otherwise, return 0.
1025*38fd1498Szrj
1026*38fd1498Szrj SLOTS_TO_FILL is the total number of slots required by INSN, and
1027*38fd1498Szrj PSLOTS_FILLED points to the number filled so far (also the number of
1028*38fd1498Szrj insns in DELAY_LIST). It is updated with the number that have been
1029*38fd1498Szrj filled from the SEQUENCE, if any.
1030*38fd1498Szrj
1031*38fd1498Szrj PANNUL_P points to a nonzero value if we already know that we need
1032*38fd1498Szrj to annul INSN. If this routine determines that annulling is needed,
1033*38fd1498Szrj it may set that value nonzero.
1034*38fd1498Szrj
1035*38fd1498Szrj PNEW_THREAD points to a location that is to receive the place at which
1036*38fd1498Szrj execution should continue. */
1037*38fd1498Szrj
1038*38fd1498Szrj static void
steal_delay_list_from_target(rtx_insn * insn,rtx condition,rtx_sequence * seq,vec<rtx_insn * > * delay_list,struct resources * sets,struct resources * needed,struct resources * other_needed,int slots_to_fill,int * pslots_filled,int * pannul_p,rtx * pnew_thread)1039*38fd1498Szrj steal_delay_list_from_target (rtx_insn *insn, rtx condition, rtx_sequence *seq,
1040*38fd1498Szrj vec<rtx_insn *> *delay_list,
1041*38fd1498Szrj struct resources *sets,
1042*38fd1498Szrj struct resources *needed,
1043*38fd1498Szrj struct resources *other_needed,
1044*38fd1498Szrj int slots_to_fill, int *pslots_filled,
1045*38fd1498Szrj int *pannul_p, rtx *pnew_thread)
1046*38fd1498Szrj {
1047*38fd1498Szrj int slots_remaining = slots_to_fill - *pslots_filled;
1048*38fd1498Szrj int total_slots_filled = *pslots_filled;
1049*38fd1498Szrj auto_vec<rtx_insn *, 5> new_delay_list;
1050*38fd1498Szrj int must_annul = *pannul_p;
1051*38fd1498Szrj int used_annul = 0;
1052*38fd1498Szrj int i;
1053*38fd1498Szrj struct resources cc_set;
1054*38fd1498Szrj rtx_insn **redundant;
1055*38fd1498Szrj
1056*38fd1498Szrj /* We can't do anything if there are more delay slots in SEQ than we
1057*38fd1498Szrj can handle, or if we don't know that it will be a taken branch.
1058*38fd1498Szrj We know that it will be a taken branch if it is either an unconditional
1059*38fd1498Szrj branch or a conditional branch with a stricter branch condition.
1060*38fd1498Szrj
1061*38fd1498Szrj Also, exit if the branch has more than one set, since then it is computing
1062*38fd1498Szrj other results that can't be ignored, e.g. the HPPA mov&branch instruction.
1063*38fd1498Szrj ??? It may be possible to move other sets into INSN in addition to
1064*38fd1498Szrj moving the instructions in the delay slots.
1065*38fd1498Szrj
1066*38fd1498Szrj We can not steal the delay list if one of the instructions in the
1067*38fd1498Szrj current delay_list modifies the condition codes and the jump in the
1068*38fd1498Szrj sequence is a conditional jump. We can not do this because we can
1069*38fd1498Szrj not change the direction of the jump because the condition codes
1070*38fd1498Szrj will effect the direction of the jump in the sequence. */
1071*38fd1498Szrj
1072*38fd1498Szrj CLEAR_RESOURCE (&cc_set);
1073*38fd1498Szrj
1074*38fd1498Szrj rtx_insn *trial;
1075*38fd1498Szrj FOR_EACH_VEC_ELT (*delay_list, i, trial)
1076*38fd1498Szrj {
1077*38fd1498Szrj mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL);
1078*38fd1498Szrj if (insn_references_resource_p (seq->insn (0), &cc_set, false))
1079*38fd1498Szrj return;
1080*38fd1498Szrj }
1081*38fd1498Szrj
1082*38fd1498Szrj if (XVECLEN (seq, 0) - 1 > slots_remaining
1083*38fd1498Szrj || ! condition_dominates_p (condition, seq->insn (0))
1084*38fd1498Szrj || ! single_set (seq->insn (0)))
1085*38fd1498Szrj return;
1086*38fd1498Szrj
1087*38fd1498Szrj /* On some targets, branches with delay slots can have a limited
1088*38fd1498Szrj displacement. Give the back end a chance to tell us we can't do
1089*38fd1498Szrj this. */
1090*38fd1498Szrj if (! targetm.can_follow_jump (insn, seq->insn (0)))
1091*38fd1498Szrj return;
1092*38fd1498Szrj
1093*38fd1498Szrj redundant = XALLOCAVEC (rtx_insn *, XVECLEN (seq, 0));
1094*38fd1498Szrj for (i = 1; i < seq->len (); i++)
1095*38fd1498Szrj {
1096*38fd1498Szrj rtx_insn *trial = seq->insn (i);
1097*38fd1498Szrj int flags;
1098*38fd1498Szrj
1099*38fd1498Szrj if (insn_references_resource_p (trial, sets, false)
1100*38fd1498Szrj || insn_sets_resource_p (trial, needed, false)
1101*38fd1498Szrj || insn_sets_resource_p (trial, sets, false)
1102*38fd1498Szrj /* If TRIAL sets CC0, we can't copy it, so we can't steal this
1103*38fd1498Szrj delay list. */
1104*38fd1498Szrj || (HAVE_cc0 && find_reg_note (trial, REG_CC_USER, NULL_RTX))
1105*38fd1498Szrj /* If TRIAL is from the fallthrough code of an annulled branch insn
1106*38fd1498Szrj in SEQ, we cannot use it. */
1107*38fd1498Szrj || (INSN_ANNULLED_BRANCH_P (seq->insn (0))
1108*38fd1498Szrj && ! INSN_FROM_TARGET_P (trial)))
1109*38fd1498Szrj return;
1110*38fd1498Szrj
1111*38fd1498Szrj /* If this insn was already done (usually in a previous delay slot),
1112*38fd1498Szrj pretend we put it in our delay slot. */
1113*38fd1498Szrj redundant[i] = redundant_insn (trial, insn, new_delay_list);
1114*38fd1498Szrj if (redundant[i])
1115*38fd1498Szrj continue;
1116*38fd1498Szrj
1117*38fd1498Szrj /* We will end up re-vectoring this branch, so compute flags
1118*38fd1498Szrj based on jumping to the new label. */
1119*38fd1498Szrj flags = get_jump_flags (insn, JUMP_LABEL (seq->insn (0)));
1120*38fd1498Szrj
1121*38fd1498Szrj if (! must_annul
1122*38fd1498Szrj && ((condition == const_true_rtx
1123*38fd1498Szrj || (! insn_sets_resource_p (trial, other_needed, false)
1124*38fd1498Szrj && ! may_trap_or_fault_p (PATTERN (trial)))))
1125*38fd1498Szrj ? eligible_for_delay (insn, total_slots_filled, trial, flags)
1126*38fd1498Szrj : (must_annul || (delay_list->is_empty () && new_delay_list.is_empty ()))
1127*38fd1498Szrj && (must_annul = 1,
1128*38fd1498Szrj check_annul_list_true_false (0, *delay_list)
1129*38fd1498Szrj && check_annul_list_true_false (0, new_delay_list)
1130*38fd1498Szrj && eligible_for_annul_false (insn, total_slots_filled,
1131*38fd1498Szrj trial, flags)))
1132*38fd1498Szrj {
1133*38fd1498Szrj if (must_annul)
1134*38fd1498Szrj {
1135*38fd1498Szrj /* Frame-related instructions cannot go into annulled delay
1136*38fd1498Szrj slots; doing so would corrupt the DWARF info. */
1137*38fd1498Szrj if (RTX_FRAME_RELATED_P (trial))
1138*38fd1498Szrj return;
1139*38fd1498Szrj used_annul = 1;
1140*38fd1498Szrj }
1141*38fd1498Szrj rtx_insn *temp = copy_delay_slot_insn (trial);
1142*38fd1498Szrj INSN_FROM_TARGET_P (temp) = 1;
1143*38fd1498Szrj add_to_delay_list (temp, &new_delay_list);
1144*38fd1498Szrj total_slots_filled++;
1145*38fd1498Szrj
1146*38fd1498Szrj if (--slots_remaining == 0)
1147*38fd1498Szrj break;
1148*38fd1498Szrj }
1149*38fd1498Szrj else
1150*38fd1498Szrj return;
1151*38fd1498Szrj }
1152*38fd1498Szrj
1153*38fd1498Szrj /* Record the effect of the instructions that were redundant and which
1154*38fd1498Szrj we therefore decided not to copy. */
1155*38fd1498Szrj for (i = 1; i < seq->len (); i++)
1156*38fd1498Szrj if (redundant[i])
1157*38fd1498Szrj {
1158*38fd1498Szrj fix_reg_dead_note (redundant[i], insn);
1159*38fd1498Szrj update_block (seq->insn (i), insn);
1160*38fd1498Szrj }
1161*38fd1498Szrj
1162*38fd1498Szrj /* Show the place to which we will be branching. */
1163*38fd1498Szrj *pnew_thread = first_active_target_insn (JUMP_LABEL (seq->insn (0)));
1164*38fd1498Szrj
1165*38fd1498Szrj /* Add any new insns to the delay list and update the count of the
1166*38fd1498Szrj number of slots filled. */
1167*38fd1498Szrj *pslots_filled = total_slots_filled;
1168*38fd1498Szrj if (used_annul)
1169*38fd1498Szrj *pannul_p = 1;
1170*38fd1498Szrj
1171*38fd1498Szrj rtx_insn *temp;
1172*38fd1498Szrj FOR_EACH_VEC_ELT (new_delay_list, i, temp)
1173*38fd1498Szrj add_to_delay_list (temp, delay_list);
1174*38fd1498Szrj }
1175*38fd1498Szrj
1176*38fd1498Szrj /* Similar to steal_delay_list_from_target except that SEQ is on the
1177*38fd1498Szrj fallthrough path of INSN. Here we only do something if the delay insn
1178*38fd1498Szrj of SEQ is an unconditional branch. In that case we steal its delay slot
1179*38fd1498Szrj for INSN since unconditional branches are much easier to fill. */
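/* An illustrative sketch (the mnemonics and register names below are
   hypothetical, not taken from any particular port):

       INSN:  beq  %r1,%r2,L_far       ; conditional branch, empty delay slot
       SEQ:   b    L_next              ; unconditional branch on the
              add  %r3,%r3,1           ;   fall-through path, slot filled

   Because SEQ's delay insn is an unconditional branch, the `add' can be
   removed from SEQ's delay slot and placed in INSN's slot instead; the
   unconditional branch is easy to refill later.  */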
1180*38fd1498Szrj
1181*38fd1498Szrj static void
1182*38fd1498Szrj steal_delay_list_from_fallthrough (rtx_insn *insn, rtx condition,
1183*38fd1498Szrj rtx_sequence *seq,
1184*38fd1498Szrj vec<rtx_insn *> *delay_list,
1185*38fd1498Szrj struct resources *sets,
1186*38fd1498Szrj struct resources *needed,
1187*38fd1498Szrj struct resources *other_needed,
1188*38fd1498Szrj int slots_to_fill, int *pslots_filled,
1189*38fd1498Szrj int *pannul_p)
1190*38fd1498Szrj {
1191*38fd1498Szrj int i;
1192*38fd1498Szrj int flags;
1193*38fd1498Szrj int must_annul = *pannul_p;
1194*38fd1498Szrj int used_annul = 0;
1195*38fd1498Szrj
1196*38fd1498Szrj flags = get_jump_flags (insn, JUMP_LABEL (insn));
1197*38fd1498Szrj
1198*38fd1498Szrj /* We can't do anything if SEQ's delay insn isn't an
1199*38fd1498Szrj unconditional branch. */
1200*38fd1498Szrj
1201*38fd1498Szrj if (! simplejump_or_return_p (seq->insn (0)))
1202*38fd1498Szrj return;
1203*38fd1498Szrj
1204*38fd1498Szrj for (i = 1; i < seq->len (); i++)
1205*38fd1498Szrj {
1206*38fd1498Szrj rtx_insn *trial = seq->insn (i);
1207*38fd1498Szrj rtx_insn *prior_insn;
1208*38fd1498Szrj
1209*38fd1498Szrj /* If TRIAL sets CC0, stealing it will move it too far from the use
1210*38fd1498Szrj of CC0. */
1211*38fd1498Szrj if (insn_references_resource_p (trial, sets, false)
1212*38fd1498Szrj || insn_sets_resource_p (trial, needed, false)
1213*38fd1498Szrj || insn_sets_resource_p (trial, sets, false)
1214*38fd1498Szrj || (HAVE_cc0 && sets_cc0_p (PATTERN (trial))))
1215*38fd1498Szrj
1216*38fd1498Szrj break;
1217*38fd1498Szrj
1218*38fd1498Szrj /* If this insn was already done, we don't need it. */
1219*38fd1498Szrj if ((prior_insn = redundant_insn (trial, insn, *delay_list)))
1220*38fd1498Szrj {
1221*38fd1498Szrj fix_reg_dead_note (prior_insn, insn);
1222*38fd1498Szrj update_block (trial, insn);
1223*38fd1498Szrj delete_from_delay_slot (trial);
1224*38fd1498Szrj continue;
1225*38fd1498Szrj }
1226*38fd1498Szrj
1227*38fd1498Szrj if (! must_annul
1228*38fd1498Szrj && ((condition == const_true_rtx
1229*38fd1498Szrj || (! insn_sets_resource_p (trial, other_needed, false)
1230*38fd1498Szrj && ! may_trap_or_fault_p (PATTERN (trial)))))
1231*38fd1498Szrj ? eligible_for_delay (insn, *pslots_filled, trial, flags)
1232*38fd1498Szrj : (must_annul || delay_list->is_empty ()) && (must_annul = 1,
1233*38fd1498Szrj check_annul_list_true_false (1, *delay_list)
1234*38fd1498Szrj && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
1235*38fd1498Szrj {
1236*38fd1498Szrj if (must_annul)
1237*38fd1498Szrj used_annul = 1;
1238*38fd1498Szrj delete_from_delay_slot (trial);
1239*38fd1498Szrj add_to_delay_list (trial, delay_list);
1240*38fd1498Szrj
1241*38fd1498Szrj if (++(*pslots_filled) == slots_to_fill)
1242*38fd1498Szrj break;
1243*38fd1498Szrj }
1244*38fd1498Szrj else
1245*38fd1498Szrj break;
1246*38fd1498Szrj }
1247*38fd1498Szrj
1248*38fd1498Szrj if (used_annul)
1249*38fd1498Szrj *pannul_p = 1;
1250*38fd1498Szrj }
1251*38fd1498Szrj
1252*38fd1498Szrj /* Try merging insns starting at THREAD which match exactly the insns in
1253*38fd1498Szrj INSN's delay list.
1254*38fd1498Szrj
1255*38fd1498Szrj If all insns were matched and the insn was previously annulling, the
1256*38fd1498Szrj annul bit will be cleared.
1257*38fd1498Szrj
1258*38fd1498Szrj For each insn that is merged, if the branch is or will be non-annulling,
1259*38fd1498Szrj we delete the merged insn. */
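/* A sketch of the case this handles (purely illustrative):

       INSN:   annulling conditional branch to L1
               delay slot: add %r4,%r4,1     ; copied from the target
       THREAD: add %r4,%r4,1                 ; first active insn of THREAD
               ...

   The insn in INSN's delay slot matches the first insn of THREAD.  Once
   every slot has been matched this way, the annul bit on INSN can be
   cleared and the duplicate copies at THREAD deleted.  */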
1260*38fd1498Szrj
1261*38fd1498Szrj static void
1262*38fd1498Szrj try_merge_delay_insns (rtx_insn *insn, rtx_insn *thread)
1263*38fd1498Szrj {
1264*38fd1498Szrj rtx_insn *trial, *next_trial;
1265*38fd1498Szrj rtx_insn *delay_insn = as_a <rtx_insn *> (XVECEXP (PATTERN (insn), 0, 0));
1266*38fd1498Szrj int annul_p = JUMP_P (delay_insn) && INSN_ANNULLED_BRANCH_P (delay_insn);
1267*38fd1498Szrj int slot_number = 1;
1268*38fd1498Szrj int num_slots = XVECLEN (PATTERN (insn), 0);
1269*38fd1498Szrj rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
1270*38fd1498Szrj struct resources set, needed, modified;
1271*38fd1498Szrj auto_vec<std::pair<rtx_insn *, bool>, 10> merged_insns;
1272*38fd1498Szrj int flags;
1273*38fd1498Szrj
1274*38fd1498Szrj flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn));
1275*38fd1498Szrj
1276*38fd1498Szrj CLEAR_RESOURCE (&needed);
1277*38fd1498Szrj CLEAR_RESOURCE (&set);
1278*38fd1498Szrj
1279*38fd1498Szrj /* If this is not an annulling branch, take into account anything needed in
1280*38fd1498Szrj INSN's delay slot. This prevents two increments from being incorrectly
1281*38fd1498Szrj folded into one. If we are annulling, such folding would in fact be
1282*38fd1498Szrj correct. (The alternative, looking at things set in NEXT_TO_MATCH,
1283*38fd1498Szrj would essentially disable this optimization. This method is somewhat of
1284*38fd1498Szrj a kludge, but I don't see a better way.) */
1285*38fd1498Szrj if (! annul_p)
1286*38fd1498Szrj for (int i = 1; i < num_slots; i++)
1287*38fd1498Szrj if (XVECEXP (PATTERN (insn), 0, i))
1288*38fd1498Szrj mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed,
1289*38fd1498Szrj true);
1290*38fd1498Szrj
1291*38fd1498Szrj for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
1292*38fd1498Szrj {
1293*38fd1498Szrj rtx pat = PATTERN (trial);
1294*38fd1498Szrj rtx oldtrial = trial;
1295*38fd1498Szrj
1296*38fd1498Szrj next_trial = next_nonnote_insn (trial);
1297*38fd1498Szrj
1298*38fd1498Szrj /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */
1299*38fd1498Szrj if (NONJUMP_INSN_P (trial)
1300*38fd1498Szrj && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
1301*38fd1498Szrj continue;
1302*38fd1498Szrj
1303*38fd1498Szrj if (GET_CODE (next_to_match) == GET_CODE (trial)
1304*38fd1498Szrj /* We can't share an insn that sets cc0. */
1305*38fd1498Szrj && (!HAVE_cc0 || ! sets_cc0_p (pat))
1306*38fd1498Szrj && ! insn_references_resource_p (trial, &set, true)
1307*38fd1498Szrj && ! insn_sets_resource_p (trial, &set, true)
1308*38fd1498Szrj && ! insn_sets_resource_p (trial, &needed, true)
1309*38fd1498Szrj && (trial = try_split (pat, trial, 0)) != 0
1310*38fd1498Szrj /* Update next_trial, in case try_split succeeded. */
1311*38fd1498Szrj && (next_trial = next_nonnote_insn (trial))
1312*38fd1498Szrj /* Likewise THREAD. */
1313*38fd1498Szrj && (thread = oldtrial == thread ? trial : thread)
1314*38fd1498Szrj && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
1315*38fd1498Szrj /* We have to test this condition if the annul condition is different
1316*38fd1498Szrj from (and less restrictive than) the non-annulling one. */
1317*38fd1498Szrj && eligible_for_delay (delay_insn, slot_number - 1, trial, flags))
1318*38fd1498Szrj {
1319*38fd1498Szrj
1320*38fd1498Szrj if (! annul_p)
1321*38fd1498Szrj {
1322*38fd1498Szrj update_block (trial, thread);
1323*38fd1498Szrj if (trial == thread)
1324*38fd1498Szrj thread = next_active_insn (thread);
1325*38fd1498Szrj
1326*38fd1498Szrj delete_related_insns (trial);
1327*38fd1498Szrj INSN_FROM_TARGET_P (next_to_match) = 0;
1328*38fd1498Szrj }
1329*38fd1498Szrj else
1330*38fd1498Szrj merged_insns.safe_push (std::pair<rtx_insn *, bool> (trial, false));
1331*38fd1498Szrj
1332*38fd1498Szrj if (++slot_number == num_slots)
1333*38fd1498Szrj break;
1334*38fd1498Szrj
1335*38fd1498Szrj next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
1336*38fd1498Szrj }
1337*38fd1498Szrj
1338*38fd1498Szrj mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
1339*38fd1498Szrj mark_referenced_resources (trial, &needed, true);
1340*38fd1498Szrj }
1341*38fd1498Szrj
1342*38fd1498Szrj /* See if we stopped on a filled insn. If we did, try to see if its
1343*38fd1498Szrj delay slots match. */
1344*38fd1498Szrj if (slot_number != num_slots
1345*38fd1498Szrj && trial && NONJUMP_INSN_P (trial)
1346*38fd1498Szrj && GET_CODE (PATTERN (trial)) == SEQUENCE
1347*38fd1498Szrj && !(JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
1348*38fd1498Szrj && INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0))))
1349*38fd1498Szrj {
1350*38fd1498Szrj rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (trial));
1351*38fd1498Szrj rtx filled_insn = XVECEXP (pat, 0, 0);
1352*38fd1498Szrj
1353*38fd1498Szrj /* Account for resources set/needed by the filled insn. */
1354*38fd1498Szrj mark_set_resources (filled_insn, &set, 0, MARK_SRC_DEST_CALL);
1355*38fd1498Szrj mark_referenced_resources (filled_insn, &needed, true);
1356*38fd1498Szrj
1357*38fd1498Szrj for (int i = 1; i < pat->len (); i++)
1358*38fd1498Szrj {
1359*38fd1498Szrj rtx_insn *dtrial = pat->insn (i);
1360*38fd1498Szrj
1361*38fd1498Szrj CLEAR_RESOURCE (&modified);
1362*38fd1498Szrj /* Account for resources set by the insn following NEXT_TO_MATCH
1363*38fd1498Szrj inside INSN's delay list. */
1364*38fd1498Szrj for (int j = 1; slot_number + j < num_slots; j++)
1365*38fd1498Szrj mark_set_resources (XVECEXP (PATTERN (insn), 0, slot_number + j),
1366*38fd1498Szrj &modified, 0, MARK_SRC_DEST_CALL);
1367*38fd1498Szrj /* Account for resources set by the insn before DTRIAL and inside
1368*38fd1498Szrj TRIAL's delay list. */
1369*38fd1498Szrj for (int j = 1; j < i; j++)
1370*38fd1498Szrj mark_set_resources (XVECEXP (pat, 0, j),
1371*38fd1498Szrj &modified, 0, MARK_SRC_DEST_CALL);
1372*38fd1498Szrj if (! insn_references_resource_p (dtrial, &set, true)
1373*38fd1498Szrj && ! insn_sets_resource_p (dtrial, &set, true)
1374*38fd1498Szrj && ! insn_sets_resource_p (dtrial, &needed, true)
1375*38fd1498Szrj && (!HAVE_cc0 || ! sets_cc0_p (PATTERN (dtrial)))
1376*38fd1498Szrj && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
1377*38fd1498Szrj /* Check that DTRIAL and NEXT_TO_MATCH do not reference a
1378*38fd1498Szrj resource modified between them (only DTRIAL is checked because
1379*38fd1498Szrj NEXT_TO_MATCH and DTRIAL must be equal in order to reach
1380*38fd1498Szrj this line). */
1381*38fd1498Szrj && ! insn_references_resource_p (dtrial, &modified, true)
1382*38fd1498Szrj && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags))
1383*38fd1498Szrj {
1384*38fd1498Szrj if (! annul_p)
1385*38fd1498Szrj {
1386*38fd1498Szrj rtx_insn *new_rtx;
1387*38fd1498Szrj
1388*38fd1498Szrj update_block (dtrial, thread);
1389*38fd1498Szrj new_rtx = delete_from_delay_slot (dtrial);
1390*38fd1498Szrj if (thread->deleted ())
1391*38fd1498Szrj thread = new_rtx;
1392*38fd1498Szrj INSN_FROM_TARGET_P (next_to_match) = 0;
1393*38fd1498Szrj }
1394*38fd1498Szrj else
1395*38fd1498Szrj merged_insns.safe_push (std::pair<rtx_insn *, bool> (dtrial,
1396*38fd1498Szrj true));
1397*38fd1498Szrj
1398*38fd1498Szrj if (++slot_number == num_slots)
1399*38fd1498Szrj break;
1400*38fd1498Szrj
1401*38fd1498Szrj next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
1402*38fd1498Szrj }
1403*38fd1498Szrj else
1404*38fd1498Szrj {
1405*38fd1498Szrj /* Keep track of the set/referenced resources for the delay
1406*38fd1498Szrj slots of any trial insns we encounter. */
1407*38fd1498Szrj mark_set_resources (dtrial, &set, 0, MARK_SRC_DEST_CALL);
1408*38fd1498Szrj mark_referenced_resources (dtrial, &needed, true);
1409*38fd1498Szrj }
1410*38fd1498Szrj }
1411*38fd1498Szrj }
1412*38fd1498Szrj
1413*38fd1498Szrj /* If all insns in the delay slots have been matched and we were previously
1414*38fd1498Szrj annulling the branch, we need not do so any more. In that case delete all
1415*38fd1498Szrj the merged insns. Also clear the INSN_FROM_TARGET_P bit of each insn in
1416*38fd1498Szrj the delay list so that we know that it isn't only being used at the
1417*38fd1498Szrj target. */
1418*38fd1498Szrj if (slot_number == num_slots && annul_p)
1419*38fd1498Szrj {
1420*38fd1498Szrj unsigned int len = merged_insns.length ();
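/* Walk MERGED_INSNS from last to first; I is unsigned, so after reaching
   zero it wraps around and the `i < len' test terminates the loop.  */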
1421*38fd1498Szrj for (unsigned int i = len - 1; i < len; i--)
1422*38fd1498Szrj if (merged_insns[i].second)
1423*38fd1498Szrj {
1424*38fd1498Szrj update_block (merged_insns[i].first, thread);
1425*38fd1498Szrj rtx_insn *new_rtx = delete_from_delay_slot (merged_insns[i].first);
1426*38fd1498Szrj if (thread->deleted ())
1427*38fd1498Szrj thread = new_rtx;
1428*38fd1498Szrj }
1429*38fd1498Szrj else
1430*38fd1498Szrj {
1431*38fd1498Szrj update_block (merged_insns[i].first, thread);
1432*38fd1498Szrj delete_related_insns (merged_insns[i].first);
1433*38fd1498Szrj }
1434*38fd1498Szrj
1435*38fd1498Szrj INSN_ANNULLED_BRANCH_P (delay_insn) = 0;
1436*38fd1498Szrj
1437*38fd1498Szrj for (int i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1438*38fd1498Szrj INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
1439*38fd1498Szrj }
1440*38fd1498Szrj }
1441*38fd1498Szrj
1442*38fd1498Szrj /* See if INSN is redundant with an insn in front of TARGET. Often this
1443*38fd1498Szrj is called when INSN is a candidate for a delay slot of TARGET.
1444*38fd1498Szrj DELAY_LIST are insns that will be placed in delay slots of TARGET in front
1445*38fd1498Szrj of INSN. Often INSN will be redundant with an insn in a delay slot of
1446*38fd1498Szrj some previous insn. This happens when we have a series of branches to the
1447*38fd1498Szrj same label; in that case the first insn at the target might want to go
1448*38fd1498Szrj into each of the delay slots.
1449*38fd1498Szrj
1450*38fd1498Szrj If we are not careful, this routine can take up a significant fraction
1451*38fd1498Szrj of the total compilation time (4%), but only wins rarely. Hence we
1452*38fd1498Szrj speed this routine up by making two passes. The first pass goes back
1453*38fd1498Szrj until it hits a label and sees if it finds an insn with an identical
1454*38fd1498Szrj pattern. Only in this (relatively rare) event does it check for
1455*38fd1498Szrj data conflicts.
1456*38fd1498Szrj
1457*38fd1498Szrj We do not split insns we encounter. This could cause us not to find a
1458*38fd1498Szrj redundant insn, but the cost of splitting seems greater than the possible
1459*38fd1498Szrj gain in rare cases. */
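/* Illustrative example (hypothetical code): several branches all jump to
   L1, and the first insn at L1, say `lw %r2,0(%r5)', has already been
   copied into the delay slot of an earlier branch to L1.  When the same
   insn is considered for another branch's delay slot, this routine can
   recognize that an identical, conflict-free copy already executes on
   that path, so no further copy is needed.  */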
1460*38fd1498Szrj
1461*38fd1498Szrj static rtx_insn *
1462*38fd1498Szrj redundant_insn (rtx insn, rtx_insn *target, const vec<rtx_insn *> &delay_list)
1463*38fd1498Szrj {
1464*38fd1498Szrj rtx target_main = target;
1465*38fd1498Szrj rtx ipat = PATTERN (insn);
1466*38fd1498Szrj rtx_insn *trial;
1467*38fd1498Szrj rtx pat;
1468*38fd1498Szrj struct resources needed, set;
1469*38fd1498Szrj int i;
1470*38fd1498Szrj unsigned insns_to_search;
1471*38fd1498Szrj
1472*38fd1498Szrj /* If INSN has any REG_UNUSED notes, it can't match anything since we
1473*38fd1498Szrj are allowed to not actually assign to such a register. */
1474*38fd1498Szrj if (find_reg_note (insn, REG_UNUSED, NULL_RTX) != 0)
1475*38fd1498Szrj return 0;
1476*38fd1498Szrj
1477*38fd1498Szrj /* Scan backwards looking for a match. */
1478*38fd1498Szrj for (trial = PREV_INSN (target),
1479*38fd1498Szrj insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
1480*38fd1498Szrj trial && insns_to_search > 0;
1481*38fd1498Szrj trial = PREV_INSN (trial))
1482*38fd1498Szrj {
1483*38fd1498Szrj /* (use (insn))s can come immediately after a barrier if the
1484*38fd1498Szrj label that used to precede them has been deleted as dead.
1485*38fd1498Szrj See delete_related_insns. */
1486*38fd1498Szrj if (LABEL_P (trial) || BARRIER_P (trial))
1487*38fd1498Szrj return 0;
1488*38fd1498Szrj
1489*38fd1498Szrj if (!INSN_P (trial))
1490*38fd1498Szrj continue;
1491*38fd1498Szrj --insns_to_search;
1492*38fd1498Szrj
1493*38fd1498Szrj pat = PATTERN (trial);
1494*38fd1498Szrj if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
1495*38fd1498Szrj continue;
1496*38fd1498Szrj
1497*38fd1498Szrj if (GET_CODE (trial) == DEBUG_INSN)
1498*38fd1498Szrj continue;
1499*38fd1498Szrj
1500*38fd1498Szrj if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (pat))
1501*38fd1498Szrj {
1502*38fd1498Szrj /* Stop for a CALL and its delay slots because it is difficult to
1503*38fd1498Szrj track its resource needs correctly. */
1504*38fd1498Szrj if (CALL_P (seq->element (0)))
1505*38fd1498Szrj return 0;
1506*38fd1498Szrj
1507*38fd1498Szrj /* Stop for an INSN or JUMP_INSN with delayed effects and its delay
1508*38fd1498Szrj slots because it is difficult to track its resource needs
1509*38fd1498Szrj correctly. */
1510*38fd1498Szrj
1511*38fd1498Szrj if (INSN_SETS_ARE_DELAYED (seq->insn (0)))
1512*38fd1498Szrj return 0;
1513*38fd1498Szrj
1514*38fd1498Szrj if (INSN_REFERENCES_ARE_DELAYED (seq->insn (0)))
1515*38fd1498Szrj return 0;
1516*38fd1498Szrj
1517*38fd1498Szrj /* See if any of the insns in the delay slot match, updating
1518*38fd1498Szrj resource requirements as we go. */
1519*38fd1498Szrj for (i = seq->len () - 1; i > 0; i--)
1520*38fd1498Szrj if (GET_CODE (seq->element (i)) == GET_CODE (insn)
1521*38fd1498Szrj && rtx_equal_p (PATTERN (seq->element (i)), ipat)
1522*38fd1498Szrj && ! find_reg_note (seq->element (i), REG_UNUSED, NULL_RTX))
1523*38fd1498Szrj break;
1524*38fd1498Szrj
1525*38fd1498Szrj /* If found a match, exit this loop early. */
1526*38fd1498Szrj if (i > 0)
1527*38fd1498Szrj break;
1528*38fd1498Szrj }
1529*38fd1498Szrj
1530*38fd1498Szrj else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat)
1531*38fd1498Szrj && ! find_reg_note (trial, REG_UNUSED, NULL_RTX))
1532*38fd1498Szrj break;
1533*38fd1498Szrj }
1534*38fd1498Szrj
1535*38fd1498Szrj /* If we didn't find an insn that matches, return 0. */
1536*38fd1498Szrj if (trial == 0)
1537*38fd1498Szrj return 0;
1538*38fd1498Szrj
1539*38fd1498Szrj /* See what resources this insn sets and needs. If they overlap, or
1540*38fd1498Szrj if this insn references CC0, it can't be redundant. */
1541*38fd1498Szrj
1542*38fd1498Szrj CLEAR_RESOURCE (&needed);
1543*38fd1498Szrj CLEAR_RESOURCE (&set);
1544*38fd1498Szrj mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
1545*38fd1498Szrj mark_referenced_resources (insn, &needed, true);
1546*38fd1498Szrj
1547*38fd1498Szrj /* If TARGET is a SEQUENCE, get the main insn. */
1548*38fd1498Szrj if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
1549*38fd1498Szrj target_main = XVECEXP (PATTERN (target), 0, 0);
1550*38fd1498Szrj
1551*38fd1498Szrj if (resource_conflicts_p (&needed, &set)
1552*38fd1498Szrj || (HAVE_cc0 && reg_mentioned_p (cc0_rtx, ipat))
1553*38fd1498Szrj /* The insn requiring the delay may not set anything needed or set by
1554*38fd1498Szrj INSN. */
1555*38fd1498Szrj || insn_sets_resource_p (target_main, &needed, true)
1556*38fd1498Szrj || insn_sets_resource_p (target_main, &set, true))
1557*38fd1498Szrj return 0;
1558*38fd1498Szrj
1559*38fd1498Szrj /* Insns we pass may not set either NEEDED or SET, so merge them for
1560*38fd1498Szrj simpler tests. */
1561*38fd1498Szrj needed.memory |= set.memory;
1562*38fd1498Szrj IOR_HARD_REG_SET (needed.regs, set.regs);
1563*38fd1498Szrj
1564*38fd1498Szrj /* This insn isn't redundant if it conflicts with an insn that either is
1565*38fd1498Szrj or will be in a delay slot of TARGET. */
1566*38fd1498Szrj
1567*38fd1498Szrj unsigned int j;
1568*38fd1498Szrj rtx_insn *temp;
1569*38fd1498Szrj FOR_EACH_VEC_ELT (delay_list, j, temp)
1570*38fd1498Szrj if (insn_sets_resource_p (temp, &needed, true))
1571*38fd1498Szrj return 0;
1572*38fd1498Szrj
1573*38fd1498Szrj if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
1574*38fd1498Szrj for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
1575*38fd1498Szrj if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed,
1576*38fd1498Szrj true))
1577*38fd1498Szrj return 0;
1578*38fd1498Szrj
1579*38fd1498Szrj /* Scan backwards until we reach a label or an insn that uses something
1580*38fd1498Szrj INSN sets or sets something INSN uses or sets. */
1581*38fd1498Szrj
1582*38fd1498Szrj for (trial = PREV_INSN (target),
1583*38fd1498Szrj insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
1584*38fd1498Szrj trial && !LABEL_P (trial) && insns_to_search > 0;
1585*38fd1498Szrj trial = PREV_INSN (trial))
1586*38fd1498Szrj {
1587*38fd1498Szrj if (!INSN_P (trial))
1588*38fd1498Szrj continue;
1589*38fd1498Szrj --insns_to_search;
1590*38fd1498Szrj
1591*38fd1498Szrj pat = PATTERN (trial);
1592*38fd1498Szrj if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
1593*38fd1498Szrj continue;
1594*38fd1498Szrj
1595*38fd1498Szrj if (GET_CODE (trial) == DEBUG_INSN)
1596*38fd1498Szrj continue;
1597*38fd1498Szrj
1598*38fd1498Szrj if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (pat))
1599*38fd1498Szrj {
1600*38fd1498Szrj bool annul_p = false;
1601*38fd1498Szrj rtx_insn *control = seq->insn (0);
1602*38fd1498Szrj
1603*38fd1498Szrj /* If this is a CALL_INSN and its delay slots, it is hard to track
1604*38fd1498Szrj the resource needs properly, so give up. */
1605*38fd1498Szrj if (CALL_P (control))
1606*38fd1498Szrj return 0;
1607*38fd1498Szrj
1608*38fd1498Szrj /* If this is an INSN or JUMP_INSN with delayed effects, it
1609*38fd1498Szrj is hard to track the resource needs properly, so give up. */
1610*38fd1498Szrj
1611*38fd1498Szrj if (INSN_SETS_ARE_DELAYED (control))
1612*38fd1498Szrj return 0;
1613*38fd1498Szrj
1614*38fd1498Szrj if (INSN_REFERENCES_ARE_DELAYED (control))
1615*38fd1498Szrj return 0;
1616*38fd1498Szrj
1617*38fd1498Szrj if (JUMP_P (control))
1618*38fd1498Szrj annul_p = INSN_ANNULLED_BRANCH_P (control);
1619*38fd1498Szrj
1620*38fd1498Szrj /* See if any of the insns in the delay slot match, updating
1621*38fd1498Szrj resource requirements as we go. */
1622*38fd1498Szrj for (i = seq->len () - 1; i > 0; i--)
1623*38fd1498Szrj {
1624*38fd1498Szrj rtx_insn *candidate = seq->insn (i);
1625*38fd1498Szrj
1626*38fd1498Szrj /* If an insn will be annulled if the branch is false, it isn't
1627*38fd1498Szrj considered as a possible duplicate insn. */
1628*38fd1498Szrj if (rtx_equal_p (PATTERN (candidate), ipat)
1629*38fd1498Szrj && ! (annul_p && INSN_FROM_TARGET_P (candidate)))
1630*38fd1498Szrj {
1631*38fd1498Szrj /* Show that this insn will be used in the sequel. */
1632*38fd1498Szrj INSN_FROM_TARGET_P (candidate) = 0;
1633*38fd1498Szrj return candidate;
1634*38fd1498Szrj }
1635*38fd1498Szrj
1636*38fd1498Szrj /* Unless this is an annulled insn from the target of a branch,
1637*38fd1498Szrj we must stop if it sets anything needed or set by INSN. */
1638*38fd1498Szrj if ((!annul_p || !INSN_FROM_TARGET_P (candidate))
1639*38fd1498Szrj && insn_sets_resource_p (candidate, &needed, true))
1640*38fd1498Szrj return 0;
1641*38fd1498Szrj }
1642*38fd1498Szrj
1643*38fd1498Szrj /* If the insn requiring the delay slot conflicts with INSN, we
1644*38fd1498Szrj must stop. */
1645*38fd1498Szrj if (insn_sets_resource_p (control, &needed, true))
1646*38fd1498Szrj return 0;
1647*38fd1498Szrj }
1648*38fd1498Szrj else
1649*38fd1498Szrj {
1650*38fd1498Szrj /* See if TRIAL is the same as INSN. */
1651*38fd1498Szrj pat = PATTERN (trial);
1652*38fd1498Szrj if (rtx_equal_p (pat, ipat))
1653*38fd1498Szrj return trial;
1654*38fd1498Szrj
1655*38fd1498Szrj /* Can't go any further if TRIAL conflicts with INSN. */
1656*38fd1498Szrj if (insn_sets_resource_p (trial, &needed, true))
1657*38fd1498Szrj return 0;
1658*38fd1498Szrj }
1659*38fd1498Szrj }
1660*38fd1498Szrj
1661*38fd1498Szrj return 0;
1662*38fd1498Szrj }
1663*38fd1498Szrj
1664*38fd1498Szrj /* Return 1 if THREAD can only be executed in one way. If LABEL is nonzero,
1665*38fd1498Szrj it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH
1666*38fd1498Szrj is nonzero, we are allowed to fall into this thread; otherwise, we are
1667*38fd1498Szrj not.
1668*38fd1498Szrj
1669*38fd1498Szrj If LABEL is used more than once or we pass a label other than LABEL before
1670*38fd1498Szrj finding an active insn, we do not own this thread. */
1671*38fd1498Szrj
1672*38fd1498Szrj static int
1673*38fd1498Szrj own_thread_p (rtx thread, rtx label, int allow_fallthrough)
1674*38fd1498Szrj {
1675*38fd1498Szrj rtx_insn *active_insn;
1676*38fd1498Szrj rtx_insn *insn;
1677*38fd1498Szrj
1678*38fd1498Szrj /* We don't own the function end. */
1679*38fd1498Szrj if (thread == 0 || ANY_RETURN_P (thread))
1680*38fd1498Szrj return 0;
1681*38fd1498Szrj
1682*38fd1498Szrj /* We have a non-NULL insn. */
1683*38fd1498Szrj rtx_insn *thread_insn = as_a <rtx_insn *> (thread);
1684*38fd1498Szrj
1685*38fd1498Szrj /* Get the first active insn, or THREAD_INSN, if it is an active insn. */
1686*38fd1498Szrj active_insn = next_active_insn (PREV_INSN (thread_insn));
1687*38fd1498Szrj
1688*38fd1498Szrj for (insn = thread_insn; insn != active_insn; insn = NEXT_INSN (insn))
1689*38fd1498Szrj if (LABEL_P (insn)
1690*38fd1498Szrj && (insn != label || LABEL_NUSES (insn) != 1))
1691*38fd1498Szrj return 0;
1692*38fd1498Szrj
1693*38fd1498Szrj if (allow_fallthrough)
1694*38fd1498Szrj return 1;
1695*38fd1498Szrj
1696*38fd1498Szrj /* Ensure that we reach a BARRIER before any insn or label. */
1697*38fd1498Szrj for (insn = prev_nonnote_insn (thread_insn);
1698*38fd1498Szrj insn == 0 || !BARRIER_P (insn);
1699*38fd1498Szrj insn = prev_nonnote_insn (insn))
1700*38fd1498Szrj if (insn == 0
1701*38fd1498Szrj || LABEL_P (insn)
1702*38fd1498Szrj || (NONJUMP_INSN_P (insn)
1703*38fd1498Szrj && GET_CODE (PATTERN (insn)) != USE
1704*38fd1498Szrj && GET_CODE (PATTERN (insn)) != CLOBBER))
1705*38fd1498Szrj return 0;
1706*38fd1498Szrj
1707*38fd1498Szrj return 1;
1708*38fd1498Szrj }
1709*38fd1498Szrj
1710*38fd1498Szrj /* Called when INSN is being moved from a location near the target of a jump.
1711*38fd1498Szrj We leave a marker of the form (use (INSN)) immediately in front of WHERE
1712*38fd1498Szrj for mark_target_live_regs. These markers will be deleted at the end.
1713*38fd1498Szrj
1714*38fd1498Szrj We used to try to update the live status of registers if WHERE is at
1715*38fd1498Szrj the start of a basic block, but that can't work since we may remove a
1716*38fd1498Szrj BARRIER in relax_delay_slots. */
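/* Schematically, for a hypothetical insn 42 being moved, a marker of the
   form

       (use (insn 42 ...))

   is emitted immediately before WHERE so that mark_target_live_regs still
   sees the values the insn used; all such markers are removed again at the
   end of the pass.  */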
1717*38fd1498Szrj
1718*38fd1498Szrj static void
1719*38fd1498Szrj update_block (rtx_insn *insn, rtx_insn *where)
1720*38fd1498Szrj {
1721*38fd1498Szrj emit_insn_before (gen_rtx_USE (VOIDmode, insn), where);
1722*38fd1498Szrj
1723*38fd1498Szrj /* INSN might be making a value live in a block where it didn't use to
1724*38fd1498Szrj be. So recompute liveness information for this block. */
1725*38fd1498Szrj incr_ticks_for_insn (insn);
1726*38fd1498Szrj }
1727*38fd1498Szrj
1728*38fd1498Szrj /* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for
1729*38fd1498Szrj the basic block containing the jump. */
1730*38fd1498Szrj
1731*38fd1498Szrj static int
1732*38fd1498Szrj reorg_redirect_jump (rtx_jump_insn *jump, rtx nlabel)
1733*38fd1498Szrj {
1734*38fd1498Szrj incr_ticks_for_insn (jump);
1735*38fd1498Szrj return redirect_jump (jump, nlabel, 1);
1736*38fd1498Szrj }
1737*38fd1498Szrj
1738*38fd1498Szrj /* Called when INSN is being moved forward into a delay slot of DELAYED_INSN.
1739*38fd1498Szrj We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes
1740*38fd1498Szrj that reference values used in INSN. If we find one, then we move the
1741*38fd1498Szrj REG_DEAD note to INSN.
1742*38fd1498Szrj
1743*38fd1498Szrj This is needed to handle the case where a later insn (after INSN) has a
1744*38fd1498Szrj REG_DEAD note for a register used by INSN, and this later insn subsequently
1745*38fd1498Szrj gets moved before a CODE_LABEL because it is a redundant insn. In this
1746*38fd1498Szrj case, mark_target_live_regs may be confused into thinking the register
1747*38fd1498Szrj is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */
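/* A small illustration (the register number is made up): INSN uses reg 7,
   and some insn P between INSN and DELAYED_INSN carries (REG_DEAD (reg 7)).
   The note is moved from P to INSN so that reg 7 is not later believed to
   die at the wrong place.  */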
1748*38fd1498Szrj
1749*38fd1498Szrj static void
1750*38fd1498Szrj update_reg_dead_notes (rtx_insn *insn, rtx_insn *delayed_insn)
1751*38fd1498Szrj {
1752*38fd1498Szrj rtx link, next;
1753*38fd1498Szrj rtx_insn *p;
1754*38fd1498Szrj
1755*38fd1498Szrj for (p = next_nonnote_insn (insn); p != delayed_insn;
1756*38fd1498Szrj p = next_nonnote_insn (p))
1757*38fd1498Szrj for (link = REG_NOTES (p); link; link = next)
1758*38fd1498Szrj {
1759*38fd1498Szrj next = XEXP (link, 1);
1760*38fd1498Szrj
1761*38fd1498Szrj if (REG_NOTE_KIND (link) != REG_DEAD
1762*38fd1498Szrj || !REG_P (XEXP (link, 0)))
1763*38fd1498Szrj continue;
1764*38fd1498Szrj
1765*38fd1498Szrj if (reg_referenced_p (XEXP (link, 0), PATTERN (insn)))
1766*38fd1498Szrj {
1767*38fd1498Szrj /* Move the REG_DEAD note from P to INSN. */
1768*38fd1498Szrj remove_note (p, link);
1769*38fd1498Szrj XEXP (link, 1) = REG_NOTES (insn);
1770*38fd1498Szrj REG_NOTES (insn) = link;
1771*38fd1498Szrj }
1772*38fd1498Szrj }
1773*38fd1498Szrj }
1774*38fd1498Szrj
1775*38fd1498Szrj /* Called when an insn redundant with start_insn is deleted. If there
1776*38fd1498Szrj is a REG_DEAD note for the target of start_insn between start_insn
1777*38fd1498Szrj and stop_insn, then the REG_DEAD note needs to be deleted since the
1778*38fd1498Szrj value no longer dies there.
1779*38fd1498Szrj
1780*38fd1498Szrj If the REG_DEAD note isn't deleted, then mark_target_live_regs may be
1781*38fd1498Szrj confused into thinking the register is dead. */
1782*38fd1498Szrj
1783*38fd1498Szrj static void
1784*38fd1498Szrj fix_reg_dead_note (rtx_insn *start_insn, rtx stop_insn)
1785*38fd1498Szrj {
1786*38fd1498Szrj rtx link, next;
1787*38fd1498Szrj rtx_insn *p;
1788*38fd1498Szrj
1789*38fd1498Szrj for (p = next_nonnote_insn (start_insn); p != stop_insn;
1790*38fd1498Szrj p = next_nonnote_insn (p))
1791*38fd1498Szrj for (link = REG_NOTES (p); link; link = next)
1792*38fd1498Szrj {
1793*38fd1498Szrj next = XEXP (link, 1);
1794*38fd1498Szrj
1795*38fd1498Szrj if (REG_NOTE_KIND (link) != REG_DEAD
1796*38fd1498Szrj || !REG_P (XEXP (link, 0)))
1797*38fd1498Szrj continue;
1798*38fd1498Szrj
1799*38fd1498Szrj if (reg_set_p (XEXP (link, 0), PATTERN (start_insn)))
1800*38fd1498Szrj {
1801*38fd1498Szrj remove_note (p, link);
1802*38fd1498Szrj return;
1803*38fd1498Szrj }
1804*38fd1498Szrj }
1805*38fd1498Szrj }
1806*38fd1498Szrj
1807*38fd1498Szrj /* Delete any REG_UNUSED notes that exist on INSN but not on OTHER_INSN.
1808*38fd1498Szrj
1809*38fd1498Szrj This handles the case of udivmodXi4 instructions which optimize their
1810*38fd1498Szrj output depending on whether any REG_UNUSED notes are present. We must
1811*38fd1498Szrj make sure that INSN calculates as many results as OTHER_INSN does. */
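/* For example (illustrative), a division insn may produce both a quotient
   and a remainder.  If OTHER_INSN carries no (REG_UNUSED ...) note for the
   remainder register, the remainder is considered live there, so any such
   note on INSN is removed to force INSN to compute both results too.  */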
1812*38fd1498Szrj
1813*38fd1498Szrj static void
1814*38fd1498Szrj update_reg_unused_notes (rtx_insn *insn, rtx other_insn)
1815*38fd1498Szrj {
1816*38fd1498Szrj rtx link, next;
1817*38fd1498Szrj
1818*38fd1498Szrj for (link = REG_NOTES (insn); link; link = next)
1819*38fd1498Szrj {
1820*38fd1498Szrj next = XEXP (link, 1);
1821*38fd1498Szrj
1822*38fd1498Szrj if (REG_NOTE_KIND (link) != REG_UNUSED
1823*38fd1498Szrj || !REG_P (XEXP (link, 0)))
1824*38fd1498Szrj continue;
1825*38fd1498Szrj
1826*38fd1498Szrj if (!find_regno_note (other_insn, REG_UNUSED, REGNO (XEXP (link, 0))))
1827*38fd1498Szrj remove_note (insn, link);
1828*38fd1498Szrj }
1829*38fd1498Szrj }
1830*38fd1498Szrj
1831*38fd1498Szrj static vec <rtx> sibling_labels;
1832*38fd1498Szrj
1833*38fd1498Szrj /* Return the label before INSN, or put a new label there. If SIBLING is
1834*38fd1498Szrj non-zero, it is another label associated with the new label (if any),
1835*38fd1498Szrj typically the former target of the jump that will be redirected to
1836*38fd1498Szrj the new label. */
1837*38fd1498Szrj
1838*38fd1498Szrj static rtx_insn *
1839*38fd1498Szrj get_label_before (rtx_insn *insn, rtx sibling)
1840*38fd1498Szrj {
1841*38fd1498Szrj rtx_insn *label;
1842*38fd1498Szrj
1843*38fd1498Szrj /* Find an existing label at this point
1844*38fd1498Szrj or make a new one if there is none. */
1845*38fd1498Szrj label = prev_nonnote_insn (insn);
1846*38fd1498Szrj
1847*38fd1498Szrj if (label == 0 || !LABEL_P (label))
1848*38fd1498Szrj {
1849*38fd1498Szrj rtx_insn *prev = PREV_INSN (insn);
1850*38fd1498Szrj
1851*38fd1498Szrj label = gen_label_rtx ();
1852*38fd1498Szrj emit_label_after (label, prev);
1853*38fd1498Szrj LABEL_NUSES (label) = 0;
1854*38fd1498Szrj if (sibling)
1855*38fd1498Szrj {
1856*38fd1498Szrj sibling_labels.safe_push (label);
1857*38fd1498Szrj sibling_labels.safe_push (sibling);
1858*38fd1498Szrj }
1859*38fd1498Szrj }
1860*38fd1498Szrj return label;
1861*38fd1498Szrj }
1862*38fd1498Szrj
1863*38fd1498Szrj /* Scan a function looking for insns that need a delay slot and find insns to
1864*38fd1498Szrj put into the delay slot.
1865*38fd1498Szrj
1866*38fd1498Szrj NON_JUMPS_P is nonzero if we are to only try to fill non-jump insns (such
1867*38fd1498Szrj as calls). We do these first since we don't want jump insns (that are
1868*38fd1498Szrj easier to fill) to get the only insns that could be used for non-jump insns.
1869*38fd1498Szrj When it is zero, only try to fill JUMP_INSNs.
1870*38fd1498Szrj
1871*38fd1498Szrj When slots are filled in this manner, the insns (including the
1872*38fd1498Szrj delay_insn) are put together in a SEQUENCE rtx. In this fashion,
1873*38fd1498Szrj it is possible to tell whether a delay slot has really been filled
1874*38fd1498Szrj or not. `final' knows how to deal with this, by communicating
1875*38fd1498Szrj through FINAL_SEQUENCE. */
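/* As a sketch of the representation (not tied to any particular port), a
   branch whose single delay slot has been filled is wrapped roughly like

       (insn (sequence [
          (jump_insn ...)        ;; the insn needing the delay slot
          (insn ...)             ;; the insn placed in its delay slot
        ]))

   with the insn needing the slots as element 0 of the SEQUENCE and the
   filler insns following it.  */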
1876*38fd1498Szrj
1877*38fd1498Szrj static void
1878*38fd1498Szrj fill_simple_delay_slots (int non_jumps_p)
1879*38fd1498Szrj {
1880*38fd1498Szrj rtx_insn *insn, *trial, *next_trial;
1881*38fd1498Szrj rtx pat;
1882*38fd1498Szrj int i;
1883*38fd1498Szrj int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
1884*38fd1498Szrj struct resources needed, set;
1885*38fd1498Szrj int slots_to_fill, slots_filled;
1886*38fd1498Szrj auto_vec<rtx_insn *, 5> delay_list;
1887*38fd1498Szrj
1888*38fd1498Szrj for (i = 0; i < num_unfilled_slots; i++)
1889*38fd1498Szrj {
1890*38fd1498Szrj int flags;
1891*38fd1498Szrj /* Get the next insn to fill. If it has already had any slots assigned,
1892*38fd1498Szrj we can't do anything with it. Maybe we'll improve this later. */
1893*38fd1498Szrj
1894*38fd1498Szrj insn = unfilled_slots_base[i];
1895*38fd1498Szrj if (insn == 0
1896*38fd1498Szrj || insn->deleted ()
1897*38fd1498Szrj || (NONJUMP_INSN_P (insn)
1898*38fd1498Szrj && GET_CODE (PATTERN (insn)) == SEQUENCE)
1899*38fd1498Szrj || (JUMP_P (insn) && non_jumps_p)
1900*38fd1498Szrj || (!JUMP_P (insn) && ! non_jumps_p))
1901*38fd1498Szrj continue;
1902*38fd1498Szrj
1903*38fd1498Szrj /* It may have been that this insn used to need delay slots, but
1904*38fd1498Szrj now doesn't; ignore in that case. This can happen, for example,
1905*38fd1498Szrj on the HP PA RISC, where the number of delay slots depends on
1906*38fd1498Szrj what insns are nearby. */
1907*38fd1498Szrj slots_to_fill = num_delay_slots (insn);
1908*38fd1498Szrj
1909*38fd1498Szrj /* Some machine descriptions define instructions that have
1910*38fd1498Szrj delay slots only in certain circumstances, which may depend on
1911*38fd1498Szrj nearby insns (and which change due to reorg's actions).
1912*38fd1498Szrj
1913*38fd1498Szrj For example, the PA port normally has delay slots for unconditional
1914*38fd1498Szrj jumps.
1915*38fd1498Szrj
1916*38fd1498Szrj However, the PA port claims such jumps do not have a delay slot
1917*38fd1498Szrj if they are immediate successors of certain CALL_INSNs. This
1918*38fd1498Szrj allows the port to favor filling the delay slot of the call with
1919*38fd1498Szrj the unconditional jump. */
1920*38fd1498Szrj if (slots_to_fill == 0)
1921*38fd1498Szrj continue;
1922*38fd1498Szrj
1923*38fd1498Szrj /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL
1924*38fd1498Szrj says how many. After initialization, first try optimizing
1925*38fd1498Szrj
1926*38fd1498Szrj call _foo call _foo
1927*38fd1498Szrj nop add %o7,.-L1,%o7
1928*38fd1498Szrj b,a L1
1929*38fd1498Szrj nop
1930*38fd1498Szrj
1931*38fd1498Szrj If this case applies, the delay slot of the call is filled with
1932*38fd1498Szrj the unconditional jump. This is done first to avoid having the
1933*38fd1498Szrj delay slot of the call filled in the backward scan. Also, since
1934*38fd1498Szrj the unconditional jump is likely to also have a delay slot, that
1935*38fd1498Szrj insn must exist when it is subsequently scanned.
1936*38fd1498Szrj
1937*38fd1498Szrj This is tried on each insn with delay slots as some machines
1938*38fd1498Szrj have insns which perform calls, but are not represented as
1939*38fd1498Szrj CALL_INSNs. */
1940*38fd1498Szrj
1941*38fd1498Szrj slots_filled = 0;
1942*38fd1498Szrj delay_list.truncate (0);
1943*38fd1498Szrj
1944*38fd1498Szrj if (JUMP_P (insn))
1945*38fd1498Szrj flags = get_jump_flags (insn, JUMP_LABEL (insn));
1946*38fd1498Szrj else
1947*38fd1498Szrj flags = get_jump_flags (insn, NULL_RTX);
1948*38fd1498Szrj
1949*38fd1498Szrj if ((trial = next_active_insn (insn))
1950*38fd1498Szrj && JUMP_P (trial)
1951*38fd1498Szrj && simplejump_p (trial)
1952*38fd1498Szrj && eligible_for_delay (insn, slots_filled, trial, flags)
1953*38fd1498Szrj && no_labels_between_p (insn, trial)
1954*38fd1498Szrj && ! can_throw_internal (trial))
1955*38fd1498Szrj {
1956*38fd1498Szrj rtx_insn **tmp;
1957*38fd1498Szrj slots_filled++;
1958*38fd1498Szrj add_to_delay_list (trial, &delay_list);
1959*38fd1498Szrj
1960*38fd1498Szrj /* TRIAL may have had its delay slot filled, then unfilled. When
1961*38fd1498Szrj the delay slot is unfilled, TRIAL is placed back on the unfilled
1962*38fd1498Szrj slots obstack. Unfortunately, it is placed on the end of the
1963*38fd1498Szrj obstack, not in its original location. Therefore, we must search
1964*38fd1498Szrj from entry i + 1 to the end of the unfilled slots obstack to
1965*38fd1498Szrj try and find TRIAL. */
1966*38fd1498Szrj tmp = &unfilled_slots_base[i + 1];
1967*38fd1498Szrj while (*tmp != trial && tmp != unfilled_slots_next)
1968*38fd1498Szrj tmp++;
1969*38fd1498Szrj
1970*38fd1498Szrj /* Remove the unconditional jump from consideration for delay slot
1971*38fd1498Szrj filling and unthread it. */
1972*38fd1498Szrj if (*tmp == trial)
1973*38fd1498Szrj *tmp = 0;
1974*38fd1498Szrj {
1975*38fd1498Szrj rtx_insn *next = NEXT_INSN (trial);
1976*38fd1498Szrj rtx_insn *prev = PREV_INSN (trial);
1977*38fd1498Szrj if (prev)
1978*38fd1498Szrj SET_NEXT_INSN (prev) = next;
1979*38fd1498Szrj if (next)
1980*38fd1498Szrj SET_PREV_INSN (next) = prev;
1981*38fd1498Szrj }
1982*38fd1498Szrj }
1983*38fd1498Szrj
1984*38fd1498Szrj /* Now, scan backwards from the insn to search for a potential
1985*38fd1498Szrj delay-slot candidate. Stop searching when a label or jump is hit.
1986*38fd1498Szrj
1987*38fd1498Szrj For each candidate, if it is to go into the delay slot (moved
1988*38fd1498Szrj forward in execution sequence), it must not need or set any resources
1989*38fd1498Szrj that were set by later insns and must not set any resources that
1990*38fd1498Szrj are needed for those insns.
1991*38fd1498Szrj
1992*38fd1498Szrj The delay slot insn itself sets resources unless it is a call
1993*38fd1498Szrj (in which case the called routine, not the insn itself, is doing
1994*38fd1498Szrj the setting). */
1995*38fd1498Szrj
1996*38fd1498Szrj if (slots_filled < slots_to_fill)
1997*38fd1498Szrj {
1998*38fd1498Szrj /* If the flags register is dead after the insn, then we want to be
1999*38fd1498Szrj able to accept a candidate that clobbers it. For this purpose,
2000*38fd1498Szrj we need to filter the flags register during life analysis, so
2001*38fd1498Szrj that it doesn't create RAW and WAW dependencies, while still
2002*38fd1498Szrj creating the necessary WAR dependencies. */
2003*38fd1498Szrj bool filter_flags
2004*38fd1498Szrj = (slots_to_fill == 1
2005*38fd1498Szrj && targetm.flags_regnum != INVALID_REGNUM
2006*38fd1498Szrj && find_regno_note (insn, REG_DEAD, targetm.flags_regnum));
2007*38fd1498Szrj struct resources fset;
2008*38fd1498Szrj CLEAR_RESOURCE (&needed);
2009*38fd1498Szrj CLEAR_RESOURCE (&set);
2010*38fd1498Szrj mark_set_resources (insn, &set, 0, MARK_SRC_DEST);
2011*38fd1498Szrj if (filter_flags)
2012*38fd1498Szrj {
2013*38fd1498Szrj CLEAR_RESOURCE (&fset);
2014*38fd1498Szrj mark_set_resources (insn, &fset, 0, MARK_SRC_DEST);
2015*38fd1498Szrj }
2016*38fd1498Szrj mark_referenced_resources (insn, &needed, false);
2017*38fd1498Szrj
2018*38fd1498Szrj for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1);
2019*38fd1498Szrj trial = next_trial)
2020*38fd1498Szrj {
2021*38fd1498Szrj next_trial = prev_nonnote_insn (trial);
2022*38fd1498Szrj
2023*38fd1498Szrj /* This must be an INSN or CALL_INSN. */
2024*38fd1498Szrj pat = PATTERN (trial);
2025*38fd1498Szrj
2026*38fd1498Szrj /* Stand-alone USE and CLOBBER are just for flow. */
2027*38fd1498Szrj if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2028*38fd1498Szrj continue;
2029*38fd1498Szrj
2030*38fd1498Szrj /* And DEBUG_INSNs never go into delay slots. */
2031*38fd1498Szrj if (GET_CODE (trial) == DEBUG_INSN)
2032*38fd1498Szrj continue;
2033*38fd1498Szrj
2034*38fd1498Szrj /* Check for resource conflict first, to avoid unnecessary
2035*38fd1498Szrj splitting. */
2036*38fd1498Szrj if (! insn_references_resource_p (trial, &set, true)
2037*38fd1498Szrj && ! insn_sets_resource_p (trial,
2038*38fd1498Szrj filter_flags ? &fset : &set,
2039*38fd1498Szrj true)
2040*38fd1498Szrj && ! insn_sets_resource_p (trial, &needed, true)
2041*38fd1498Szrj /* Can't separate set of cc0 from its use. */
2042*38fd1498Szrj && (!HAVE_cc0 || ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)))
2043*38fd1498Szrj && ! can_throw_internal (trial))
2044*38fd1498Szrj {
2045*38fd1498Szrj trial = try_split (pat, trial, 1);
2046*38fd1498Szrj next_trial = prev_nonnote_insn (trial);
2047*38fd1498Szrj if (eligible_for_delay (insn, slots_filled, trial, flags))
2048*38fd1498Szrj {
2049*38fd1498Szrj /* In this case, we are searching backward, so if we
2050*38fd1498Szrj find insns to put on the delay list, we want
2051*38fd1498Szrj to put them at the head, rather than the
2052*38fd1498Szrj tail, of the list. */
2053*38fd1498Szrj
2054*38fd1498Szrj update_reg_dead_notes (trial, insn);
2055*38fd1498Szrj delay_list.safe_insert (0, trial);
2056*38fd1498Szrj update_block (trial, trial);
2057*38fd1498Szrj delete_related_insns (trial);
2058*38fd1498Szrj if (slots_to_fill == ++slots_filled)
2059*38fd1498Szrj break;
2060*38fd1498Szrj continue;
2061*38fd1498Szrj }
2062*38fd1498Szrj }
2063*38fd1498Szrj
2064*38fd1498Szrj mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2065*38fd1498Szrj if (filter_flags)
2066*38fd1498Szrj {
2067*38fd1498Szrj mark_set_resources (trial, &fset, 0, MARK_SRC_DEST_CALL);
2068*38fd1498Szrj /* If the flags register is set, then it doesn't create RAW
2069*38fd1498Szrj dependencies any longer and it also doesn't create WAW
2070*38fd1498Szrj dependencies since it's dead after the original insn. */
2071*38fd1498Szrj if (TEST_HARD_REG_BIT (fset.regs, targetm.flags_regnum))
2072*38fd1498Szrj {
2073*38fd1498Szrj CLEAR_HARD_REG_BIT (needed.regs, targetm.flags_regnum);
2074*38fd1498Szrj CLEAR_HARD_REG_BIT (fset.regs, targetm.flags_regnum);
2075*38fd1498Szrj }
2076*38fd1498Szrj }
2077*38fd1498Szrj mark_referenced_resources (trial, &needed, true);
2078*38fd1498Szrj }
2079*38fd1498Szrj }
2080*38fd1498Szrj
2081*38fd1498Szrj /* If all needed slots haven't been filled, we come here. */
2082*38fd1498Szrj
2083*38fd1498Szrj /* Try to optimize case of jumping around a single insn. */
2084*38fd1498Szrj if ((ANNUL_IFTRUE_SLOTS || ANNUL_IFFALSE_SLOTS)
2085*38fd1498Szrj && slots_filled != slots_to_fill
2086*38fd1498Szrj && delay_list.is_empty ()
2087*38fd1498Szrj && JUMP_P (insn)
2088*38fd1498Szrj && (condjump_p (insn) || condjump_in_parallel_p (insn))
2089*38fd1498Szrj && !ANY_RETURN_P (JUMP_LABEL (insn)))
2090*38fd1498Szrj {
2091*38fd1498Szrj optimize_skip (as_a <rtx_jump_insn *> (insn), &delay_list);
2092*38fd1498Szrj if (!delay_list.is_empty ())
2093*38fd1498Szrj slots_filled += 1;
2094*38fd1498Szrj }
2095*38fd1498Szrj
2096*38fd1498Szrj /* Try to get insns from beyond the insn needing the delay slot.
2097*38fd1498Szrj These insns can neither set nor reference resources set in insns being
2098*38fd1498Szrj skipped, cannot set resources in the insn being skipped, and, if this
2099*38fd1498Szrj is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the
2100*38fd1498Szrj call might not return).
2101*38fd1498Szrj
2102*38fd1498Szrj There used to be code which continued past the target label if
2103*38fd1498Szrj we saw all uses of the target label. This code did not work,
2104*38fd1498Szrj because it failed to account for some instructions which were
2105*38fd1498Szrj both annulled and marked as from the target. This can happen as a
2106*38fd1498Szrj result of optimize_skip. Since this code was redundant with
2107*38fd1498Szrj fill_eager_delay_slots anyway, it was just deleted. */
2108*38fd1498Szrj
2109*38fd1498Szrj if (slots_filled != slots_to_fill
2110*38fd1498Szrj /* If this instruction could throw an exception which is
2111*38fd1498Szrj caught in the same function, then it's not safe to fill
2112*38fd1498Szrj the delay slot with an instruction from beyond this
2113*38fd1498Szrj point. For example, consider:
2114*38fd1498Szrj
2115*38fd1498Szrj int i = 2;
2116*38fd1498Szrj
2117*38fd1498Szrj try {
2118*38fd1498Szrj f();
2119*38fd1498Szrj i = 3;
2120*38fd1498Szrj } catch (...) {}
2121*38fd1498Szrj
2122*38fd1498Szrj return i;
2123*38fd1498Szrj
2124*38fd1498Szrj Even though `i' is a local variable, we must be sure not
2125*38fd1498Szrj to put `i = 3' in the delay slot if `f' might throw an
2126*38fd1498Szrj exception.
2127*38fd1498Szrj
2128*38fd1498Szrj Presumably, we should also check to see if we could get
2129*38fd1498Szrj back to this function via `setjmp'. */
2130*38fd1498Szrj && ! can_throw_internal (insn)
2131*38fd1498Szrj && !JUMP_P (insn))
2132*38fd1498Szrj {
2133*38fd1498Szrj int maybe_never = 0;
2134*38fd1498Szrj rtx pat, trial_delay;
2135*38fd1498Szrj
2136*38fd1498Szrj CLEAR_RESOURCE (&needed);
2137*38fd1498Szrj CLEAR_RESOURCE (&set);
2138*38fd1498Szrj mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
2139*38fd1498Szrj mark_referenced_resources (insn, &needed, true);
2140*38fd1498Szrj
2141*38fd1498Szrj if (CALL_P (insn))
2142*38fd1498Szrj maybe_never = 1;
2143*38fd1498Szrj
2144*38fd1498Szrj for (trial = next_nonnote_insn (insn); !stop_search_p (trial, 1);
2145*38fd1498Szrj trial = next_trial)
2146*38fd1498Szrj {
2147*38fd1498Szrj next_trial = next_nonnote_insn (trial);
2148*38fd1498Szrj
2149*38fd1498Szrj /* This must be an INSN or CALL_INSN. */
2150*38fd1498Szrj pat = PATTERN (trial);
2151*38fd1498Szrj
2152*38fd1498Szrj /* Stand-alone USE and CLOBBER are just for flow. */
2153*38fd1498Szrj if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2154*38fd1498Szrj continue;
2155*38fd1498Szrj
2156*38fd1498Szrj /* And DEBUG_INSNs do not go in delay slots. */
2157*38fd1498Szrj if (GET_CODE (trial) == DEBUG_INSN)
2158*38fd1498Szrj continue;
2159*38fd1498Szrj
2160*38fd1498Szrj /* If this already has filled delay slots, get the insn needing
2161*38fd1498Szrj the delay slots. */
2162*38fd1498Szrj if (GET_CODE (pat) == SEQUENCE)
2163*38fd1498Szrj trial_delay = XVECEXP (pat, 0, 0);
2164*38fd1498Szrj else
2165*38fd1498Szrj trial_delay = trial;
2166*38fd1498Szrj
2167*38fd1498Szrj /* Stop our search when seeing a jump. */
2168*38fd1498Szrj if (JUMP_P (trial_delay))
2169*38fd1498Szrj break;
2170*38fd1498Szrj
2171*38fd1498Szrj /* See if we have a resource problem before we try to split. */
2172*38fd1498Szrj if (GET_CODE (pat) != SEQUENCE
2173*38fd1498Szrj && ! insn_references_resource_p (trial, &set, true)
2174*38fd1498Szrj && ! insn_sets_resource_p (trial, &set, true)
2175*38fd1498Szrj && ! insn_sets_resource_p (trial, &needed, true)
2176*38fd1498Szrj && (!HAVE_cc0 || ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)))
2177*38fd1498Szrj && ! (maybe_never && may_trap_or_fault_p (pat))
2178*38fd1498Szrj && (trial = try_split (pat, trial, 0))
2179*38fd1498Szrj && eligible_for_delay (insn, slots_filled, trial, flags)
2180*38fd1498Szrj && ! can_throw_internal (trial))
2181*38fd1498Szrj {
2182*38fd1498Szrj next_trial = next_nonnote_insn (trial);
2183*38fd1498Szrj add_to_delay_list (trial, &delay_list);
2184*38fd1498Szrj if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, pat))
2185*38fd1498Szrj link_cc0_insns (trial);
2186*38fd1498Szrj
2187*38fd1498Szrj delete_related_insns (trial);
2188*38fd1498Szrj if (slots_to_fill == ++slots_filled)
2189*38fd1498Szrj break;
2190*38fd1498Szrj continue;
2191*38fd1498Szrj }
2192*38fd1498Szrj
2193*38fd1498Szrj mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2194*38fd1498Szrj mark_referenced_resources (trial, &needed, true);
2195*38fd1498Szrj
2196*38fd1498Szrj /* Ensure we don't put insns between the setting of cc and the
2197*38fd1498Szrj comparison by moving a setting of cc into an earlier delay
2198*38fd1498Szrj slot, since such an insn could clobber the condition code. */
2199*38fd1498Szrj set.cc = 1;
2200*38fd1498Szrj
2201*38fd1498Szrj /* If this is a call, we might not get here. */
2202*38fd1498Szrj if (CALL_P (trial_delay))
2203*38fd1498Szrj maybe_never = 1;
2204*38fd1498Szrj }
2205*38fd1498Szrj
2206*38fd1498Szrj /* If there are slots left to fill and our search was stopped by an
2207*38fd1498Szrj unconditional branch, try the insn at the branch target. We can
2208*38fd1498Szrj redirect the branch if it works.
2209*38fd1498Szrj
2210*38fd1498Szrj Don't do this if the insn at the branch target is a branch. */
2211*38fd1498Szrj if (slots_to_fill != slots_filled
2212*38fd1498Szrj && trial
2213*38fd1498Szrj && jump_to_label_p (trial)
2214*38fd1498Szrj && simplejump_p (trial)
2215*38fd1498Szrj && (next_trial = next_active_insn (JUMP_LABEL_AS_INSN (trial))) != 0
2216*38fd1498Szrj && ! (NONJUMP_INSN_P (next_trial)
2217*38fd1498Szrj && GET_CODE (PATTERN (next_trial)) == SEQUENCE)
2218*38fd1498Szrj && !JUMP_P (next_trial)
2219*38fd1498Szrj && ! insn_references_resource_p (next_trial, &set, true)
2220*38fd1498Szrj && ! insn_sets_resource_p (next_trial, &set, true)
2221*38fd1498Szrj && ! insn_sets_resource_p (next_trial, &needed, true)
2222*38fd1498Szrj && (!HAVE_cc0 || ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial)))
2223*38fd1498Szrj && ! (maybe_never && may_trap_or_fault_p (PATTERN (next_trial)))
2224*38fd1498Szrj && (next_trial = try_split (PATTERN (next_trial), next_trial, 0))
2225*38fd1498Szrj && eligible_for_delay (insn, slots_filled, next_trial, flags)
2226*38fd1498Szrj && ! can_throw_internal (trial))
2227*38fd1498Szrj {
2228*38fd1498Szrj /* See comment in relax_delay_slots about necessity of using
2229*38fd1498Szrj next_real_nondebug_insn here. */
2230*38fd1498Szrj rtx_insn *new_label = next_real_nondebug_insn (next_trial);
2231*38fd1498Szrj
2232*38fd1498Szrj if (new_label != 0)
2233*38fd1498Szrj new_label = get_label_before (new_label, JUMP_LABEL (trial));
2234*38fd1498Szrj else
2235*38fd1498Szrj new_label = find_end_label (simple_return_rtx);
2236*38fd1498Szrj
2237*38fd1498Szrj if (new_label)
2238*38fd1498Szrj {
2239*38fd1498Szrj add_to_delay_list (copy_delay_slot_insn (next_trial),
2240*38fd1498Szrj &delay_list);
2241*38fd1498Szrj slots_filled++;
2242*38fd1498Szrj reorg_redirect_jump (as_a <rtx_jump_insn *> (trial),
2243*38fd1498Szrj new_label);
2244*38fd1498Szrj }
2245*38fd1498Szrj }
2246*38fd1498Szrj }
2247*38fd1498Szrj
2248*38fd1498Szrj /* If this is an unconditional jump, then try to get insns from the
2249*38fd1498Szrj target of the jump. */
2250*38fd1498Szrj rtx_jump_insn *jump_insn;
2251*38fd1498Szrj if ((jump_insn = dyn_cast <rtx_jump_insn *> (insn))
2252*38fd1498Szrj && simplejump_p (jump_insn)
2253*38fd1498Szrj && slots_filled != slots_to_fill)
2254*38fd1498Szrj fill_slots_from_thread (jump_insn, const_true_rtx,
2255*38fd1498Szrj next_active_insn (JUMP_LABEL_AS_INSN (insn)),
2256*38fd1498Szrj NULL, 1, 1, own_thread_p (JUMP_LABEL (insn),
2257*38fd1498Szrj JUMP_LABEL (insn), 0),
2258*38fd1498Szrj slots_to_fill, &slots_filled, &delay_list);
2259*38fd1498Szrj
2260*38fd1498Szrj if (!delay_list.is_empty ())
2261*38fd1498Szrj unfilled_slots_base[i]
2262*38fd1498Szrj = emit_delay_sequence (insn, delay_list, slots_filled);
2263*38fd1498Szrj
2264*38fd1498Szrj if (slots_to_fill == slots_filled)
2265*38fd1498Szrj unfilled_slots_base[i] = 0;
2266*38fd1498Szrj
2267*38fd1498Szrj note_delay_statistics (slots_filled, 0);
2268*38fd1498Szrj }
2269*38fd1498Szrj }
2270*38fd1498Szrj
2271*38fd1498Szrj /* Follow any unconditional jump at LABEL, for the purpose of redirecting JUMP;
2272*38fd1498Szrj return the ultimate label reached by any such chain of jumps.
2273*38fd1498Szrj Return a suitable return rtx if the chain ultimately leads to a
2274*38fd1498Szrj return instruction.
2275*38fd1498Szrj If LABEL is not followed by a jump, return LABEL.
2276*38fd1498Szrj If the chain loops or we can't find the end, return LABEL,
2277*38fd1498Szrj since that tells the caller to avoid changing the insn.
2278*38fd1498Szrj If the returned label is obtained by following a crossing jump,
2279*38fd1498Szrj set *CROSSING to true, otherwise set it to false. */
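/* Illustrative example (all labels hypothetical):

       L1: b  L2
           ...
       L2: b  L3
           ...
       L3: add ...

   Starting from L1, the chain of unconditional jumps is followed and L3 is
   returned, provided the chain is short, does not loop, and each hop is
   permitted by targetm.can_follow_jump.  */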
2280*38fd1498Szrj
2281*38fd1498Szrj static rtx
2282*38fd1498Szrj follow_jumps (rtx label, rtx_insn *jump, bool *crossing)
2283*38fd1498Szrj {
2284*38fd1498Szrj rtx_insn *insn;
2285*38fd1498Szrj rtx_insn *next;
2286*38fd1498Szrj int depth;
2287*38fd1498Szrj
2288*38fd1498Szrj *crossing = false;
2289*38fd1498Szrj if (ANY_RETURN_P (label))
2290*38fd1498Szrj return label;
2291*38fd1498Szrj
2292*38fd1498Szrj rtx_insn *value = as_a <rtx_insn *> (label);
2293*38fd1498Szrj
2294*38fd1498Szrj for (depth = 0;
2295*38fd1498Szrj (depth < 10
2296*38fd1498Szrj && (insn = next_active_insn (value)) != 0
2297*38fd1498Szrj && JUMP_P (insn)
2298*38fd1498Szrj && JUMP_LABEL (insn) != NULL_RTX
2299*38fd1498Szrj && ((any_uncondjump_p (insn) && onlyjump_p (insn))
2300*38fd1498Szrj || ANY_RETURN_P (PATTERN (insn)))
2301*38fd1498Szrj && (next = NEXT_INSN (insn))
2302*38fd1498Szrj && BARRIER_P (next));
2303*38fd1498Szrj depth++)
2304*38fd1498Szrj {
2305*38fd1498Szrj rtx this_label_or_return = JUMP_LABEL (insn);
2306*38fd1498Szrj
2307*38fd1498Szrj /* If we have found a cycle, make the insn jump to itself. */
2308*38fd1498Szrj if (this_label_or_return == label)
2309*38fd1498Szrj return label;
2310*38fd1498Szrj
2311*38fd1498Szrj /* Cannot follow returns and cannot look through tablejumps. */
2312*38fd1498Szrj if (ANY_RETURN_P (this_label_or_return))
2313*38fd1498Szrj return this_label_or_return;
2314*38fd1498Szrj
2315*38fd1498Szrj rtx_insn *this_label = as_a <rtx_insn *> (this_label_or_return);
2316*38fd1498Szrj if (NEXT_INSN (this_label)
2317*38fd1498Szrj && JUMP_TABLE_DATA_P (NEXT_INSN (this_label)))
2318*38fd1498Szrj break;
2319*38fd1498Szrj
2320*38fd1498Szrj if (!targetm.can_follow_jump (jump, insn))
2321*38fd1498Szrj break;
2322*38fd1498Szrj if (!*crossing)
2323*38fd1498Szrj *crossing = CROSSING_JUMP_P (jump);
2324*38fd1498Szrj value = this_label;
2325*38fd1498Szrj }
2326*38fd1498Szrj if (depth == 10)
2327*38fd1498Szrj return label;
2328*38fd1498Szrj return value;
2329*38fd1498Szrj }
2330*38fd1498Szrj
2331*38fd1498Szrj /* Try to find insns to place in delay slots.
2332*38fd1498Szrj
2333*38fd1498Szrj INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION
2334*38fd1498Szrj or is an unconditional branch if CONDITION is const_true_rtx.
2335*38fd1498Szrj *PSLOTS_FILLED is updated with the number of slots that we have filled.
2336*38fd1498Szrj
2337*38fd1498Szrj THREAD is a flow of control: either the insns to be executed if the
2338*38fd1498Szrj branch is true or those to be executed if it is false; THREAD_IF_TRUE says which.
2339*38fd1498Szrj
2340*38fd1498Szrj OPPOSITE_THREAD is the thread in the opposite direction. It is used
2341*38fd1498Szrj to see if any potential delay slot insns set things needed there.
2342*38fd1498Szrj
2343*38fd1498Szrj LIKELY is nonzero if it is extremely likely that the branch will be
2344*38fd1498Szrj taken and THREAD_IF_TRUE is set. This is used for the branch at the
2345*38fd1498Szrj end of a loop back up to the top.
2346*38fd1498Szrj
2347*38fd1498Szrj OWN_THREAD is true if we are the only user of the thread, i.e. it is
2348*38fd1498Szrj the target of the jump when we are the only jump going there.
2349*38fd1498Szrj
2350*38fd1498Szrj If OWN_THREAD is false, it must be the "true" thread of a jump. In that
2351*38fd1498Szrj case, we can only take insns from the head of the thread for our delay
2352*38fd1498Szrj slot. We then adjust the jump to point after the insns we have taken. */
2353*38fd1498Szrj
2354*38fd1498Szrj static void
2355*38fd1498Szrj fill_slots_from_thread (rtx_jump_insn *insn, rtx condition,
2356*38fd1498Szrj rtx thread_or_return, rtx opposite_thread, int likely,
2357*38fd1498Szrj int thread_if_true, int own_thread, int slots_to_fill,
2358*38fd1498Szrj int *pslots_filled, vec<rtx_insn *> *delay_list)
2359*38fd1498Szrj {
2360*38fd1498Szrj rtx new_thread;
2361*38fd1498Szrj struct resources opposite_needed, set, needed;
2362*38fd1498Szrj rtx_insn *trial;
2363*38fd1498Szrj int lose = 0;
2364*38fd1498Szrj int must_annul = 0;
2365*38fd1498Szrj int flags;
2366*38fd1498Szrj
2367*38fd1498Szrj /* Validate our arguments. */
2368*38fd1498Szrj gcc_assert (condition != const_true_rtx || thread_if_true);
2369*38fd1498Szrj gcc_assert (own_thread || thread_if_true);
2370*38fd1498Szrj
2371*38fd1498Szrj flags = get_jump_flags (insn, JUMP_LABEL (insn));
2372*38fd1498Szrj
2373*38fd1498Szrj /* If our thread is the end of subroutine, we can't get any delay
2374*38fd1498Szrj insns from that. */
2375*38fd1498Szrj if (thread_or_return == NULL_RTX || ANY_RETURN_P (thread_or_return))
2376*38fd1498Szrj return;
2377*38fd1498Szrj
2378*38fd1498Szrj rtx_insn *thread = as_a <rtx_insn *> (thread_or_return);
2379*38fd1498Szrj
2380*38fd1498Szrj /* If this is an unconditional branch, nothing is needed at the
2381*38fd1498Szrj opposite thread. Otherwise, compute what is needed there. */
2382*38fd1498Szrj if (condition == const_true_rtx)
2383*38fd1498Szrj CLEAR_RESOURCE (&opposite_needed);
2384*38fd1498Szrj else
2385*38fd1498Szrj mark_target_live_regs (get_insns (), opposite_thread, &opposite_needed);
2386*38fd1498Szrj
2387*38fd1498Szrj /* If the insn at THREAD can be split, do it here to avoid having to
2388*38fd1498Szrj update THREAD and NEW_THREAD if it is done in the loop below. Also
2389*38fd1498Szrj initialize NEW_THREAD. */
2390*38fd1498Szrj
2391*38fd1498Szrj new_thread = thread = try_split (PATTERN (thread), thread, 0);
2392*38fd1498Szrj
2393*38fd1498Szrj /* Scan insns at THREAD. We are looking for an insn that can be removed
2394*38fd1498Szrj from THREAD (it neither sets nor references resources that were set
2395*38fd1498Szrj ahead of it and it doesn't set anything needed by the insns ahead of
2396*38fd1498Szrj it) and that either can be placed in an annulling insn or isn't
2397*38fd1498Szrj needed at OPPOSITE_THREAD. */
2398*38fd1498Szrj
2399*38fd1498Szrj CLEAR_RESOURCE (&needed);
2400*38fd1498Szrj CLEAR_RESOURCE (&set);
2401*38fd1498Szrj
2402*38fd1498Szrj /* If we do not own this thread, we must stop as soon as we find
2403*38fd1498Szrj something that we can't put in a delay slot, since all we can do
2404*38fd1498Szrj is branch into THREAD at a later point. Therefore, labels stop
2405*38fd1498Szrj the search if this is not the `true' thread. */
2406*38fd1498Szrj
2407*38fd1498Szrj for (trial = thread;
2408*38fd1498Szrj ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread);
2409*38fd1498Szrj trial = next_nonnote_insn (trial))
2410*38fd1498Szrj {
2411*38fd1498Szrj rtx pat, old_trial;
2412*38fd1498Szrj
2413*38fd1498Szrj /* If we have passed a label, we no longer own this thread. */
2414*38fd1498Szrj if (LABEL_P (trial))
2415*38fd1498Szrj {
2416*38fd1498Szrj own_thread = 0;
2417*38fd1498Szrj continue;
2418*38fd1498Szrj }
2419*38fd1498Szrj
2420*38fd1498Szrj pat = PATTERN (trial);
2421*38fd1498Szrj if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
2422*38fd1498Szrj continue;
2423*38fd1498Szrj
2424*38fd1498Szrj if (GET_CODE (trial) == DEBUG_INSN)
2425*38fd1498Szrj continue;
2426*38fd1498Szrj
2427*38fd1498Szrj /* If TRIAL conflicts with the insns ahead of it, we lose. Also,
2428*38fd1498Szrj don't separate or copy insns that set and use CC0. */
2429*38fd1498Szrj if (! insn_references_resource_p (trial, &set, true)
2430*38fd1498Szrj && ! insn_sets_resource_p (trial, &set, true)
2431*38fd1498Szrj && ! insn_sets_resource_p (trial, &needed, true)
2432*38fd1498Szrj && (!HAVE_cc0 || (! (reg_mentioned_p (cc0_rtx, pat)
2433*38fd1498Szrj && (! own_thread || ! sets_cc0_p (pat)))))
2434*38fd1498Szrj && ! can_throw_internal (trial))
2435*38fd1498Szrj {
2436*38fd1498Szrj rtx_insn *prior_insn;
2437*38fd1498Szrj
2438*38fd1498Szrj /* If TRIAL is redundant with some insn before INSN, we don't
2439*38fd1498Szrj actually need to add it to the delay list; we can merely pretend
2440*38fd1498Szrj we did. */
2441*38fd1498Szrj if ((prior_insn = redundant_insn (trial, insn, *delay_list)))
2442*38fd1498Szrj {
2443*38fd1498Szrj fix_reg_dead_note (prior_insn, insn);
2444*38fd1498Szrj if (own_thread)
2445*38fd1498Szrj {
2446*38fd1498Szrj update_block (trial, thread);
2447*38fd1498Szrj if (trial == thread)
2448*38fd1498Szrj {
2449*38fd1498Szrj thread = next_active_insn (thread);
2450*38fd1498Szrj if (new_thread == trial)
2451*38fd1498Szrj new_thread = thread;
2452*38fd1498Szrj }
2453*38fd1498Szrj
2454*38fd1498Szrj delete_related_insns (trial);
2455*38fd1498Szrj }
2456*38fd1498Szrj else
2457*38fd1498Szrj {
2458*38fd1498Szrj update_reg_unused_notes (prior_insn, trial);
2459*38fd1498Szrj new_thread = next_active_insn (trial);
2460*38fd1498Szrj }
2461*38fd1498Szrj
2462*38fd1498Szrj continue;
2463*38fd1498Szrj }
2464*38fd1498Szrj
2465*38fd1498Szrj /* There are two ways we can win: If TRIAL doesn't set anything
2466*38fd1498Szrj needed at the opposite thread and can't trap, or if it can
2467*38fd1498Szrj go into an annulled delay slot. But we want neither to copy
2468*38fd1498Szrj nor to speculate frame-related insns. */
2469*38fd1498Szrj if (!must_annul
2470*38fd1498Szrj && ((condition == const_true_rtx
2471*38fd1498Szrj && (own_thread || !RTX_FRAME_RELATED_P (trial)))
2472*38fd1498Szrj || (! insn_sets_resource_p (trial, &opposite_needed, true)
2473*38fd1498Szrj && ! may_trap_or_fault_p (pat)
2474*38fd1498Szrj && ! RTX_FRAME_RELATED_P (trial))))
2475*38fd1498Szrj {
2476*38fd1498Szrj old_trial = trial;
2477*38fd1498Szrj trial = try_split (pat, trial, 0);
2478*38fd1498Szrj if (new_thread == old_trial)
2479*38fd1498Szrj new_thread = trial;
2480*38fd1498Szrj if (thread == old_trial)
2481*38fd1498Szrj thread = trial;
2482*38fd1498Szrj pat = PATTERN (trial);
2483*38fd1498Szrj if (eligible_for_delay (insn, *pslots_filled, trial, flags))
2484*38fd1498Szrj goto winner;
2485*38fd1498Szrj }
2486*38fd1498Szrj else if (!RTX_FRAME_RELATED_P (trial)
2487*38fd1498Szrj && ((ANNUL_IFTRUE_SLOTS && ! thread_if_true)
2488*38fd1498Szrj || (ANNUL_IFFALSE_SLOTS && thread_if_true)))
2489*38fd1498Szrj {
2490*38fd1498Szrj old_trial = trial;
2491*38fd1498Szrj trial = try_split (pat, trial, 0);
2492*38fd1498Szrj if (new_thread == old_trial)
2493*38fd1498Szrj new_thread = trial;
2494*38fd1498Szrj if (thread == old_trial)
2495*38fd1498Szrj thread = trial;
2496*38fd1498Szrj pat = PATTERN (trial);
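	  /* See whether TRIAL can go into an annulled delay slot of INSN: we
	     only try this when the delay list is empty or we are already
	     committed to annulling, every insn already in the list must be
	     annulled in the same direction, and TRIAL itself must be eligible
	     for that kind of annulled slot.  */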
2497*38fd1498Szrj if ((must_annul || delay_list->is_empty ()) && (thread_if_true
2498*38fd1498Szrj ? check_annul_list_true_false (0, *delay_list)
2499*38fd1498Szrj && eligible_for_annul_false (insn, *pslots_filled, trial, flags)
2500*38fd1498Szrj : check_annul_list_true_false (1, *delay_list)
2501*38fd1498Szrj && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
2502*38fd1498Szrj {
2503*38fd1498Szrj rtx_insn *temp;
2504*38fd1498Szrj
2505*38fd1498Szrj must_annul = 1;
2506*38fd1498Szrj winner:
2507*38fd1498Szrj
2508*38fd1498Szrj if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, pat))
2509*38fd1498Szrj link_cc0_insns (trial);
2510*38fd1498Szrj
2511*38fd1498Szrj /* If we own this thread, delete the insn. If this is the
2512*38fd1498Szrj destination of a branch, show that a basic block status
2513*38fd1498Szrj may have been updated. In any case, mark the new
2514*38fd1498Szrj starting point of this thread. */
2515*38fd1498Szrj if (own_thread)
2516*38fd1498Szrj {
2517*38fd1498Szrj rtx note;
2518*38fd1498Szrj
2519*38fd1498Szrj update_block (trial, thread);
2520*38fd1498Szrj if (trial == thread)
2521*38fd1498Szrj {
2522*38fd1498Szrj thread = next_active_insn (thread);
2523*38fd1498Szrj if (new_thread == trial)
2524*38fd1498Szrj new_thread = thread;
2525*38fd1498Szrj }
2526*38fd1498Szrj
2527*38fd1498Szrj /* We are moving this insn, not deleting it. We must
2528*38fd1498Szrj temporarily increment the use count on any referenced
2529*38fd1498Szrj label lest it be deleted by delete_related_insns. */
2530*38fd1498Szrj for (note = REG_NOTES (trial);
2531*38fd1498Szrj note != NULL_RTX;
2532*38fd1498Szrj note = XEXP (note, 1))
2533*38fd1498Szrj if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
2534*38fd1498Szrj || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
2535*38fd1498Szrj {
2536*38fd1498Szrj /* REG_LABEL_OPERAND could be
2537*38fd1498Szrj NOTE_INSN_DELETED_LABEL too. */
2538*38fd1498Szrj if (LABEL_P (XEXP (note, 0)))
2539*38fd1498Szrj LABEL_NUSES (XEXP (note, 0))++;
2540*38fd1498Szrj else
2541*38fd1498Szrj gcc_assert (REG_NOTE_KIND (note)
2542*38fd1498Szrj == REG_LABEL_OPERAND);
2543*38fd1498Szrj }
2544*38fd1498Szrj if (jump_to_label_p (trial))
2545*38fd1498Szrj LABEL_NUSES (JUMP_LABEL (trial))++;
2546*38fd1498Szrj
2547*38fd1498Szrj delete_related_insns (trial);
2548*38fd1498Szrj
2549*38fd1498Szrj for (note = REG_NOTES (trial);
2550*38fd1498Szrj note != NULL_RTX;
2551*38fd1498Szrj note = XEXP (note, 1))
2552*38fd1498Szrj if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
2553*38fd1498Szrj || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
2554*38fd1498Szrj {
2555*38fd1498Szrj /* REG_LABEL_OPERAND could be
2556*38fd1498Szrj NOTE_INSN_DELETED_LABEL too. */
2557*38fd1498Szrj if (LABEL_P (XEXP (note, 0)))
2558*38fd1498Szrj LABEL_NUSES (XEXP (note, 0))--;
2559*38fd1498Szrj else
2560*38fd1498Szrj gcc_assert (REG_NOTE_KIND (note)
2561*38fd1498Szrj == REG_LABEL_OPERAND);
2562*38fd1498Szrj }
2563*38fd1498Szrj if (jump_to_label_p (trial))
2564*38fd1498Szrj LABEL_NUSES (JUMP_LABEL (trial))--;
2565*38fd1498Szrj }
2566*38fd1498Szrj else
2567*38fd1498Szrj new_thread = next_active_insn (trial);
2568*38fd1498Szrj
2569*38fd1498Szrj temp = own_thread ? trial : copy_delay_slot_insn (trial);
2570*38fd1498Szrj if (thread_if_true)
2571*38fd1498Szrj INSN_FROM_TARGET_P (temp) = 1;
2572*38fd1498Szrj
2573*38fd1498Szrj add_to_delay_list (temp, delay_list);
2574*38fd1498Szrj
2575*38fd1498Szrj if (slots_to_fill == ++(*pslots_filled))
2576*38fd1498Szrj {
2577*38fd1498Szrj /* Even though we have filled all the slots, we
2578*38fd1498Szrj may be branching to a location that has a
2579*38fd1498Szrj redundant insn. Skip any if so. */
2580*38fd1498Szrj while (new_thread && ! own_thread
2581*38fd1498Szrj && ! insn_sets_resource_p (new_thread, &set, true)
2582*38fd1498Szrj && ! insn_sets_resource_p (new_thread, &needed,
2583*38fd1498Szrj true)
2584*38fd1498Szrj && ! insn_references_resource_p (new_thread,
2585*38fd1498Szrj &set, true)
2586*38fd1498Szrj && (prior_insn
2587*38fd1498Szrj = redundant_insn (new_thread, insn,
2588*38fd1498Szrj *delay_list)))
2589*38fd1498Szrj {
2590*38fd1498Szrj /* We know we do not own the thread, so no need
2591*38fd1498Szrj to call update_block and delete_insn. */
2592*38fd1498Szrj fix_reg_dead_note (prior_insn, insn);
2593*38fd1498Szrj update_reg_unused_notes (prior_insn, new_thread);
2594*38fd1498Szrj new_thread
2595*38fd1498Szrj = next_active_insn (as_a<rtx_insn *> (new_thread));
2596*38fd1498Szrj }
2597*38fd1498Szrj break;
2598*38fd1498Szrj }
2599*38fd1498Szrj
2600*38fd1498Szrj continue;
2601*38fd1498Szrj }
2602*38fd1498Szrj }
2603*38fd1498Szrj }
2604*38fd1498Szrj
2605*38fd1498Szrj /* This insn can't go into a delay slot. */
2606*38fd1498Szrj lose = 1;
2607*38fd1498Szrj mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
2608*38fd1498Szrj mark_referenced_resources (trial, &needed, true);
2609*38fd1498Szrj
2610*38fd1498Szrj /* Ensure we don't put insns between the setting of cc and the comparison
2611*38fd1498Szrj by moving a setting of cc into an earlier delay slot since these insns
2612*38fd1498Szrj could clobber the condition code. */
2613*38fd1498Szrj set.cc = 1;
2614*38fd1498Szrj
2615*38fd1498Szrj /* If this insn is a register-register copy and the next insn has
2616*38fd1498Szrj a use of our destination, change it to use our source. That way,
2617*38fd1498Szrj it will become a candidate for our delay slot the next time
2618*38fd1498Szrj through this loop. This case occurs commonly in loops that
2619*38fd1498Szrj scan a list.
2620*38fd1498Szrj
2621*38fd1498Szrj We could check for more complex cases than those tested below,
2622*38fd1498Szrj but it doesn't seem worth it. It might also be a good idea to try
2623*38fd1498Szrj to swap the two insns. That might do better.
2624*38fd1498Szrj
2625*38fd1498Szrj We can't do this if the next insn modifies our destination, because
2626*38fd1498Szrj that would make the replacement into the insn invalid. We also can't
2627*38fd1498Szrj do this if it modifies our source, because it might be an earlyclobber
2628*38fd1498Szrj operand. This latter test also prevents updating the contents of
2629*38fd1498Szrj a PRE_INC. We also can't do this if there's overlap of source and
2630*38fd1498Szrj destination. Overlap may happen for larger-than-register-size modes. */
2631*38fd1498Szrj
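	  /* Illustrative example (hypothetical pseudo registers): given
	       TRIAL:  r2 = r3
	       NEXT:   r4 = r2 + 8
	     rewriting NEXT as "r4 = r3 + 8" removes its dependence on TRIAL,
	     so NEXT may become a delay slot candidate on a later iteration.  */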
2632*38fd1498Szrj if (NONJUMP_INSN_P (trial) && GET_CODE (pat) == SET
2633*38fd1498Szrj && REG_P (SET_SRC (pat))
2634*38fd1498Szrj && REG_P (SET_DEST (pat))
2635*38fd1498Szrj && !reg_overlap_mentioned_p (SET_DEST (pat), SET_SRC (pat)))
2636*38fd1498Szrj {
2637*38fd1498Szrj rtx_insn *next = next_nonnote_insn (trial);
2638*38fd1498Szrj
2639*38fd1498Szrj if (next && NONJUMP_INSN_P (next)
2640*38fd1498Szrj && GET_CODE (PATTERN (next)) != USE
2641*38fd1498Szrj && ! reg_set_p (SET_DEST (pat), next)
2642*38fd1498Szrj && ! reg_set_p (SET_SRC (pat), next)
2643*38fd1498Szrj && reg_referenced_p (SET_DEST (pat), PATTERN (next))
2644*38fd1498Szrj && ! modified_in_p (SET_DEST (pat), next))
2645*38fd1498Szrj validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next);
2646*38fd1498Szrj }
2647*38fd1498Szrj }
2648*38fd1498Szrj
2649*38fd1498Szrj /* If we stopped on a branch insn that has delay slots, see if we can
2650*38fd1498Szrj steal some of the insns in those slots. */
2651*38fd1498Szrj if (trial && NONJUMP_INSN_P (trial)
2652*38fd1498Szrj && GET_CODE (PATTERN (trial)) == SEQUENCE
2653*38fd1498Szrj && JUMP_P (XVECEXP (PATTERN (trial), 0, 0)))
2654*38fd1498Szrj {
2655*38fd1498Szrj rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (trial));
2656*38fd1498Szrj /* If this is the `true' thread, we will want to follow the jump,
2657*38fd1498Szrj so we can only do this if we have taken everything up to here. */
2658*38fd1498Szrj if (thread_if_true && trial == new_thread)
2659*38fd1498Szrj {
2660*38fd1498Szrj steal_delay_list_from_target (insn, condition, sequence,
2661*38fd1498Szrj delay_list, &set, &needed,
2662*38fd1498Szrj &opposite_needed, slots_to_fill,
2663*38fd1498Szrj pslots_filled, &must_annul,
2664*38fd1498Szrj &new_thread);
2665*38fd1498Szrj /* If we owned the thread and are told that it branched
2666*38fd1498Szrj elsewhere, make sure we own the thread at the new location. */
2667*38fd1498Szrj if (own_thread && trial != new_thread)
2668*38fd1498Szrj own_thread = own_thread_p (new_thread, new_thread, 0);
2669*38fd1498Szrj }
2670*38fd1498Szrj else if (! thread_if_true)
2671*38fd1498Szrj steal_delay_list_from_fallthrough (insn, condition, sequence,
2672*38fd1498Szrj delay_list, &set, &needed,
2673*38fd1498Szrj &opposite_needed, slots_to_fill,
2674*38fd1498Szrj pslots_filled, &must_annul);
2675*38fd1498Szrj }
2676*38fd1498Szrj
2677*38fd1498Szrj /* If we haven't found anything for this delay slot and it is very
2678*38fd1498Szrj likely that the branch will be taken, see if the insn at our target
2679*38fd1498Szrj increments or decrements a register with an increment that does not
2680*38fd1498Szrj depend on the destination register. If so, try to place the opposite
2681*38fd1498Szrj arithmetic insn after the jump insn and put the arithmetic insn in the
2682*38fd1498Szrj delay slot. If we can't do this, return. */
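  /* Illustrative example (hypothetical register): if the insn at the
     likely-taken target is "r5 = r5 + 4", we emit "r5 = r5 - 4" after the
     branch and place "r5 = r5 + 4" in its delay slot; the fall-through
     path then sees no net change, while the taken path still gets the
     increment.  */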
2683*38fd1498Szrj if (delay_list->is_empty () && likely
2684*38fd1498Szrj && new_thread && !ANY_RETURN_P (new_thread)
2685*38fd1498Szrj && NONJUMP_INSN_P (new_thread)
2686*38fd1498Szrj && !RTX_FRAME_RELATED_P (new_thread)
2687*38fd1498Szrj && GET_CODE (PATTERN (new_thread)) != ASM_INPUT
2688*38fd1498Szrj && asm_noperands (PATTERN (new_thread)) < 0)
2689*38fd1498Szrj {
2690*38fd1498Szrj rtx pat = PATTERN (new_thread);
2691*38fd1498Szrj rtx dest;
2692*38fd1498Szrj rtx src;
2693*38fd1498Szrj
2694*38fd1498Szrj /* We know "new_thread" is an insn due to NONJUMP_INSN_P (new_thread)
2695*38fd1498Szrj above. */
2696*38fd1498Szrj trial = as_a <rtx_insn *> (new_thread);
2697*38fd1498Szrj pat = PATTERN (trial);
2698*38fd1498Szrj
2699*38fd1498Szrj if (!NONJUMP_INSN_P (trial)
2700*38fd1498Szrj || GET_CODE (pat) != SET
2701*38fd1498Szrj || ! eligible_for_delay (insn, 0, trial, flags)
2702*38fd1498Szrj || can_throw_internal (trial))
2703*38fd1498Szrj return;
2704*38fd1498Szrj
2705*38fd1498Szrj dest = SET_DEST (pat), src = SET_SRC (pat);
2706*38fd1498Szrj if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
2707*38fd1498Szrj && rtx_equal_p (XEXP (src, 0), dest)
2708*38fd1498Szrj && (!FLOAT_MODE_P (GET_MODE (src))
2709*38fd1498Szrj || flag_unsafe_math_optimizations)
2710*38fd1498Szrj && ! reg_overlap_mentioned_p (dest, XEXP (src, 1))
2711*38fd1498Szrj && ! side_effects_p (pat))
2712*38fd1498Szrj {
2713*38fd1498Szrj rtx other = XEXP (src, 1);
2714*38fd1498Szrj rtx new_arith;
2715*38fd1498Szrj rtx_insn *ninsn;
2716*38fd1498Szrj
2717*38fd1498Szrj /* If this is a constant adjustment, use the same code with
2718*38fd1498Szrj the negated constant. Otherwise, reverse the sense of the
2719*38fd1498Szrj arithmetic. */
2720*38fd1498Szrj if (CONST_INT_P (other))
2721*38fd1498Szrj new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,
2722*38fd1498Szrj negate_rtx (GET_MODE (src), other));
2723*38fd1498Szrj else
2724*38fd1498Szrj new_arith = gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS,
2725*38fd1498Szrj GET_MODE (src), dest, other);
2726*38fd1498Szrj
2727*38fd1498Szrj ninsn = emit_insn_after (gen_rtx_SET (dest, new_arith), insn);
2728*38fd1498Szrj
2729*38fd1498Szrj if (recog_memoized (ninsn) < 0
2730*38fd1498Szrj || (extract_insn (ninsn),
2731*38fd1498Szrj !constrain_operands (1, get_preferred_alternatives (ninsn))))
2732*38fd1498Szrj {
2733*38fd1498Szrj delete_related_insns (ninsn);
2734*38fd1498Szrj return;
2735*38fd1498Szrj }
2736*38fd1498Szrj
2737*38fd1498Szrj if (own_thread)
2738*38fd1498Szrj {
2739*38fd1498Szrj update_block (trial, thread);
2740*38fd1498Szrj if (trial == thread)
2741*38fd1498Szrj {
2742*38fd1498Szrj thread = next_active_insn (thread);
2743*38fd1498Szrj if (new_thread == trial)
2744*38fd1498Szrj new_thread = thread;
2745*38fd1498Szrj }
2746*38fd1498Szrj delete_related_insns (trial);
2747*38fd1498Szrj }
2748*38fd1498Szrj else
2749*38fd1498Szrj new_thread = next_active_insn (trial);
2750*38fd1498Szrj
2751*38fd1498Szrj ninsn = own_thread ? trial : copy_delay_slot_insn (trial);
2752*38fd1498Szrj if (thread_if_true)
2753*38fd1498Szrj INSN_FROM_TARGET_P (ninsn) = 1;
2754*38fd1498Szrj
2755*38fd1498Szrj add_to_delay_list (ninsn, delay_list);
2756*38fd1498Szrj (*pslots_filled)++;
2757*38fd1498Szrj }
2758*38fd1498Szrj }
2759*38fd1498Szrj
2760*38fd1498Szrj if (!delay_list->is_empty () && must_annul)
2761*38fd1498Szrj INSN_ANNULLED_BRANCH_P (insn) = 1;
2762*38fd1498Szrj
2763*38fd1498Szrj /* If we are to branch into the middle of this thread, find an appropriate
2764*38fd1498Szrj label or make a new one if none, and redirect INSN to it. If we hit the
2765*38fd1498Szrj end of the function, use the end-of-function label. */
2766*38fd1498Szrj if (new_thread != thread)
2767*38fd1498Szrj {
2768*38fd1498Szrj rtx label;
2769*38fd1498Szrj bool crossing = false;
2770*38fd1498Szrj
2771*38fd1498Szrj gcc_assert (thread_if_true);
2772*38fd1498Szrj
2773*38fd1498Szrj if (new_thread && simplejump_or_return_p (new_thread)
2774*38fd1498Szrj && redirect_with_delay_list_safe_p (insn,
2775*38fd1498Szrj JUMP_LABEL (new_thread),
2776*38fd1498Szrj *delay_list))
2777*38fd1498Szrj new_thread = follow_jumps (JUMP_LABEL (new_thread), insn,
2778*38fd1498Szrj &crossing);
2779*38fd1498Szrj
2780*38fd1498Szrj if (ANY_RETURN_P (new_thread))
2781*38fd1498Szrj label = find_end_label (new_thread);
2782*38fd1498Szrj else if (LABEL_P (new_thread))
2783*38fd1498Szrj label = new_thread;
2784*38fd1498Szrj else
2785*38fd1498Szrj label = get_label_before (as_a <rtx_insn *> (new_thread),
2786*38fd1498Szrj JUMP_LABEL (insn));
2787*38fd1498Szrj
2788*38fd1498Szrj if (label)
2789*38fd1498Szrj {
2790*38fd1498Szrj reorg_redirect_jump (insn, label);
2791*38fd1498Szrj if (crossing)
2792*38fd1498Szrj CROSSING_JUMP_P (insn) = 1;
2793*38fd1498Szrj }
2794*38fd1498Szrj }
2795*38fd1498Szrj }
2796*38fd1498Szrj
2797*38fd1498Szrj /* Make another attempt to find insns to place in delay slots.
2798*38fd1498Szrj
2799*38fd1498Szrj We previously looked for insns located in front of the delay insn
2800*38fd1498Szrj and, for non-jump delay insns, located behind the delay insn.
2801*38fd1498Szrj
2802*38fd1498Szrj Here we only try to schedule jump insns, trying to move insns from either
2803*38fd1498Szrj the target or the following insns into the delay slot. If annulling is
2804*38fd1498Szrj supported, we are likely to be able to do this; otherwise, we can do it
2805*38fd1498Szrj only when it is safe. */
2806*38fd1498Szrj
2807*38fd1498Szrj static void
2808*38fd1498Szrj fill_eager_delay_slots (void)
2809*38fd1498Szrj {
2810*38fd1498Szrj rtx_insn *insn;
2811*38fd1498Szrj int i;
2812*38fd1498Szrj int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
2813*38fd1498Szrj
2814*38fd1498Szrj for (i = 0; i < num_unfilled_slots; i++)
2815*38fd1498Szrj {
2816*38fd1498Szrj rtx condition;
2817*38fd1498Szrj rtx target_label, insn_at_target;
2818*38fd1498Szrj rtx_insn *fallthrough_insn;
2819*38fd1498Szrj auto_vec<rtx_insn *, 5> delay_list;
2820*38fd1498Szrj rtx_jump_insn *jump_insn;
2821*38fd1498Szrj int own_target;
2822*38fd1498Szrj int own_fallthrough;
2823*38fd1498Szrj int prediction, slots_to_fill, slots_filled;
2824*38fd1498Szrj
2825*38fd1498Szrj insn = unfilled_slots_base[i];
2826*38fd1498Szrj if (insn == 0
2827*38fd1498Szrj || insn->deleted ()
2828*38fd1498Szrj || ! (jump_insn = dyn_cast <rtx_jump_insn *> (insn))
2829*38fd1498Szrj || ! (condjump_p (jump_insn) || condjump_in_parallel_p (jump_insn)))
2830*38fd1498Szrj continue;
2831*38fd1498Szrj
2832*38fd1498Szrj slots_to_fill = num_delay_slots (jump_insn);
2833*38fd1498Szrj /* Some machine descriptions have defined instructions to have
2834*38fd1498Szrj delay slots only in certain circumstances which may depend on
2835*38fd1498Szrj nearby insns (which change due to reorg's actions).
2836*38fd1498Szrj
2837*38fd1498Szrj For example, the PA port normally has delay slots for unconditional
2838*38fd1498Szrj jumps.
2839*38fd1498Szrj
2840*38fd1498Szrj However, the PA port claims such jumps do not have a delay slot
2841*38fd1498Szrj if they are immediate successors of certain CALL_INSNs. This
2842*38fd1498Szrj allows the port to favor filling the delay slot of the call with
2843*38fd1498Szrj the unconditional jump. */
2844*38fd1498Szrj if (slots_to_fill == 0)
2845*38fd1498Szrj continue;
2846*38fd1498Szrj
2847*38fd1498Szrj slots_filled = 0;
2848*38fd1498Szrj target_label = JUMP_LABEL (jump_insn);
2849*38fd1498Szrj condition = get_branch_condition (jump_insn, target_label);
2850*38fd1498Szrj
2851*38fd1498Szrj if (condition == 0)
2852*38fd1498Szrj continue;
2853*38fd1498Szrj
2854*38fd1498Szrj /* Get the next active fallthrough and target insns and see if we own
2855*38fd1498Szrj them. Then see whether the branch is likely true. We don't need
2856*38fd1498Szrj to do a lot of this for unconditional branches. */
2857*38fd1498Szrj
2858*38fd1498Szrj insn_at_target = first_active_target_insn (target_label);
2859*38fd1498Szrj own_target = own_thread_p (target_label, target_label, 0);
2860*38fd1498Szrj
2861*38fd1498Szrj if (condition == const_true_rtx)
2862*38fd1498Szrj {
2863*38fd1498Szrj own_fallthrough = 0;
2864*38fd1498Szrj fallthrough_insn = 0;
2865*38fd1498Szrj prediction = 2;
2866*38fd1498Szrj }
2867*38fd1498Szrj else
2868*38fd1498Szrj {
2869*38fd1498Szrj fallthrough_insn = next_active_insn (jump_insn);
2870*38fd1498Szrj own_fallthrough = own_thread_p (NEXT_INSN (jump_insn), NULL_RTX, 1);
2871*38fd1498Szrj prediction = mostly_true_jump (jump_insn);
2872*38fd1498Szrj }
2873*38fd1498Szrj
2874*38fd1498Szrj /* If this insn is expected to branch, first try to get insns from our
2875*38fd1498Szrj target, then our fallthrough insns. If it is not expected to branch,
2876*38fd1498Szrj try the other order. */
2877*38fd1498Szrj
2878*38fd1498Szrj if (prediction > 0)
2879*38fd1498Szrj {
2880*38fd1498Szrj fill_slots_from_thread (jump_insn, condition, insn_at_target,
2881*38fd1498Szrj fallthrough_insn, prediction == 2, 1,
2882*38fd1498Szrj own_target,
2883*38fd1498Szrj slots_to_fill, &slots_filled, &delay_list);
2884*38fd1498Szrj
2885*38fd1498Szrj if (delay_list.is_empty () && own_fallthrough)
2886*38fd1498Szrj {
2887*38fd1498Szrj /* Even though we didn't find anything for delay slots,
2888*38fd1498Szrj we might have found a redundant insn which we deleted
2889*38fd1498Szrj from the thread that was filled. So we have to recompute
2890*38fd1498Szrj the next insn at the target. */
2891*38fd1498Szrj target_label = JUMP_LABEL (jump_insn);
2892*38fd1498Szrj insn_at_target = first_active_target_insn (target_label);
2893*38fd1498Szrj
2894*38fd1498Szrj fill_slots_from_thread (jump_insn, condition, fallthrough_insn,
2895*38fd1498Szrj insn_at_target, 0, 0, own_fallthrough,
2896*38fd1498Szrj slots_to_fill, &slots_filled,
2897*38fd1498Szrj &delay_list);
2898*38fd1498Szrj }
2899*38fd1498Szrj }
2900*38fd1498Szrj else
2901*38fd1498Szrj {
2902*38fd1498Szrj if (own_fallthrough)
2903*38fd1498Szrj fill_slots_from_thread (jump_insn, condition, fallthrough_insn,
2904*38fd1498Szrj insn_at_target, 0, 0, own_fallthrough,
2905*38fd1498Szrj slots_to_fill, &slots_filled, &delay_list);
2906*38fd1498Szrj
2907*38fd1498Szrj if (delay_list.is_empty ())
2908*38fd1498Szrj fill_slots_from_thread (jump_insn, condition, insn_at_target,
2909*38fd1498Szrj next_active_insn (insn), 0, 1, own_target,
2910*38fd1498Szrj slots_to_fill, &slots_filled, &delay_list);
2911*38fd1498Szrj }
2912*38fd1498Szrj
2913*38fd1498Szrj if (!delay_list.is_empty ())
2914*38fd1498Szrj unfilled_slots_base[i]
2915*38fd1498Szrj = emit_delay_sequence (jump_insn, delay_list, slots_filled);
2916*38fd1498Szrj
2917*38fd1498Szrj if (slots_to_fill == slots_filled)
2918*38fd1498Szrj unfilled_slots_base[i] = 0;
2919*38fd1498Szrj
2920*38fd1498Szrj note_delay_statistics (slots_filled, 1);
2921*38fd1498Szrj }
2922*38fd1498Szrj }
2923*38fd1498Szrj
2924*38fd1498Szrj static void delete_computation (rtx_insn *insn);
2925*38fd1498Szrj
2926*38fd1498Szrj /* Recursively delete prior insns that compute the value (used only by INSN
2927*38fd1498Szrj which the caller is deleting) stored in the register mentioned by NOTE
2928*38fd1498Szrj which is a REG_DEAD note associated with INSN. */
2929*38fd1498Szrj
2930*38fd1498Szrj static void
2931*38fd1498Szrj delete_prior_computation (rtx note, rtx_insn *insn)
2932*38fd1498Szrj {
2933*38fd1498Szrj rtx_insn *our_prev;
2934*38fd1498Szrj rtx reg = XEXP (note, 0);
2935*38fd1498Szrj
2936*38fd1498Szrj for (our_prev = prev_nonnote_insn (insn);
2937*38fd1498Szrj our_prev && (NONJUMP_INSN_P (our_prev)
2938*38fd1498Szrj || CALL_P (our_prev));
2939*38fd1498Szrj our_prev = prev_nonnote_insn (our_prev))
2940*38fd1498Szrj {
2941*38fd1498Szrj rtx pat = PATTERN (our_prev);
2942*38fd1498Szrj
2943*38fd1498Szrj /* If we reach a CALL which is not calling a const function
2944*38fd1498Szrj or the callee pops the arguments, then give up. */
2945*38fd1498Szrj if (CALL_P (our_prev)
2946*38fd1498Szrj && (! RTL_CONST_CALL_P (our_prev)
2947*38fd1498Szrj || GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != CALL))
2948*38fd1498Szrj break;
2949*38fd1498Szrj
2950*38fd1498Szrj /* If we reach a SEQUENCE, it is too complex to try to
2951*38fd1498Szrj do anything with it, so give up. We can be run during
2952*38fd1498Szrj and after reorg, so SEQUENCE rtl can legitimately show
2953*38fd1498Szrj up here. */
2954*38fd1498Szrj if (GET_CODE (pat) == SEQUENCE)
2955*38fd1498Szrj break;
2956*38fd1498Szrj
2957*38fd1498Szrj if (GET_CODE (pat) == USE
2958*38fd1498Szrj && NONJUMP_INSN_P (XEXP (pat, 0)))
2959*38fd1498Szrj /* reorg creates USEs that look like this. We leave them
2960*38fd1498Szrj alone because reorg needs them for its own purposes. */
2961*38fd1498Szrj break;
2962*38fd1498Szrj
2963*38fd1498Szrj if (reg_set_p (reg, pat))
2964*38fd1498Szrj {
2965*38fd1498Szrj if (side_effects_p (pat) && !CALL_P (our_prev))
2966*38fd1498Szrj break;
2967*38fd1498Szrj
2968*38fd1498Szrj if (GET_CODE (pat) == PARALLEL)
2969*38fd1498Szrj {
2970*38fd1498Szrj /* If we find a SET of something else, we can't
2971*38fd1498Szrj delete the insn. */
2972*38fd1498Szrj
2973*38fd1498Szrj int i;
2974*38fd1498Szrj
2975*38fd1498Szrj for (i = 0; i < XVECLEN (pat, 0); i++)
2976*38fd1498Szrj {
2977*38fd1498Szrj rtx part = XVECEXP (pat, 0, i);
2978*38fd1498Szrj
2979*38fd1498Szrj if (GET_CODE (part) == SET
2980*38fd1498Szrj && SET_DEST (part) != reg)
2981*38fd1498Szrj break;
2982*38fd1498Szrj }
2983*38fd1498Szrj
2984*38fd1498Szrj if (i == XVECLEN (pat, 0))
2985*38fd1498Szrj delete_computation (our_prev);
2986*38fd1498Szrj }
2987*38fd1498Szrj else if (GET_CODE (pat) == SET
2988*38fd1498Szrj && REG_P (SET_DEST (pat)))
2989*38fd1498Szrj {
2990*38fd1498Szrj int dest_regno = REGNO (SET_DEST (pat));
2991*38fd1498Szrj int dest_endregno = END_REGNO (SET_DEST (pat));
2992*38fd1498Szrj int regno = REGNO (reg);
2993*38fd1498Szrj int endregno = END_REGNO (reg);
2994*38fd1498Szrj
2995*38fd1498Szrj if (dest_regno >= regno
2996*38fd1498Szrj && dest_endregno <= endregno)
2997*38fd1498Szrj delete_computation (our_prev);
2998*38fd1498Szrj
2999*38fd1498Szrj /* We may have a multi-word hard register and some, but not
3000*38fd1498Szrj all, of the words of the register are needed in subsequent
3001*38fd1498Szrj insns. Write REG_UNUSED notes for those parts that were not
3002*38fd1498Szrj needed. */
3003*38fd1498Szrj else if (dest_regno <= regno
3004*38fd1498Szrj && dest_endregno >= endregno)
3005*38fd1498Szrj {
3006*38fd1498Szrj int i;
3007*38fd1498Szrj
3008*38fd1498Szrj add_reg_note (our_prev, REG_UNUSED, reg);
3009*38fd1498Szrj
3010*38fd1498Szrj for (i = dest_regno; i < dest_endregno; i++)
3011*38fd1498Szrj if (! find_regno_note (our_prev, REG_UNUSED, i))
3012*38fd1498Szrj break;
3013*38fd1498Szrj
3014*38fd1498Szrj if (i == dest_endregno)
3015*38fd1498Szrj delete_computation (our_prev);
3016*38fd1498Szrj }
3017*38fd1498Szrj }
3018*38fd1498Szrj
3019*38fd1498Szrj break;
3020*38fd1498Szrj }
3021*38fd1498Szrj
3022*38fd1498Szrj /* If PAT references the register that dies here, it is an
3023*38fd1498Szrj additional use. Hence any prior SET isn't dead. However, this
3024*38fd1498Szrj insn becomes the new place for the REG_DEAD note. */
3025*38fd1498Szrj if (reg_overlap_mentioned_p (reg, pat))
3026*38fd1498Szrj {
3027*38fd1498Szrj XEXP (note, 1) = REG_NOTES (our_prev);
3028*38fd1498Szrj REG_NOTES (our_prev) = note;
3029*38fd1498Szrj break;
3030*38fd1498Szrj }
3031*38fd1498Szrj }
3032*38fd1498Szrj }
3033*38fd1498Szrj
3034*38fd1498Szrj /* Delete INSN and recursively delete insns that compute values used only
3035*38fd1498Szrj by INSN. This uses the REG_DEAD notes computed during flow analysis.
3036*38fd1498Szrj
3037*38fd1498Szrj Look at all our REG_DEAD notes. If a previous insn does nothing other
3038*38fd1498Szrj than set a register that dies in this insn, we can delete that insn
3039*38fd1498Szrj as well.
3040*38fd1498Szrj
3041*38fd1498Szrj On machines with CC0, if CC0 is used in this insn, we may be able to
3042*38fd1498Szrj delete the insn that set it. */
3043*38fd1498Szrj
3044*38fd1498Szrj static void
3045*38fd1498Szrj delete_computation (rtx_insn *insn)
3046*38fd1498Szrj {
3047*38fd1498Szrj rtx note, next;
3048*38fd1498Szrj
3049*38fd1498Szrj if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (insn)))
3050*38fd1498Szrj {
3051*38fd1498Szrj rtx_insn *prev = prev_nonnote_insn (insn);
3052*38fd1498Szrj /* We assume that at this stage
3053*38fd1498Szrj CC's are always set explicitly
3054*38fd1498Szrj and always immediately before the jump that
3055*38fd1498Szrj will use them. So if the previous insn
3056*38fd1498Szrj exists to set the CC's, delete it
3057*38fd1498Szrj (unless it performs auto-increments, etc.). */
3058*38fd1498Szrj if (prev && NONJUMP_INSN_P (prev)
3059*38fd1498Szrj && sets_cc0_p (PATTERN (prev)))
3060*38fd1498Szrj {
3061*38fd1498Szrj if (sets_cc0_p (PATTERN (prev)) > 0
3062*38fd1498Szrj && ! side_effects_p (PATTERN (prev)))
3063*38fd1498Szrj delete_computation (prev);
3064*38fd1498Szrj else
3065*38fd1498Szrj /* Otherwise, show that cc0 won't be used. */
3066*38fd1498Szrj add_reg_note (prev, REG_UNUSED, cc0_rtx);
3067*38fd1498Szrj }
3068*38fd1498Szrj }
3069*38fd1498Szrj
3070*38fd1498Szrj for (note = REG_NOTES (insn); note; note = next)
3071*38fd1498Szrj {
3072*38fd1498Szrj next = XEXP (note, 1);
3073*38fd1498Szrj
3074*38fd1498Szrj if (REG_NOTE_KIND (note) != REG_DEAD
3075*38fd1498Szrj /* Verify that the REG_NOTE is legitimate. */
3076*38fd1498Szrj || !REG_P (XEXP (note, 0)))
3077*38fd1498Szrj continue;
3078*38fd1498Szrj
3079*38fd1498Szrj delete_prior_computation (note, insn);
3080*38fd1498Szrj }
3081*38fd1498Szrj
3082*38fd1498Szrj delete_related_insns (insn);
3083*38fd1498Szrj }
3084*38fd1498Szrj
3085*38fd1498Szrj /* If all INSN does is set the pc, delete it,
3086*38fd1498Szrj and delete the insn that set the condition codes for it,
3087*38fd1498Szrj if that is what the previous insn did. */
3088*38fd1498Szrj
3089*38fd1498Szrj static void
3090*38fd1498Szrj delete_jump (rtx_insn *insn)
3091*38fd1498Szrj {
3092*38fd1498Szrj rtx set = single_set (insn);
3093*38fd1498Szrj
3094*38fd1498Szrj if (set && GET_CODE (SET_DEST (set)) == PC)
3095*38fd1498Szrj delete_computation (insn);
3096*38fd1498Szrj }
3097*38fd1498Szrj
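/* Return the label nearest before the first active insn that follows X,
   scanning backwards from that insn.  Return NULL if SCAN_LIMIT or the
   start of the insn chain is reached before a label is found.  */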
3098*38fd1498Szrj static rtx_insn *
3099*38fd1498Szrj label_before_next_insn (rtx_insn *x, rtx scan_limit)
3100*38fd1498Szrj {
3101*38fd1498Szrj rtx_insn *insn = next_active_insn (x);
3102*38fd1498Szrj while (insn)
3103*38fd1498Szrj {
3104*38fd1498Szrj insn = PREV_INSN (insn);
3105*38fd1498Szrj if (insn == scan_limit || insn == NULL_RTX)
3106*38fd1498Szrj return NULL;
3107*38fd1498Szrj if (LABEL_P (insn))
3108*38fd1498Szrj break;
3109*38fd1498Szrj }
3110*38fd1498Szrj return insn;
3111*38fd1498Szrj }
3112*38fd1498Szrj
3113*38fd1498Szrj /* Return TRUE if there is a NOTE_INSN_SWITCH_TEXT_SECTIONS note in between
3114*38fd1498Szrj BEG and END. */
3115*38fd1498Szrj
3116*38fd1498Szrj static bool
3117*38fd1498Szrj switch_text_sections_between_p (const rtx_insn *beg, const rtx_insn *end)
3118*38fd1498Szrj {
3119*38fd1498Szrj const rtx_insn *p;
3120*38fd1498Szrj for (p = beg; p != end; p = NEXT_INSN (p))
3121*38fd1498Szrj if (NOTE_P (p) && NOTE_KIND (p) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
3122*38fd1498Szrj return true;
3123*38fd1498Szrj return false;
3124*38fd1498Szrj }
3125*38fd1498Szrj
3126*38fd1498Szrj
3127*38fd1498Szrj /* Once we have tried two ways to fill a delay slot, make a pass over the
3128*38fd1498Szrj code to try to improve the results and to do such things as more jump
3129*38fd1498Szrj threading. */
3130*38fd1498Szrj
3131*38fd1498Szrj static void
3132*38fd1498Szrj relax_delay_slots (rtx_insn *first)
3133*38fd1498Szrj {
3134*38fd1498Szrj rtx_insn *insn, *next;
3135*38fd1498Szrj rtx_sequence *pat;
3136*38fd1498Szrj rtx_insn *delay_insn;
3137*38fd1498Szrj rtx target_label;
3138*38fd1498Szrj
3139*38fd1498Szrj /* Look at every JUMP_INSN and see if we can improve it. */
3140*38fd1498Szrj for (insn = first; insn; insn = next)
3141*38fd1498Szrj {
3142*38fd1498Szrj rtx_insn *other, *prior_insn;
3143*38fd1498Szrj bool crossing;
3144*38fd1498Szrj
3145*38fd1498Szrj next = next_active_insn (insn);
3146*38fd1498Szrj
3147*38fd1498Szrj /* If this is a jump insn, see if it now jumps to a jump, jumps to
3148*38fd1498Szrj the next insn, or jumps to a label that is not the last of a
3149*38fd1498Szrj group of consecutive labels. */
3150*38fd1498Szrj if (is_a <rtx_jump_insn *> (insn)
3151*38fd1498Szrj && (condjump_p (insn) || condjump_in_parallel_p (insn))
3152*38fd1498Szrj && !ANY_RETURN_P (target_label = JUMP_LABEL (insn)))
3153*38fd1498Szrj {
3154*38fd1498Szrj rtx_jump_insn *jump_insn = as_a <rtx_jump_insn *> (insn);
3155*38fd1498Szrj target_label
3156*38fd1498Szrj = skip_consecutive_labels (follow_jumps (target_label, jump_insn,
3157*38fd1498Szrj &crossing));
3158*38fd1498Szrj if (ANY_RETURN_P (target_label))
3159*38fd1498Szrj target_label = find_end_label (target_label);
3160*38fd1498Szrj
3161*38fd1498Szrj if (target_label
3162*38fd1498Szrj && next_active_insn (as_a<rtx_insn *> (target_label)) == next
3163*38fd1498Szrj && ! condjump_in_parallel_p (jump_insn)
3164*38fd1498Szrj && ! (next && switch_text_sections_between_p (jump_insn, next)))
3165*38fd1498Szrj {
3166*38fd1498Szrj delete_jump (jump_insn);
3167*38fd1498Szrj continue;
3168*38fd1498Szrj }
3169*38fd1498Szrj
3170*38fd1498Szrj if (target_label && target_label != JUMP_LABEL (jump_insn))
3171*38fd1498Szrj {
3172*38fd1498Szrj reorg_redirect_jump (jump_insn, target_label);
3173*38fd1498Szrj if (crossing)
3174*38fd1498Szrj CROSSING_JUMP_P (jump_insn) = 1;
3175*38fd1498Szrj }
3176*38fd1498Szrj
3177*38fd1498Szrj /* See if this jump conditionally branches around an unconditional
3178*38fd1498Szrj jump. If so, invert this jump and point it to the target of the
3179*38fd1498Szrj second jump. Check if it's possible on the target. */
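	  /* Illustrative shape (hypothetical labels):
	       (if cond) branch to L1;  jump to L2;  L1: ...
	     becomes
	       (if !cond) branch to L2;  L1: ...
	     with the unconditional jump deleted, and L1 deleted too if it is
	     no longer referenced.  */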
3180*38fd1498Szrj if (next && simplejump_or_return_p (next)
3181*38fd1498Szrj && any_condjump_p (jump_insn)
3182*38fd1498Szrj && target_label
3183*38fd1498Szrj && (next_active_insn (as_a<rtx_insn *> (target_label))
3184*38fd1498Szrj == next_active_insn (next))
3185*38fd1498Szrj && no_labels_between_p (jump_insn, next)
3186*38fd1498Szrj && targetm.can_follow_jump (jump_insn, next))
3187*38fd1498Szrj {
3188*38fd1498Szrj rtx label = JUMP_LABEL (next);
3189*38fd1498Szrj
3190*38fd1498Szrj /* Be careful how we do this to avoid deleting code or
3191*38fd1498Szrj labels that are momentarily dead. See similar optimization
3192*38fd1498Szrj in jump.c.
3193*38fd1498Szrj
3194*38fd1498Szrj We also need to ensure we properly handle the case when
3195*38fd1498Szrj invert_jump fails. */
3196*38fd1498Szrj
3197*38fd1498Szrj ++LABEL_NUSES (target_label);
3198*38fd1498Szrj if (!ANY_RETURN_P (label))
3199*38fd1498Szrj ++LABEL_NUSES (label);
3200*38fd1498Szrj
3201*38fd1498Szrj if (invert_jump (jump_insn, label, 1))
3202*38fd1498Szrj {
3203*38fd1498Szrj delete_related_insns (next);
3204*38fd1498Szrj next = jump_insn;
3205*38fd1498Szrj }
3206*38fd1498Szrj
3207*38fd1498Szrj if (!ANY_RETURN_P (label))
3208*38fd1498Szrj --LABEL_NUSES (label);
3209*38fd1498Szrj
3210*38fd1498Szrj if (--LABEL_NUSES (target_label) == 0)
3211*38fd1498Szrj delete_related_insns (target_label);
3212*38fd1498Szrj
3213*38fd1498Szrj continue;
3214*38fd1498Szrj }
3215*38fd1498Szrj }
3216*38fd1498Szrj
3217*38fd1498Szrj /* If this is an unconditional jump and the previous insn is a
3218*38fd1498Szrj conditional jump, try reversing the condition of the previous
3219*38fd1498Szrj insn and swapping our targets. The next pass might be able to
3220*38fd1498Szrj fill the slots.
3221*38fd1498Szrj
3222*38fd1498Szrj Don't do this if we expect the conditional branch to be true, because
3223*38fd1498Szrj we would then be making the more common case longer. */
3224*38fd1498Szrj
3225*38fd1498Szrj if (simplejump_or_return_p (insn)
3226*38fd1498Szrj && (other = prev_active_insn (insn)) != 0
3227*38fd1498Szrj && any_condjump_p (other)
3228*38fd1498Szrj && no_labels_between_p (other, insn)
3229*38fd1498Szrj && mostly_true_jump (other) < 0)
3230*38fd1498Szrj {
3231*38fd1498Szrj rtx other_target = JUMP_LABEL (other);
3232*38fd1498Szrj target_label = JUMP_LABEL (insn);
3233*38fd1498Szrj
3234*38fd1498Szrj if (invert_jump (as_a <rtx_jump_insn *> (other), target_label, 0))
3235*38fd1498Szrj reorg_redirect_jump (as_a <rtx_jump_insn *> (insn), other_target);
3236*38fd1498Szrj }
3237*38fd1498Szrj
3238*38fd1498Szrj /* Now look only at cases where we have a filled delay slot. */
3239*38fd1498Szrj if (!NONJUMP_INSN_P (insn) || GET_CODE (PATTERN (insn)) != SEQUENCE)
3240*38fd1498Szrj continue;
3241*38fd1498Szrj
3242*38fd1498Szrj pat = as_a <rtx_sequence *> (PATTERN (insn));
3243*38fd1498Szrj delay_insn = pat->insn (0);
3244*38fd1498Szrj
3245*38fd1498Szrj /* See if the first insn in the delay slot is redundant with some
3246*38fd1498Szrj previous insn. Remove it from the delay slot if so; then set up
3247*38fd1498Szrj to reprocess this insn. */
3248*38fd1498Szrj if ((prior_insn = redundant_insn (pat->insn (1), delay_insn, vNULL)))
3249*38fd1498Szrj {
3250*38fd1498Szrj fix_reg_dead_note (prior_insn, insn);
3251*38fd1498Szrj update_block (pat->insn (1), insn);
3252*38fd1498Szrj delete_from_delay_slot (pat->insn (1));
3253*38fd1498Szrj next = prev_active_insn (next);
3254*38fd1498Szrj continue;
3255*38fd1498Szrj }
3256*38fd1498Szrj
3257*38fd1498Szrj /* See if we have a RETURN insn with a filled delay slot followed
3258*38fd1498Szrj by a RETURN insn with an unfilled delay slot. If so, we can delete
3259*38fd1498Szrj the first RETURN (but not its delay insn). This gives the same
3260*38fd1498Szrj effect in fewer instructions.
3261*38fd1498Szrj
3262*38fd1498Szrj Only do so if optimizing for size since this results in slower, but
3263*38fd1498Szrj smaller code. */
3264*38fd1498Szrj if (optimize_function_for_size_p (cfun)
3265*38fd1498Szrj && ANY_RETURN_P (PATTERN (delay_insn))
3266*38fd1498Szrj && next
3267*38fd1498Szrj && JUMP_P (next)
3268*38fd1498Szrj && PATTERN (next) == PATTERN (delay_insn))
3269*38fd1498Szrj {
3270*38fd1498Szrj rtx_insn *after;
3271*38fd1498Szrj int i;
3272*38fd1498Szrj
3273*38fd1498Szrj /* Delete the RETURN and just execute the delay list insns.
3274*38fd1498Szrj
3275*38fd1498Szrj We do this by deleting the INSN containing the SEQUENCE, then
3276*38fd1498Szrj re-emitting the insns separately, and then deleting the RETURN.
3277*38fd1498Szrj This allows the count of the jump target to be properly
3278*38fd1498Szrj decremented.
3279*38fd1498Szrj
3280*38fd1498Szrj Note that we need to change the INSN_UID of the re-emitted insns
3281*38fd1498Szrj since it is used to hash the insns for mark_target_live_regs and
3282*38fd1498Szrj the re-emitted insns will no longer be wrapped up in a SEQUENCE.
3283*38fd1498Szrj
3284*38fd1498Szrj Clear the from target bit, since these insns are no longer
3285*38fd1498Szrj in delay slots. */
3286*38fd1498Szrj for (i = 0; i < XVECLEN (pat, 0); i++)
3287*38fd1498Szrj INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
3288*38fd1498Szrj
3289*38fd1498Szrj rtx_insn *prev = PREV_INSN (insn);
3290*38fd1498Szrj delete_related_insns (insn);
3291*38fd1498Szrj gcc_assert (GET_CODE (pat) == SEQUENCE);
3292*38fd1498Szrj add_insn_after (delay_insn, prev, NULL);
3293*38fd1498Szrj after = delay_insn;
3294*38fd1498Szrj for (i = 1; i < pat->len (); i++)
3295*38fd1498Szrj after = emit_copy_of_insn_after (pat->insn (i), after);
3296*38fd1498Szrj delete_scheduled_jump (delay_insn);
3297*38fd1498Szrj continue;
3298*38fd1498Szrj }
3299*38fd1498Szrj
3300*38fd1498Szrj /* Now look only at the cases where we have a filled JUMP_INSN. */
3301*38fd1498Szrj rtx_jump_insn *delay_jump_insn =
3302*38fd1498Szrj dyn_cast <rtx_jump_insn *> (delay_insn);
3303*38fd1498Szrj if (! delay_jump_insn || !(condjump_p (delay_jump_insn)
3304*38fd1498Szrj || condjump_in_parallel_p (delay_jump_insn)))
3305*38fd1498Szrj continue;
3306*38fd1498Szrj
3307*38fd1498Szrj target_label = JUMP_LABEL (delay_jump_insn);
3308*38fd1498Szrj if (target_label && ANY_RETURN_P (target_label))
3309*38fd1498Szrj continue;
3310*38fd1498Szrj
3311*38fd1498Szrj /* If this jump goes to another unconditional jump, thread it, but
3312*38fd1498Szrj don't convert a jump into a RETURN here. */
3313*38fd1498Szrj rtx trial = skip_consecutive_labels (follow_jumps (target_label,
3314*38fd1498Szrj delay_jump_insn,
3315*38fd1498Szrj &crossing));
3316*38fd1498Szrj if (ANY_RETURN_P (trial))
3317*38fd1498Szrj trial = find_end_label (trial);
3318*38fd1498Szrj
3319*38fd1498Szrj if (trial && trial != target_label
3320*38fd1498Szrj && redirect_with_delay_slots_safe_p (delay_jump_insn, trial, insn))
3321*38fd1498Szrj {
3322*38fd1498Szrj reorg_redirect_jump (delay_jump_insn, trial);
3323*38fd1498Szrj target_label = trial;
3324*38fd1498Szrj if (crossing)
3325*38fd1498Szrj CROSSING_JUMP_P (delay_jump_insn) = 1;
3326*38fd1498Szrj }
3327*38fd1498Szrj
3328*38fd1498Szrj /* If the first insn at TARGET_LABEL is redundant with a previous
3329*38fd1498Szrj insn, redirect the jump to the following insn and process again.
3330*38fd1498Szrj We use next_real_nondebug_insn instead of next_active_insn so we
3331*38fd1498Szrj don't skip USE-markers, or we'll end up with incorrect
3332*38fd1498Szrj liveness info. */
3333*38fd1498Szrj trial = next_real_nondebug_insn (target_label);
3334*38fd1498Szrj if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
3335*38fd1498Szrj && redundant_insn (trial, insn, vNULL)
3336*38fd1498Szrj && ! can_throw_internal (trial))
3337*38fd1498Szrj {
3338*38fd1498Szrj /* Figure out where to emit the special USE insn so we don't
3339*38fd1498Szrj later incorrectly compute register live/death info. */
3340*38fd1498Szrj rtx_insn *tmp = next_active_insn (as_a<rtx_insn *> (trial));
3341*38fd1498Szrj if (tmp == 0)
3342*38fd1498Szrj tmp = find_end_label (simple_return_rtx);
3343*38fd1498Szrj
3344*38fd1498Szrj if (tmp)
3345*38fd1498Szrj {
3346*38fd1498Szrj /* Insert the special USE insn and update dataflow info.
3347*38fd1498Szrj We know "trial" is an insn here as it is the output of
3348*38fd1498Szrj next_real_nondebug_insn () above. */
3349*38fd1498Szrj update_block (as_a <rtx_insn *> (trial), tmp);
3350*38fd1498Szrj
3351*38fd1498Szrj /* Now emit a label before the special USE insn, and
3352*38fd1498Szrj redirect our jump to the new label. */
3353*38fd1498Szrj target_label = get_label_before (PREV_INSN (tmp), target_label);
3354*38fd1498Szrj reorg_redirect_jump (delay_jump_insn, target_label);
3355*38fd1498Szrj next = insn;
3356*38fd1498Szrj continue;
3357*38fd1498Szrj }
3358*38fd1498Szrj }
3359*38fd1498Szrj
3360*38fd1498Szrj /* Similarly, if it is an unconditional jump with one insn in its
3361*38fd1498Szrj delay list and that insn is redundant, thread the jump. */
3362*38fd1498Szrj rtx_sequence *trial_seq =
3363*38fd1498Szrj trial ? dyn_cast <rtx_sequence *> (PATTERN (trial)) : NULL;
3364*38fd1498Szrj if (trial_seq
3365*38fd1498Szrj && trial_seq->len () == 2
3366*38fd1498Szrj && JUMP_P (trial_seq->insn (0))
3367*38fd1498Szrj && simplejump_or_return_p (trial_seq->insn (0))
3368*38fd1498Szrj && redundant_insn (trial_seq->insn (1), insn, vNULL))
3369*38fd1498Szrj {
3370*38fd1498Szrj rtx temp_label = JUMP_LABEL (trial_seq->insn (0));
3371*38fd1498Szrj if (ANY_RETURN_P (temp_label))
3372*38fd1498Szrj temp_label = find_end_label (temp_label);
3373*38fd1498Szrj
3374*38fd1498Szrj if (temp_label
3375*38fd1498Szrj && redirect_with_delay_slots_safe_p (delay_jump_insn,
3376*38fd1498Szrj temp_label, insn))
3377*38fd1498Szrj {
3378*38fd1498Szrj update_block (trial_seq->insn (1), insn);
3379*38fd1498Szrj reorg_redirect_jump (delay_jump_insn, temp_label);
3380*38fd1498Szrj next = insn;
3381*38fd1498Szrj continue;
3382*38fd1498Szrj }
3383*38fd1498Szrj }
3384*38fd1498Szrj
3385*38fd1498Szrj /* See if we have a simple (conditional) jump that is useless. */
3386*38fd1498Szrj if (!CROSSING_JUMP_P (delay_jump_insn)
3387*38fd1498Szrj && !INSN_ANNULLED_BRANCH_P (delay_jump_insn)
3388*38fd1498Szrj && !condjump_in_parallel_p (delay_jump_insn)
3389*38fd1498Szrj && prev_active_insn (as_a<rtx_insn *> (target_label)) == insn
3390*38fd1498Szrj && !BARRIER_P (prev_nonnote_insn (as_a<rtx_insn *> (target_label)))
3391*38fd1498Szrj /* If the last insn in the delay slot sets CC0 for some insn,
3392*38fd1498Szrj various code assumes that it is in a delay slot. We could
3393*38fd1498Szrj put it back where it belonged and delete the register notes,
3394*38fd1498Szrj but it doesn't seem worthwhile in this uncommon case. */
3395*38fd1498Szrj && (!HAVE_cc0
3396*38fd1498Szrj || ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1),
3397*38fd1498Szrj REG_CC_USER, NULL_RTX)))
3398*38fd1498Szrj {
3399*38fd1498Szrj rtx_insn *after;
3400*38fd1498Szrj int i;
3401*38fd1498Szrj
3402*38fd1498Szrj /* All this insn does is execute its delay list and jump to the
3403*38fd1498Szrj following insn. So delete the jump and just execute the delay
3404*38fd1498Szrj list insns.
3405*38fd1498Szrj
3406*38fd1498Szrj We do this by deleting the INSN containing the SEQUENCE, then
3407*38fd1498Szrj re-emitting the insns separately, and then deleting the jump.
3408*38fd1498Szrj This allows the count of the jump target to be properly
3409*38fd1498Szrj decremented.
3410*38fd1498Szrj
3411*38fd1498Szrj Note that we need to change the INSN_UID of the re-emitted insns
3412*38fd1498Szrj since it is used to hash the insns for mark_target_live_regs and
3413*38fd1498Szrj the re-emitted insns will no longer be wrapped up in a SEQUENCE.
3414*38fd1498Szrj
3415*38fd1498Szrj Clear the from target bit, since these insns are no longer
3416*38fd1498Szrj in delay slots. */
3417*38fd1498Szrj for (i = 0; i < XVECLEN (pat, 0); i++)
3418*38fd1498Szrj INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
3419*38fd1498Szrj
3420*38fd1498Szrj rtx_insn *prev = PREV_INSN (insn);
3421*38fd1498Szrj delete_related_insns (insn);
3422*38fd1498Szrj gcc_assert (GET_CODE (pat) == SEQUENCE);
3423*38fd1498Szrj add_insn_after (delay_jump_insn, prev, NULL);
3424*38fd1498Szrj after = delay_jump_insn;
3425*38fd1498Szrj for (i = 1; i < pat->len (); i++)
3426*38fd1498Szrj after = emit_copy_of_insn_after (pat->insn (i), after);
3427*38fd1498Szrj delete_scheduled_jump (delay_jump_insn);
3428*38fd1498Szrj continue;
3429*38fd1498Szrj }
3430*38fd1498Szrj
3431*38fd1498Szrj /* See if this is an unconditional jump around a single insn which is
3432*38fd1498Szrj identical to the one in its delay slot. In this case, we can just
3433*38fd1498Szrj delete the branch and the insn in its delay slot. */
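      /* Illustrative shape (hypothetical): "jump L1" with insn I in its
	 delay slot, immediately followed by another copy of I and then L1.
	 Both paths execute I and end up at L1, so the branch and its delay
	 slot copy of I can simply be deleted.  */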
3434*38fd1498Szrj if (next && NONJUMP_INSN_P (next)
3435*38fd1498Szrj && label_before_next_insn (next, insn) == target_label
3436*38fd1498Szrj && simplejump_p (insn)
3437*38fd1498Szrj && XVECLEN (pat, 0) == 2
3438*38fd1498Szrj && rtx_equal_p (PATTERN (next), PATTERN (pat->insn (1))))
3439*38fd1498Szrj {
3440*38fd1498Szrj delete_related_insns (insn);
3441*38fd1498Szrj continue;
3442*38fd1498Szrj }
3443*38fd1498Szrj
3444*38fd1498Szrj /* See if this jump (with its delay slots) conditionally branches
3445*38fd1498Szrj around an unconditional jump (without delay slots). If so, invert
3446*38fd1498Szrj this jump and point it to the target of the second jump. We cannot
3447*38fd1498Szrj do this for annulled jumps, though. Again, don't convert a jump to
3448*38fd1498Szrj a RETURN here. */
3449*38fd1498Szrj if (! INSN_ANNULLED_BRANCH_P (delay_jump_insn)
3450*38fd1498Szrj && any_condjump_p (delay_jump_insn)
3451*38fd1498Szrj && next && simplejump_or_return_p (next)
3452*38fd1498Szrj && (next_active_insn (as_a<rtx_insn *> (target_label))
3453*38fd1498Szrj == next_active_insn (next))
3454*38fd1498Szrj && no_labels_between_p (insn, next))
3455*38fd1498Szrj {
3456*38fd1498Szrj rtx label = JUMP_LABEL (next);
3457*38fd1498Szrj rtx old_label = JUMP_LABEL (delay_jump_insn);
3458*38fd1498Szrj
3459*38fd1498Szrj if (ANY_RETURN_P (label))
3460*38fd1498Szrj label = find_end_label (label);
3461*38fd1498Szrj
3462*38fd1498Szrj /* find_end_label can generate a new label. Check this first. */
3463*38fd1498Szrj if (label
3464*38fd1498Szrj && no_labels_between_p (insn, next)
3465*38fd1498Szrj && redirect_with_delay_slots_safe_p (delay_jump_insn,
3466*38fd1498Szrj label, insn))
3467*38fd1498Szrj {
3468*38fd1498Szrj /* Be careful how we do this to avoid deleting code or labels
3469*38fd1498Szrj that are momentarily dead. See similar optimization in
3470*38fd1498Szrj jump.c */
3471*38fd1498Szrj if (old_label)
3472*38fd1498Szrj ++LABEL_NUSES (old_label);
3473*38fd1498Szrj
3474*38fd1498Szrj if (invert_jump (delay_jump_insn, label, 1))
3475*38fd1498Szrj {
3476*38fd1498Szrj int i;
3477*38fd1498Szrj
3478*38fd1498Szrj /* Must update the INSN_FROM_TARGET_P bits now that
3479*38fd1498Szrj the branch is reversed, so that mark_target_live_regs
3480*38fd1498Szrj will handle the delay slot insn correctly. */
3481*38fd1498Szrj for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
3482*38fd1498Szrj {
3483*38fd1498Szrj rtx slot = XVECEXP (PATTERN (insn), 0, i);
3484*38fd1498Szrj INSN_FROM_TARGET_P (slot) = ! INSN_FROM_TARGET_P (slot);
3485*38fd1498Szrj }
3486*38fd1498Szrj
3487*38fd1498Szrj delete_related_insns (next);
3488*38fd1498Szrj next = insn;
3489*38fd1498Szrj }
3490*38fd1498Szrj
3491*38fd1498Szrj if (old_label && --LABEL_NUSES (old_label) == 0)
3492*38fd1498Szrj delete_related_insns (old_label);
3493*38fd1498Szrj continue;
3494*38fd1498Szrj }
3495*38fd1498Szrj }
3496*38fd1498Szrj
3497*38fd1498Szrj /* If we own the thread opposite the way this insn branches, see if we
3498*38fd1498Szrj can merge its delay slots with following insns. */
3499*38fd1498Szrj if (INSN_FROM_TARGET_P (pat->insn (1))
3500*38fd1498Szrj && own_thread_p (NEXT_INSN (insn), 0, 1))
3501*38fd1498Szrj try_merge_delay_insns (insn, next);
3502*38fd1498Szrj else if (! INSN_FROM_TARGET_P (pat->insn (1))
3503*38fd1498Szrj && own_thread_p (target_label, target_label, 0))
3504*38fd1498Szrj try_merge_delay_insns (insn,
3505*38fd1498Szrj next_active_insn (as_a<rtx_insn *> (target_label)));
3506*38fd1498Szrj
3507*38fd1498Szrj /* If we get here, we haven't deleted INSN. But we may have deleted
3508*38fd1498Szrj NEXT, so recompute it. */
3509*38fd1498Szrj next = next_active_insn (insn);
3510*38fd1498Szrj }
3511*38fd1498Szrj }
3512*38fd1498Szrj
3513*38fd1498Szrj
3514*38fd1498Szrj /* Look for filled jumps to the end of function label. We can try to convert
3515*38fd1498Szrj them into RETURN insns if the insns in the delay slot are valid for the
3516*38fd1498Szrj RETURN as well. */
3517*38fd1498Szrj
3518*38fd1498Szrj static void
3519*38fd1498Szrj make_return_insns (rtx_insn *first)
3520*38fd1498Szrj {
3521*38fd1498Szrj rtx_insn *insn;
3522*38fd1498Szrj rtx_jump_insn *jump_insn;
3523*38fd1498Szrj rtx real_return_label = function_return_label;
3524*38fd1498Szrj rtx real_simple_return_label = function_simple_return_label;
3525*38fd1498Szrj int slots, i;
3526*38fd1498Szrj
3527*38fd1498Szrj /* See if there is a RETURN insn in the function other than the one we
3528*38fd1498Szrj made for END_OF_FUNCTION_LABEL. If so, arrange for anything we cannot
3529*38fd1498Szrj change into a RETURN to jump to it instead. */
3530*38fd1498Szrj for (insn = first; insn; insn = NEXT_INSN (insn))
3531*38fd1498Szrj if (JUMP_P (insn) && ANY_RETURN_P (PATTERN (insn)))
3532*38fd1498Szrj {
3533*38fd1498Szrj rtx t = get_label_before (insn, NULL_RTX);
3534*38fd1498Szrj if (PATTERN (insn) == ret_rtx)
3535*38fd1498Szrj real_return_label = t;
3536*38fd1498Szrj else
3537*38fd1498Szrj real_simple_return_label = t;
3538*38fd1498Szrj break;
3539*38fd1498Szrj }
3540*38fd1498Szrj
3541*38fd1498Szrj /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it
3542*38fd1498Szrj was equal to END_OF_FUNCTION_LABEL. */
3543*38fd1498Szrj if (real_return_label)
3544*38fd1498Szrj LABEL_NUSES (real_return_label)++;
3545*38fd1498Szrj if (real_simple_return_label)
3546*38fd1498Szrj LABEL_NUSES (real_simple_return_label)++;
3547*38fd1498Szrj
3548*38fd1498Szrj /* Clear the list of insns to fill so we can use it. */
3549*38fd1498Szrj obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
3550*38fd1498Szrj
3551*38fd1498Szrj for (insn = first; insn; insn = NEXT_INSN (insn))
3552*38fd1498Szrj {
3553*38fd1498Szrj int flags;
3554*38fd1498Szrj rtx kind, real_label;
3555*38fd1498Szrj
3556*38fd1498Szrj /* Only look at filled JUMP_INSNs that go to the end of function
3557*38fd1498Szrj label. */
3558*38fd1498Szrj if (!NONJUMP_INSN_P (insn))
3559*38fd1498Szrj continue;
3560*38fd1498Szrj
3561*38fd1498Szrj if (GET_CODE (PATTERN (insn)) != SEQUENCE)
3562*38fd1498Szrj continue;
3563*38fd1498Szrj
3564*38fd1498Szrj rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (insn));
3565*38fd1498Szrj
3566*38fd1498Szrj if (!jump_to_label_p (pat->insn (0)))
3567*38fd1498Szrj continue;
3568*38fd1498Szrj
3569*38fd1498Szrj if (JUMP_LABEL (pat->insn (0)) == function_return_label)
3570*38fd1498Szrj {
3571*38fd1498Szrj kind = ret_rtx;
3572*38fd1498Szrj real_label = real_return_label;
3573*38fd1498Szrj }
3574*38fd1498Szrj else if (JUMP_LABEL (pat->insn (0)) == function_simple_return_label)
3575*38fd1498Szrj {
3576*38fd1498Szrj kind = simple_return_rtx;
3577*38fd1498Szrj real_label = real_simple_return_label;
3578*38fd1498Szrj }
3579*38fd1498Szrj else
3580*38fd1498Szrj continue;
3581*38fd1498Szrj
3582*38fd1498Szrj jump_insn = as_a <rtx_jump_insn *> (pat->insn (0));
3583*38fd1498Szrj
3584*38fd1498Szrj /* If we can't make the jump into a RETURN, try to redirect it to the best
3585*38fd1498Szrj RETURN and go on to the next insn. */
3586*38fd1498Szrj if (!reorg_redirect_jump (jump_insn, kind))
3587*38fd1498Szrj {
3588*38fd1498Szrj /* Make sure redirecting the jump will not invalidate the delay
3589*38fd1498Szrj slot insns. */
3590*38fd1498Szrj if (redirect_with_delay_slots_safe_p (jump_insn, real_label, insn))
3591*38fd1498Szrj reorg_redirect_jump (jump_insn, real_label);
3592*38fd1498Szrj continue;
3593*38fd1498Szrj }
3594*38fd1498Szrj
3595*38fd1498Szrj /* See if this RETURN can accept the insns currently in its delay slots.
3596*38fd1498Szrj It can if it has at least as many slots and the contents of each
3597*38fd1498Szrj are valid. */
3598*38fd1498Szrj
3599*38fd1498Szrj flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn));
3600*38fd1498Szrj slots = num_delay_slots (jump_insn);
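/* Element 0 of the SEQUENCE is the branch itself, so XVECLEN (pat, 0) - 1
   is the number of insns currently occupying its delay slots.  */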
3601*38fd1498Szrj if (slots >= XVECLEN (pat, 0) - 1)
3602*38fd1498Szrj {
3603*38fd1498Szrj for (i = 1; i < XVECLEN (pat, 0); i++)
3604*38fd1498Szrj if (! (
3605*38fd1498Szrj #if ANNUL_IFFALSE_SLOTS
3606*38fd1498Szrj (INSN_ANNULLED_BRANCH_P (jump_insn)
3607*38fd1498Szrj && INSN_FROM_TARGET_P (pat->insn (i)))
3608*38fd1498Szrj ? eligible_for_annul_false (jump_insn, i - 1,
3609*38fd1498Szrj pat->insn (i), flags) :
3610*38fd1498Szrj #endif
3611*38fd1498Szrj #if ANNUL_IFTRUE_SLOTS
3612*38fd1498Szrj (INSN_ANNULLED_BRANCH_P (jump_insn)
3613*38fd1498Szrj && ! INSN_FROM_TARGET_P (pat->insn (i)))
3614*38fd1498Szrj ? eligible_for_annul_true (jump_insn, i - 1,
3615*38fd1498Szrj pat->insn (i), flags) :
3616*38fd1498Szrj #endif
3617*38fd1498Szrj eligible_for_delay (jump_insn, i - 1,
3618*38fd1498Szrj pat->insn (i), flags)))
3619*38fd1498Szrj break;
3620*38fd1498Szrj }
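/* The RETURN provides fewer slots than are currently filled, so the
   delay insns cannot all be kept; clear I so the test below fails.  */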
3621*38fd1498Szrj else
3622*38fd1498Szrj i = 0;
3623*38fd1498Szrj
3624*38fd1498Szrj if (i == XVECLEN (pat, 0))
3625*38fd1498Szrj continue;
3626*38fd1498Szrj
3627*38fd1498Szrj /* We have to do something with this insn. If it is an unconditional
3628*38fd1498Szrj RETURN, delete the SEQUENCE and output the individual insns,
3629*38fd1498Szrj followed by the RETURN. Then set things up so we try to find
3630*38fd1498Szrj insns for its delay slots, if it needs some. */
3631*38fd1498Szrj if (ANY_RETURN_P (PATTERN (jump_insn)))
3632*38fd1498Szrj {
3633*38fd1498Szrj rtx_insn *prev = PREV_INSN (insn);
3634*38fd1498Szrj
3635*38fd1498Szrj delete_related_insns (insn);
3636*38fd1498Szrj for (i = 1; i < XVECLEN (pat, 0); i++)
3637*38fd1498Szrj {
3638*38fd1498Szrj rtx_insn *in_seq_insn = as_a<rtx_insn *> (XVECEXP (pat, 0, i));
3639*38fd1498Szrj prev = emit_insn_after_setloc (PATTERN (in_seq_insn), prev,
3640*38fd1498Szrj INSN_LOCATION (in_seq_insn));
3641*38fd1498Szrj }
3642*38fd1498Szrj
3643*38fd1498Szrj insn = emit_jump_insn_after_setloc (PATTERN (jump_insn), prev,
3644*38fd1498Szrj INSN_LOCATION (jump_insn));
3645*38fd1498Szrj emit_barrier_after (insn);
3646*38fd1498Szrj
3647*38fd1498Szrj if (slots)
3648*38fd1498Szrj obstack_ptr_grow (&unfilled_slots_obstack, insn);
3649*38fd1498Szrj }
3650*38fd1498Szrj else
3651*38fd1498Szrj /* It is probably more efficient to keep this with its current
3652*38fd1498Szrj delay slot as a branch to a RETURN. */
3653*38fd1498Szrj reorg_redirect_jump (jump_insn, real_label);
3654*38fd1498Szrj }
3655*38fd1498Szrj
3656*38fd1498Szrj /* Now delete REAL_RETURN_LABEL if we never used it. Then try to fill any
3657*38fd1498Szrj new delay slots we have created. */
3658*38fd1498Szrj if (real_return_label != NULL_RTX && --LABEL_NUSES (real_return_label) == 0)
3659*38fd1498Szrj delete_related_insns (real_return_label);
3660*38fd1498Szrj if (real_simple_return_label != NULL_RTX
3661*38fd1498Szrj && --LABEL_NUSES (real_simple_return_label) == 0)
3662*38fd1498Szrj delete_related_insns (real_simple_return_label);
3663*38fd1498Szrj
3664*38fd1498Szrj fill_simple_delay_slots (1);
3665*38fd1498Szrj fill_simple_delay_slots (0);
3666*38fd1498Szrj }
3667*38fd1498Szrj
3668*38fd1498Szrj /* Try to find insns to place in delay slots. */
3669*38fd1498Szrj
3670*38fd1498Szrj static void
3671*38fd1498Szrj dbr_schedule (rtx_insn *first)
3672*38fd1498Szrj {
3673*38fd1498Szrj rtx_insn *insn, *next, *epilogue_insn = 0;
3674*38fd1498Szrj int i;
3675*38fd1498Szrj bool need_return_insns;
3676*38fd1498Szrj
3677*38fd1498Szrj /* If the current function has no insns other than the prologue and
3678*38fd1498Szrj epilogue, then do not try to fill any delay slots. */
3679*38fd1498Szrj if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
3680*38fd1498Szrj return;
3681*38fd1498Szrj
3682*38fd1498Szrj /* Find the highest INSN_UID and allocate and initialize our map from
3683*38fd1498Szrj INSN_UIDs to position in code. */
3684*38fd1498Szrj for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
3685*38fd1498Szrj {
3686*38fd1498Szrj if (INSN_UID (insn) > max_uid)
3687*38fd1498Szrj max_uid = INSN_UID (insn);
3688*38fd1498Szrj if (NOTE_P (insn)
3689*38fd1498Szrj && NOTE_KIND (insn) == NOTE_INSN_EPILOGUE_BEG)
3690*38fd1498Szrj epilogue_insn = insn;
3691*38fd1498Szrj }
3692*38fd1498Szrj
3693*38fd1498Szrj uid_to_ruid = XNEWVEC (int, max_uid + 1);
3694*38fd1498Szrj for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
3695*38fd1498Szrj uid_to_ruid[INSN_UID (insn)] = i;
3696*38fd1498Szrj
3697*38fd1498Szrj /* Initialize the list of insns that need filling. */
3698*38fd1498Szrj if (unfilled_firstobj == 0)
3699*38fd1498Szrj {
3700*38fd1498Szrj gcc_obstack_init (&unfilled_slots_obstack);
3701*38fd1498Szrj unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
3702*38fd1498Szrj }
3703*38fd1498Szrj
3704*38fd1498Szrj for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
3705*38fd1498Szrj {
3706*38fd1498Szrj rtx target;
3707*38fd1498Szrj
3708*38fd1498Szrj /* Skip vector tables. We can't get attributes for them. */
3709*38fd1498Szrj if (JUMP_TABLE_DATA_P (insn))
3710*38fd1498Szrj continue;
3711*38fd1498Szrj
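/* Clear the delay-slot bookkeeping bits before this pass starts using
   them.  */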
3712*38fd1498Szrj if (JUMP_P (insn))
3713*38fd1498Szrj INSN_ANNULLED_BRANCH_P (insn) = 0;
3714*38fd1498Szrj INSN_FROM_TARGET_P (insn) = 0;
3715*38fd1498Szrj
3716*38fd1498Szrj if (num_delay_slots (insn) > 0)
3717*38fd1498Szrj obstack_ptr_grow (&unfilled_slots_obstack, insn);
3718*38fd1498Szrj
3719*38fd1498Szrj /* Ensure all jumps go to the last of a set of consecutive labels. */
3720*38fd1498Szrj if (JUMP_P (insn)
3721*38fd1498Szrj && (condjump_p (insn) || condjump_in_parallel_p (insn))
3722*38fd1498Szrj && !ANY_RETURN_P (JUMP_LABEL (insn))
3723*38fd1498Szrj && ((target = skip_consecutive_labels (JUMP_LABEL (insn)))
3724*38fd1498Szrj != JUMP_LABEL (insn)))
3725*38fd1498Szrj redirect_jump (as_a <rtx_jump_insn *> (insn), target, 1);
3726*38fd1498Szrj }
3727*38fd1498Szrj
3728*38fd1498Szrj init_resource_info (epilogue_insn);
3729*38fd1498Szrj
3730*38fd1498Szrj /* Show we haven't computed an end-of-function label yet. */
3731*38fd1498Szrj function_return_label = function_simple_return_label = NULL;
3732*38fd1498Szrj
3733*38fd1498Szrj /* Initialize the statistics for this function. */
3734*38fd1498Szrj memset (num_insns_needing_delays, 0, sizeof num_insns_needing_delays);
3735*38fd1498Szrj memset (num_filled_delays, 0, sizeof num_filled_delays);
3736*38fd1498Szrj
3737*38fd1498Szrj /* Now do the delay slot filling. Try everything twice in case earlier
3738*38fd1498Szrj changes make more slots fillable. */
3739*38fd1498Szrj
3740*38fd1498Szrj for (reorg_pass_number = 0;
3741*38fd1498Szrj reorg_pass_number < MAX_REORG_PASSES;
3742*38fd1498Szrj reorg_pass_number++)
3743*38fd1498Szrj {
3744*38fd1498Szrj fill_simple_delay_slots (1);
3745*38fd1498Szrj fill_simple_delay_slots (0);
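/* Skip the speculative filler on targets that do not permit speculation
   in delay slots.  */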
3746*38fd1498Szrj if (!targetm.no_speculation_in_delay_slots_p ())
3747*38fd1498Szrj fill_eager_delay_slots ();
3748*38fd1498Szrj relax_delay_slots (first);
3749*38fd1498Szrj }
3750*38fd1498Szrj
3751*38fd1498Szrj /* If we made an end of function label, indicate that it is now
3752*38fd1498Szrj safe to delete it by undoing our prior adjustment to LABEL_NUSES.
3753*38fd1498Szrj If it is now unused, delete it. */
3754*38fd1498Szrj if (function_return_label && --LABEL_NUSES (function_return_label) == 0)
3755*38fd1498Szrj delete_related_insns (function_return_label);
3756*38fd1498Szrj if (function_simple_return_label
3757*38fd1498Szrj && --LABEL_NUSES (function_simple_return_label) == 0)
3758*38fd1498Szrj delete_related_insns (function_simple_return_label);
3759*38fd1498Szrj
3760*38fd1498Szrj need_return_insns = false;
3761*38fd1498Szrj need_return_insns |= targetm.have_return () && function_return_label != 0;
3762*38fd1498Szrj need_return_insns |= (targetm.have_simple_return ()
3763*38fd1498Szrj && function_simple_return_label != 0);
3764*38fd1498Szrj if (need_return_insns)
3765*38fd1498Szrj make_return_insns (first);
3766*38fd1498Szrj
3767*38fd1498Szrj /* Delete any USE insns made by update_block; subsequent passes don't need
3768*38fd1498Szrj them or know how to deal with them. */
3769*38fd1498Szrj for (insn = first; insn; insn = next)
3770*38fd1498Szrj {
3771*38fd1498Szrj next = NEXT_INSN (insn);
3772*38fd1498Szrj
3773*38fd1498Szrj if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE
3774*38fd1498Szrj && INSN_P (XEXP (PATTERN (insn), 0)))
3775*38fd1498Szrj next = delete_related_insns (insn);
3776*38fd1498Szrj }
3777*38fd1498Szrj
3778*38fd1498Szrj obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
3779*38fd1498Szrj
3780*38fd1498Szrj /* It is not clear why the line below is needed, but it does seem to be. */
3781*38fd1498Szrj unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
3782*38fd1498Szrj
3783*38fd1498Szrj if (dump_file)
3784*38fd1498Szrj {
3785*38fd1498Szrj int i, j, need_comma;
3786*38fd1498Szrj int total_delay_slots[MAX_DELAY_HISTOGRAM + 1];
3787*38fd1498Szrj int total_annul_slots[MAX_DELAY_HISTOGRAM + 1];
3788*38fd1498Szrj
3789*38fd1498Szrj for (reorg_pass_number = 0;
3790*38fd1498Szrj reorg_pass_number < MAX_REORG_PASSES;
3791*38fd1498Szrj reorg_pass_number++)
3792*38fd1498Szrj {
3793*38fd1498Szrj fprintf (dump_file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
3794*38fd1498Szrj for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
3795*38fd1498Szrj {
3796*38fd1498Szrj need_comma = 0;
3797*38fd1498Szrj fprintf (dump_file, ";; Reorg function #%d\n", i);
3798*38fd1498Szrj
3799*38fd1498Szrj fprintf (dump_file, ";; %d insns needing delay slots\n;; ",
3800*38fd1498Szrj num_insns_needing_delays[i][reorg_pass_number]);
3801*38fd1498Szrj
3802*38fd1498Szrj for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3803*38fd1498Szrj if (num_filled_delays[i][j][reorg_pass_number])
3804*38fd1498Szrj {
3805*38fd1498Szrj if (need_comma)
3806*38fd1498Szrj fprintf (dump_file, ", ");
3807*38fd1498Szrj need_comma = 1;
3808*38fd1498Szrj fprintf (dump_file, "%d got %d delays",
3809*38fd1498Szrj num_filled_delays[i][j][reorg_pass_number], j);
3810*38fd1498Szrj }
3811*38fd1498Szrj fprintf (dump_file, "\n");
3812*38fd1498Szrj }
3813*38fd1498Szrj }
3814*38fd1498Szrj memset (total_delay_slots, 0, sizeof total_delay_slots);
3815*38fd1498Szrj memset (total_annul_slots, 0, sizeof total_annul_slots);
3816*38fd1498Szrj for (insn = first; insn; insn = NEXT_INSN (insn))
3817*38fd1498Szrj {
3818*38fd1498Szrj if (! insn->deleted ()
3819*38fd1498Szrj && NONJUMP_INSN_P (insn)
3820*38fd1498Szrj && GET_CODE (PATTERN (insn)) != USE
3821*38fd1498Szrj && GET_CODE (PATTERN (insn)) != CLOBBER)
3822*38fd1498Szrj {
3823*38fd1498Szrj if (GET_CODE (PATTERN (insn)) == SEQUENCE)
3824*38fd1498Szrj {
3825*38fd1498Szrj rtx control;
3826*38fd1498Szrj j = XVECLEN (PATTERN (insn), 0) - 1;
3827*38fd1498Szrj if (j > MAX_DELAY_HISTOGRAM)
3828*38fd1498Szrj j = MAX_DELAY_HISTOGRAM;
3829*38fd1498Szrj control = XVECEXP (PATTERN (insn), 0, 0);
3830*38fd1498Szrj if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
3831*38fd1498Szrj total_annul_slots[j]++;
3832*38fd1498Szrj else
3833*38fd1498Szrj total_delay_slots[j]++;
3834*38fd1498Szrj }
3835*38fd1498Szrj else if (num_delay_slots (insn) > 0)
3836*38fd1498Szrj total_delay_slots[0]++;
3837*38fd1498Szrj }
3838*38fd1498Szrj }
3839*38fd1498Szrj fprintf (dump_file, ";; Reorg totals: ");
3840*38fd1498Szrj need_comma = 0;
3841*38fd1498Szrj for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3842*38fd1498Szrj {
3843*38fd1498Szrj if (total_delay_slots[j])
3844*38fd1498Szrj {
3845*38fd1498Szrj if (need_comma)
3846*38fd1498Szrj fprintf (dump_file, ", ");
3847*38fd1498Szrj need_comma = 1;
3848*38fd1498Szrj fprintf (dump_file, "%d got %d delays", total_delay_slots[j], j);
3849*38fd1498Szrj }
3850*38fd1498Szrj }
3851*38fd1498Szrj fprintf (dump_file, "\n");
3852*38fd1498Szrj
3853*38fd1498Szrj if (ANNUL_IFTRUE_SLOTS || ANNUL_IFFALSE_SLOTS)
3854*38fd1498Szrj {
3855*38fd1498Szrj fprintf (dump_file, ";; Reorg annuls: ");
3856*38fd1498Szrj need_comma = 0;
3857*38fd1498Szrj for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
3858*38fd1498Szrj {
3859*38fd1498Szrj if (total_annul_slots[j])
3860*38fd1498Szrj {
3861*38fd1498Szrj if (need_comma)
3862*38fd1498Szrj fprintf (dump_file, ", ");
3863*38fd1498Szrj need_comma = 1;
3864*38fd1498Szrj fprintf (dump_file, "%d got %d delays", total_annul_slots[j], j);
3865*38fd1498Szrj }
3866*38fd1498Szrj }
3867*38fd1498Szrj fprintf (dump_file, "\n");
3868*38fd1498Szrj }
3869*38fd1498Szrj
3870*38fd1498Szrj fprintf (dump_file, "\n");
3871*38fd1498Szrj }
3872*38fd1498Szrj
3873*38fd1498Szrj if (!sibling_labels.is_empty ())
3874*38fd1498Szrj {
3875*38fd1498Szrj update_alignments (sibling_labels);
3876*38fd1498Szrj sibling_labels.release ();
3877*38fd1498Szrj }
3878*38fd1498Szrj
3879*38fd1498Szrj free_resource_info ();
3880*38fd1498Szrj free (uid_to_ruid);
3881*38fd1498Szrj crtl->dbr_scheduled_p = true;
3882*38fd1498Szrj }
3883*38fd1498Szrj
3884*38fd1498Szrj /* Run delay slot optimization. */
3885*38fd1498Szrj static unsigned int
3886*38fd1498Szrj rest_of_handle_delay_slots (void)
3887*38fd1498Szrj {
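  /* DELAY_SLOTS is nonzero only when the target's machine description
     defines delay slots.  */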
3888*38fd1498Szrj if (DELAY_SLOTS)
3889*38fd1498Szrj dbr_schedule (get_insns ());
3890*38fd1498Szrj
3891*38fd1498Szrj return 0;
3892*38fd1498Szrj }
3893*38fd1498Szrj
3894*38fd1498Szrj namespace {
3895*38fd1498Szrj
3896*38fd1498Szrj const pass_data pass_data_delay_slots =
3897*38fd1498Szrj {
3898*38fd1498Szrj RTL_PASS, /* type */
3899*38fd1498Szrj "dbr", /* name */
3900*38fd1498Szrj OPTGROUP_NONE, /* optinfo_flags */
3901*38fd1498Szrj TV_DBR_SCHED, /* tv_id */
3902*38fd1498Szrj 0, /* properties_required */
3903*38fd1498Szrj 0, /* properties_provided */
3904*38fd1498Szrj 0, /* properties_destroyed */
3905*38fd1498Szrj 0, /* todo_flags_start */
3906*38fd1498Szrj 0, /* todo_flags_finish */
3907*38fd1498Szrj };
3908*38fd1498Szrj
3909*38fd1498Szrj class pass_delay_slots : public rtl_opt_pass
3910*38fd1498Szrj {
3911*38fd1498Szrj public:
3912*38fd1498Szrj pass_delay_slots (gcc::context *ctxt)
3913*38fd1498Szrj : rtl_opt_pass (pass_data_delay_slots, ctxt)
3914*38fd1498Szrj {}
3915*38fd1498Szrj
3916*38fd1498Szrj /* opt_pass methods: */
3917*38fd1498Szrj virtual bool gate (function *);
3918*38fd1498Szrj virtual unsigned int execute (function *)
3919*38fd1498Szrj {
3920*38fd1498Szrj return rest_of_handle_delay_slots ();
3921*38fd1498Szrj }
3922*38fd1498Szrj
3923*38fd1498Szrj }; // class pass_delay_slots
3924*38fd1498Szrj
3925*38fd1498Szrj bool
3926*38fd1498Szrj pass_delay_slots::gate (function *)
3927*38fd1498Szrj {
3928*38fd1498Szrj /* At -O0 dataflow info isn't updated after RA. */
3929*38fd1498Szrj if (DELAY_SLOTS)
3930*38fd1498Szrj return optimize > 0 && flag_delayed_branch && !crtl->dbr_scheduled_p;
3931*38fd1498Szrj
3932*38fd1498Szrj return false;
3933*38fd1498Szrj }
3934*38fd1498Szrj
3935*38fd1498Szrj } // anon namespace
3936*38fd1498Szrj
3937*38fd1498Szrj rtl_opt_pass *
3938*38fd1498Szrj make_pass_delay_slots (gcc::context *ctxt)
3939*38fd1498Szrj {
3940*38fd1498Szrj return new pass_delay_slots (ctxt);
3941*38fd1498Szrj }
3942*38fd1498Szrj
3943*38fd1498Szrj /* Machine dependent reorg pass. */
3944*38fd1498Szrj
3945*38fd1498Szrj namespace {
3946*38fd1498Szrj
3947*38fd1498Szrj const pass_data pass_data_machine_reorg =
3948*38fd1498Szrj {
3949*38fd1498Szrj RTL_PASS, /* type */
3950*38fd1498Szrj "mach", /* name */
3951*38fd1498Szrj OPTGROUP_NONE, /* optinfo_flags */
3952*38fd1498Szrj TV_MACH_DEP, /* tv_id */
3953*38fd1498Szrj 0, /* properties_required */
3954*38fd1498Szrj 0, /* properties_provided */
3955*38fd1498Szrj 0, /* properties_destroyed */
3956*38fd1498Szrj 0, /* todo_flags_start */
3957*38fd1498Szrj 0, /* todo_flags_finish */
3958*38fd1498Szrj };
3959*38fd1498Szrj
3960*38fd1498Szrj class pass_machine_reorg : public rtl_opt_pass
3961*38fd1498Szrj {
3962*38fd1498Szrj public:
3963*38fd1498Szrj pass_machine_reorg (gcc::context *ctxt)
3964*38fd1498Szrj : rtl_opt_pass (pass_data_machine_reorg, ctxt)
3965*38fd1498Szrj {}
3966*38fd1498Szrj
3967*38fd1498Szrj /* opt_pass methods: */
3968*38fd1498Szrj virtual bool gate (function *)
3969*38fd1498Szrj {
3970*38fd1498Szrj return targetm.machine_dependent_reorg != 0;
3971*38fd1498Szrj }
3972*38fd1498Szrj
3973*38fd1498Szrj virtual unsigned int execute (function *)
3974*38fd1498Szrj {
3975*38fd1498Szrj targetm.machine_dependent_reorg ();
3976*38fd1498Szrj return 0;
3977*38fd1498Szrj }
3978*38fd1498Szrj
3979*38fd1498Szrj }; // class pass_machine_reorg
3980*38fd1498Szrj
3981*38fd1498Szrj } // anon namespace
3982*38fd1498Szrj
3983*38fd1498Szrj rtl_opt_pass *
3984*38fd1498Szrj make_pass_machine_reorg (gcc::context *ctxt)
3985*38fd1498Szrj {
3986*38fd1498Szrj return new pass_machine_reorg (ctxt);
3987*38fd1498Szrj }