xref: /dflybsd-src/contrib/gcc-8.0/gcc/gimple-ssa-split-paths.c (revision 38fd149817dfbff97799f62fcb70be98c4e32523)
1*38fd1498Szrj /* Support routines for Splitting Paths to loop backedges
2*38fd1498Szrj    Copyright (C) 2015-2018 Free Software Foundation, Inc.
3*38fd1498Szrj    Contributed by Ajit Kumar Agarwal <ajitkum@xilinx.com>.
4*38fd1498Szrj 
5*38fd1498Szrj  This file is part of GCC.
6*38fd1498Szrj 
7*38fd1498Szrj  GCC is free software; you can redistribute it and/or modify
8*38fd1498Szrj  it under the terms of the GNU General Public License as published by
9*38fd1498Szrj  the Free Software Foundation; either version 3, or (at your option)
10*38fd1498Szrj  any later version.
11*38fd1498Szrj 
12*38fd1498Szrj GCC is distributed in the hope that it will be useful,
13*38fd1498Szrj but WITHOUT ANY WARRANTY; without even the implied warranty of
14*38fd1498Szrj MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15*38fd1498Szrj GNU General Public License for more details.
16*38fd1498Szrj 
17*38fd1498Szrj You should have received a copy of the GNU General Public License
18*38fd1498Szrj along with GCC; see the file COPYING3.  If not see
19*38fd1498Szrj <http://www.gnu.org/licenses/>.  */
20*38fd1498Szrj 
21*38fd1498Szrj #include "config.h"
22*38fd1498Szrj #include "system.h"
23*38fd1498Szrj #include "coretypes.h"
24*38fd1498Szrj #include "backend.h"
25*38fd1498Szrj #include "tree.h"
26*38fd1498Szrj #include "gimple.h"
27*38fd1498Szrj #include "tree-pass.h"
28*38fd1498Szrj #include "tree-cfg.h"
29*38fd1498Szrj #include "cfganal.h"
30*38fd1498Szrj #include "cfgloop.h"
31*38fd1498Szrj #include "gimple-iterator.h"
32*38fd1498Szrj #include "tracer.h"
33*38fd1498Szrj #include "predict.h"
34*38fd1498Szrj #include "params.h"
35*38fd1498Szrj #include "gimple-ssa.h"
36*38fd1498Szrj #include "tree-phinodes.h"
37*38fd1498Szrj #include "ssa-iterators.h"
38*38fd1498Szrj 
39*38fd1498Szrj /* Given LATCH, the latch block in a loop, see if the shape of the
40*38fd1498Szrj    path reaching LATCH is suitable for being split by duplication.
41*38fd1498Szrj    If so, return the block that will be duplicated into its predecessor
42*38fd1498Szrj    paths.  Else return NULL.  */
43*38fd1498Szrj 
44*38fd1498Szrj static basic_block
find_block_to_duplicate_for_splitting_paths(basic_block latch)45*38fd1498Szrj find_block_to_duplicate_for_splitting_paths (basic_block latch)
46*38fd1498Szrj {
47*38fd1498Szrj   /* We should have simple latches at this point.  So the latch should
48*38fd1498Szrj      have a single successor.  This implies the predecessor of the latch
49*38fd1498Szrj      likely has the loop exit.  And it's that predecessor we're most
50*38fd1498Szrj      interested in. To keep things simple, we're going to require that
51*38fd1498Szrj      the latch have a single predecessor too.  */
52*38fd1498Szrj   if (single_succ_p (latch) && single_pred_p (latch))
53*38fd1498Szrj     {
54*38fd1498Szrj       basic_block bb = get_immediate_dominator (CDI_DOMINATORS, latch);
55*38fd1498Szrj       gcc_assert (single_pred_edge (latch)->src == bb);
56*38fd1498Szrj 
57*38fd1498Szrj       /* If BB has been marked as not to be duplicated, then honor that
58*38fd1498Szrj 	 request.  */
59*38fd1498Szrj       if (ignore_bb_p (bb))
60*38fd1498Szrj 	return NULL;
61*38fd1498Szrj 
62*38fd1498Szrj       gimple *last = gsi_stmt (gsi_last_nondebug_bb (bb));
63*38fd1498Szrj       /* The immediate dominator of the latch must end in a conditional.  */
64*38fd1498Szrj       if (!last || gimple_code (last) != GIMPLE_COND)
65*38fd1498Szrj 	return NULL;
66*38fd1498Szrj 
67*38fd1498Szrj       /* We're hoping that BB is a join point for an IF-THEN-ELSE diamond
68*38fd1498Szrj 	 region.  Verify that it is.
69*38fd1498Szrj 
70*38fd1498Szrj 	 First, verify that BB has two predecessors (each arm of the
71*38fd1498Szrj 	 IF-THEN-ELSE) and two successors (the latch and exit).  */
72*38fd1498Szrj       if (EDGE_COUNT (bb->preds) == 2 && EDGE_COUNT (bb->succs) == 2)
73*38fd1498Szrj 	{
74*38fd1498Szrj 	  /* Now verify that BB's immediate dominator ends in a
75*38fd1498Szrj 	     conditional as well.  */
76*38fd1498Szrj 	  basic_block bb_idom = get_immediate_dominator (CDI_DOMINATORS, bb);
77*38fd1498Szrj 	  gimple *last = gsi_stmt (gsi_last_nondebug_bb (bb_idom));
78*38fd1498Szrj 	  if (!last || gimple_code (last) != GIMPLE_COND)
79*38fd1498Szrj 	    return NULL;
80*38fd1498Szrj 
81*38fd1498Szrj 	  /* And that BB's immediate dominator's successors are the
82*38fd1498Szrj 	     predecessors of BB or BB itself.  */
83*38fd1498Szrj 	  if (!(EDGE_PRED (bb, 0)->src == bb_idom
84*38fd1498Szrj 		|| find_edge (bb_idom, EDGE_PRED (bb, 0)->src))
85*38fd1498Szrj 	      || !(EDGE_PRED (bb, 1)->src == bb_idom
86*38fd1498Szrj 		   || find_edge (bb_idom, EDGE_PRED (bb, 1)->src)))
87*38fd1498Szrj 	    return NULL;
88*38fd1498Szrj 
89*38fd1498Szrj 	  /* And that the predecessors of BB each have a single successor
90*38fd1498Szrj 	     or are BB's immediate domiator itself.  */
91*38fd1498Szrj 	  if (!(EDGE_PRED (bb, 0)->src == bb_idom
92*38fd1498Szrj 		|| single_succ_p (EDGE_PRED (bb, 0)->src))
93*38fd1498Szrj 	      || !(EDGE_PRED (bb, 1)->src == bb_idom
94*38fd1498Szrj 		   || single_succ_p (EDGE_PRED (bb, 1)->src)))
95*38fd1498Szrj 	    return NULL;
96*38fd1498Szrj 
97*38fd1498Szrj 	  /* So at this point we have a simple diamond for an IF-THEN-ELSE
98*38fd1498Szrj 	     construct starting at BB_IDOM, with a join point at BB.  BB
99*38fd1498Szrj 	     pass control outside the loop or to the loop latch.
100*38fd1498Szrj 
101*38fd1498Szrj 	     We're going to want to create two duplicates of BB, one for
102*38fd1498Szrj 	     each successor of BB_IDOM.  */
103*38fd1498Szrj 	  return bb;
104*38fd1498Szrj 	}
105*38fd1498Szrj     }
106*38fd1498Szrj   return NULL;
107*38fd1498Szrj }
108*38fd1498Szrj 
109*38fd1498Szrj /* Return the number of non-debug statements in a block.  */
110*38fd1498Szrj static unsigned int
count_stmts_in_block(basic_block bb)111*38fd1498Szrj count_stmts_in_block (basic_block bb)
112*38fd1498Szrj {
113*38fd1498Szrj   gimple_stmt_iterator gsi;
114*38fd1498Szrj   unsigned int num_stmts = 0;
115*38fd1498Szrj 
116*38fd1498Szrj   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
117*38fd1498Szrj     {
118*38fd1498Szrj       gimple *stmt = gsi_stmt (gsi);
119*38fd1498Szrj       if (!is_gimple_debug (stmt))
120*38fd1498Szrj 	num_stmts++;
121*38fd1498Szrj     }
122*38fd1498Szrj   return num_stmts;
123*38fd1498Szrj }
124*38fd1498Szrj 
125*38fd1498Szrj /* Return TRUE if CODE represents a tree code that is not likely to
126*38fd1498Szrj    be easily if-convertable because it likely expands into multiple
127*38fd1498Szrj    insns, FALSE otherwise.  */
128*38fd1498Szrj static bool
poor_ifcvt_candidate_code(enum tree_code code)129*38fd1498Szrj poor_ifcvt_candidate_code (enum tree_code code)
130*38fd1498Szrj {
131*38fd1498Szrj   return (code == MIN_EXPR
132*38fd1498Szrj 	  || code == MAX_EXPR
133*38fd1498Szrj 	  || code == ABS_EXPR
134*38fd1498Szrj 	  || code == COND_EXPR
135*38fd1498Szrj 	  || code == CALL_EXPR);
136*38fd1498Szrj }
137*38fd1498Szrj 
/* Return TRUE if BB is a reasonable block to duplicate by examining
   its size, false otherwise.  BB will always be a loop latch block.

   Things to consider:

     We do not want to spoil if-conversion if at all possible.

     Most of the benefit seems to be from eliminating the unconditional
     jump rather than CSE/DCE opportunities.  So favor duplicating
     small latches.  A latch with just a conditional branch is ideal.

     CSE/DCE opportunities crop up when statements from the predecessors
     feed statements in the latch and allow statements in the latch to
     simplify.  */

static bool
is_feasible_trace (basic_block bb)
{
  basic_block pred1 = EDGE_PRED (bb, 0)->src;
  basic_block pred2 = EDGE_PRED (bb, 1)->src;
  int num_stmts_in_join = count_stmts_in_block (bb);
  /* Only count a predecessor's statements when BB is its sole successor;
     a predecessor with more successors is treated as empty here.  */
  int num_stmts_in_pred1
    = EDGE_COUNT (pred1->succs) == 1 ? count_stmts_in_block (pred1) : 0;
  int num_stmts_in_pred2
    = EDGE_COUNT (pred2->succs) == 1 ? count_stmts_in_block (pred2) : 0;

  /* This is meant to catch cases that are likely opportunities for
     if-conversion.  Essentially we look for the case where
     BB's predecessors are both single statement blocks where
     the output of that statement feed the same PHI in BB.  */
  if (num_stmts_in_pred1 == 1 && num_stmts_in_pred2 == 1)
    {
      gimple *stmt1 = last_and_only_stmt (pred1);
      gimple *stmt2 = last_and_only_stmt (pred2);

      if (stmt1 && stmt2
	  && gimple_code (stmt1) == GIMPLE_ASSIGN
	  && gimple_code (stmt2) == GIMPLE_ASSIGN)
	{
	  enum tree_code code1 = gimple_assign_rhs_code (stmt1);
	  enum tree_code code2 = gimple_assign_rhs_code (stmt2);

	  /* Only reject the trace when both arms look cheap enough to
	     if-convert; an expensive arm makes if-conversion unlikely
	     regardless of what we do here.  */
	  if (!poor_ifcvt_candidate_code (code1)
	      && !poor_ifcvt_candidate_code (code2))
	    {
	      tree lhs1 = gimple_assign_lhs (stmt1);
	      tree lhs2 = gimple_assign_lhs (stmt2);
	      gimple_stmt_iterator gsi;
	      /* Look for a PHI in BB merging exactly the two arm
		 results, in either argument order.  */
	      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
		{
		  gimple *phi = gsi_stmt (gsi);
		  if ((gimple_phi_arg_def (phi, 0) == lhs1
		       && gimple_phi_arg_def (phi, 1) == lhs2)
		      || (gimple_phi_arg_def (phi, 1) == lhs1
			  && gimple_phi_arg_def (phi, 0) == lhs2))
		    {
		      if (dump_file && (dump_flags & TDF_DETAILS))
			fprintf (dump_file,
				 "Block %d appears to be a join point for "
				 "if-convertable diamond.\n",
				 bb->index);
		      return false;
		    }
		}
	    }
	}
    }

  /* If the joiner has no PHIs with useful uses there is zero chance
     of CSE/DCE/jump-threading possibilities exposed by duplicating it.  */
  bool found_useful_phi = false;
  for (gphi_iterator si = gsi_start_phis (bb); ! gsi_end_p (si);
       gsi_next (&si))
    {
      gphi *phi = si.phi ();
      use_operand_p use_p;
      imm_use_iterator iter;
      FOR_EACH_IMM_USE_FAST (use_p, iter, gimple_phi_result (phi))
	{
	  gimple *stmt = USE_STMT (use_p);
	  if (is_gimple_debug (stmt))
	    continue;
	  /* If there's a use in the joiner this might be a CSE/DCE
	     opportunity.  */
	  if (gimple_bb (stmt) == bb)
	    {
	      found_useful_phi = true;
	      break;
	    }
	  /* If the use is on a loop header PHI and on one path the
	     value is unchanged this might expose a jump threading
	     opportunity.  */
	  if (gimple_code (stmt) == GIMPLE_PHI
	      && gimple_bb (stmt) == bb->loop_father->header
	      /* But for memory the PHI alone isn't good enough.  */
	      && ! virtual_operand_p (gimple_phi_result (stmt)))
	    {
	      /* Does some PHI argument simply carry the header PHI's
		 value around the loop unchanged?  */
	      bool found_unchanged_path = false;
	      for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
		if (gimple_phi_arg_def (phi, i) == gimple_phi_result (stmt))
		  {
		    found_unchanged_path = true;
		    break;
		  }
	      /* If we found an unchanged path this can only be a threading
	         opportunity if we have uses of the loop header PHI result
		 in a stmt dominating the merge block.  Otherwise the
		 splitting may prevent if-conversion.  */
	      if (found_unchanged_path)
		{
		  use_operand_p use2_p;
		  imm_use_iterator iter2;
		  FOR_EACH_IMM_USE_FAST (use2_p, iter2, gimple_phi_result (stmt))
		    {
		      gimple *use_stmt = USE_STMT (use2_p);
		      if (is_gimple_debug (use_stmt))
			continue;
		      basic_block use_bb = gimple_bb (use_stmt);
		      if (use_bb != bb
			  && dominated_by_p (CDI_DOMINATORS, bb, use_bb))
			{
			  /* Only an equality test against the PHI result
			     is simple enough to thread through.  */
			  if (gcond *cond = dyn_cast <gcond *> (use_stmt))
			    if (gimple_cond_code (cond) == EQ_EXPR
				|| gimple_cond_code (cond) == NE_EXPR)
			      found_useful_phi = true;
			  break;
			}
		    }
		}
	      if (found_useful_phi)
		break;
	    }
	}
      if (found_useful_phi)
	break;
    }
  /* There is one exception namely a controlling condition we can propagate
     an equivalence from to the joiner.  */
  bool found_cprop_opportunity = false;
  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
  gcond *cond = as_a <gcond *> (last_stmt (dom));
  if (gimple_cond_code (cond) == EQ_EXPR
      || gimple_cond_code (cond) == NE_EXPR)
    for (unsigned i = 0; i < 2; ++i)
      {
	tree op = gimple_op (cond, i);
	if (TREE_CODE (op) == SSA_NAME)
	  {
	    use_operand_p use_p;
	    imm_use_iterator iter;
	    /* Any non-debug use of the compared SSA name inside the
	       joiner can pick up the equivalence.  */
	    FOR_EACH_IMM_USE_FAST (use_p, iter, op)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		if (gimple_bb (USE_STMT (use_p)) == bb)
		  {
		    found_cprop_opportunity = true;
		    break;
		  }
	      }
	  }
	if (found_cprop_opportunity)
	  break;
      }

  if (! found_useful_phi && ! found_cprop_opportunity)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Block %d is a join that does not expose CSE/DCE/jump-thread "
		 "opportunities when duplicated.\n",
		 bb->index);
      return false;
    }

  /* We may want something here which looks at dataflow and tries
     to guess if duplication of BB is likely to result in simplification
     of instructions in BB in either the original or the duplicate.  */

  /* Upper hard limit on the number of statements to copy.  */
  if (num_stmts_in_join
      >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
    return false;

  return true;
}
324*38fd1498Szrj 
325*38fd1498Szrj /* If the immediate dominator of the latch of the loop is
326*38fd1498Szrj    block with conditional branch, then the loop latch  is
327*38fd1498Szrj    duplicated to its predecessors path preserving the SSA
328*38fd1498Szrj    semantics.
329*38fd1498Szrj 
330*38fd1498Szrj    CFG before transformation.
331*38fd1498Szrj 
332*38fd1498Szrj               2
333*38fd1498Szrj               |
334*38fd1498Szrj               |
335*38fd1498Szrj         +---->3
336*38fd1498Szrj         |    / \
337*38fd1498Szrj         |   /   \
338*38fd1498Szrj         |  4     5
339*38fd1498Szrj         |   \   /
340*38fd1498Szrj         |    \ /
341*38fd1498Szrj         |     6
342*38fd1498Szrj         |    / \
343*38fd1498Szrj         |   /   \
344*38fd1498Szrj         |  8     7
345*38fd1498Szrj         |  |     |
346*38fd1498Szrj         ---+     E
347*38fd1498Szrj 
348*38fd1498Szrj 
349*38fd1498Szrj 
350*38fd1498Szrj     Block 8 is the latch.  We're going to make copies of block 6 (9 & 10)
351*38fd1498Szrj     and wire things up so they look like this:
352*38fd1498Szrj 
353*38fd1498Szrj               2
354*38fd1498Szrj               |
355*38fd1498Szrj               |
356*38fd1498Szrj         +---->3
357*38fd1498Szrj         |    / \
358*38fd1498Szrj         |   /   \
359*38fd1498Szrj         |  4     5
360*38fd1498Szrj         |  |     |
361*38fd1498Szrj         |  |     |
362*38fd1498Szrj         |  9    10
363*38fd1498Szrj         |  |\   /|
364*38fd1498Szrj         |  | \ / |
365*38fd1498Szrj         |  |  7  |
366*38fd1498Szrj         |  |  |  |
367*38fd1498Szrj         |  |  E  |
368*38fd1498Szrj         |  |     |
369*38fd1498Szrj         |   \   /
370*38fd1498Szrj         |    \ /
371*38fd1498Szrj         +-----8
372*38fd1498Szrj 
373*38fd1498Szrj 
374*38fd1498Szrj     Blocks 9 and 10 will get merged into blocks 4 & 5 respectively which
375*38fd1498Szrj     enables CSE, DCE and other optimizations to occur on a larger block
376*38fd1498Szrj     of code.   */
377*38fd1498Szrj 
/* Worker for the pass: walk all loops innermost-first and duplicate
   suitable join blocks into their predecessor paths.  Returns true if
   the CFG was changed.  */
static bool
split_paths ()
{
  bool changed = false;
  loop_p loop;

  /* Loop structures, original/copy tables and dominators are required
     by the candidate search and by transform_duplicate; torn down at
     the end of this function.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  initialize_original_copy_tables ();
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* Only split paths if we are optimizing this loop for speed.  */
      if (!optimize_loop_for_speed_p (loop))
	continue;

      /* See if there is a block that we can duplicate to split the
	 path to the loop latch.  */
      basic_block bb
	= find_block_to_duplicate_for_splitting_paths (loop->latch);

      /* BB is the merge point for an IF-THEN-ELSE we want to transform.

	 Essentially we want to create a duplicate of bb and redirect the
	 first predecessor of BB to the duplicate (leaving the second
	 predecessor as is.  This will split the path leading to the latch
	 re-using BB to avoid useless copying.  */
      if (bb && is_feasible_trace (bb))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Duplicating join block %d into predecessor paths\n",
		     bb->index);
	  /* Duplicate into the predecessor that has BB as its only
	     successor; the other predecessor keeps the original BB.  */
	  basic_block pred0 = EDGE_PRED (bb, 0)->src;
	  if (EDGE_COUNT (pred0->succs) != 1)
	    pred0 = EDGE_PRED (bb, 1)->src;
	  transform_duplicate (pred0, bb);
	  changed = true;

	  /* If BB has an outgoing edge marked as IRREDUCIBLE, then
	     duplicating BB may result in an irreducible region turning
	     into a natural loop.

	     Long term we might want to hook this into the block
	     duplication code, but as we've seen with similar changes
	     for edge removal, that can be somewhat risky.  */
	  if (EDGE_SUCC (bb, 0)->flags & EDGE_IRREDUCIBLE_LOOP
	      || EDGE_SUCC (bb, 1)->flags & EDGE_IRREDUCIBLE_LOOP)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		  fprintf (dump_file,
			   "Join block %d has EDGE_IRREDUCIBLE_LOOP set.  "
			   "Scheduling loop fixups.\n",
			   bb->index);
	      loops_state_set (LOOPS_NEED_FIXUP);
	    }
	}
    }

  loop_optimizer_finalize ();
  free_original_copy_tables ();
  return changed;
}
441*38fd1498Szrj 
442*38fd1498Szrj /* Main entry point for splitting paths.  Returns TODO_cleanup_cfg if any
443*38fd1498Szrj    paths where split, otherwise return zero.  */
444*38fd1498Szrj 
445*38fd1498Szrj static unsigned int
execute_split_paths()446*38fd1498Szrj execute_split_paths ()
447*38fd1498Szrj {
448*38fd1498Szrj   /* If we don't have at least 2 real blocks and backedges in the
449*38fd1498Szrj      CFG, then there's no point in trying to perform path splitting.  */
450*38fd1498Szrj   if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
451*38fd1498Szrj       || !mark_dfs_back_edges ())
452*38fd1498Szrj     return 0;
453*38fd1498Szrj 
454*38fd1498Szrj   bool changed = split_paths();
455*38fd1498Szrj   if (changed)
456*38fd1498Szrj     free_dominance_info (CDI_DOMINATORS);
457*38fd1498Szrj 
458*38fd1498Szrj   return changed ? TODO_cleanup_cfg : 0;
459*38fd1498Szrj }
460*38fd1498Szrj 
461*38fd1498Szrj static bool
gate_split_paths()462*38fd1498Szrj gate_split_paths ()
463*38fd1498Szrj {
464*38fd1498Szrj   return flag_split_paths;
465*38fd1498Szrj }
466*38fd1498Szrj 
467*38fd1498Szrj namespace {
468*38fd1498Szrj 
/* Registration data for the path-splitting pass; requires SSA form and
   schedules an SSA update after the pass runs.  */
const pass_data pass_data_split_paths =
{
  GIMPLE_PASS, /* type */
  "split-paths", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_SPLIT_PATHS, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
481*38fd1498Szrj 
/* Pass object wrapping gate_split_paths/execute_split_paths in the
   opt_pass interface.  */
class pass_split_paths : public gimple_opt_pass
{
   public:
    pass_split_paths (gcc::context *ctxt)
      : gimple_opt_pass (pass_data_split_paths, ctxt)
    {}
   /* opt_pass methods: */
   /* clone is required because this pass appears more than once in the
      pass pipeline.  */
   opt_pass * clone () { return new pass_split_paths (m_ctxt); }
   virtual bool gate (function *) { return gate_split_paths (); }
   virtual unsigned int execute (function *) { return execute_split_paths (); }

}; // class pass_split_paths
494*38fd1498Szrj 
495*38fd1498Szrj } // anon namespace
496*38fd1498Szrj 
497*38fd1498Szrj gimple_opt_pass *
make_pass_split_paths(gcc::context * ctxt)498*38fd1498Szrj make_pass_split_paths (gcc::context *ctxt)
499*38fd1498Szrj {
500*38fd1498Szrj   return new pass_split_paths (ctxt);
501*38fd1498Szrj }
502