1*38fd1498Szrj /* High-level loop manipulation functions.
2*38fd1498Szrj Copyright (C) 2004-2018 Free Software Foundation, Inc.
3*38fd1498Szrj
4*38fd1498Szrj This file is part of GCC.
5*38fd1498Szrj
6*38fd1498Szrj GCC is free software; you can redistribute it and/or modify it
7*38fd1498Szrj under the terms of the GNU General Public License as published by the
8*38fd1498Szrj Free Software Foundation; either version 3, or (at your option) any
9*38fd1498Szrj later version.
10*38fd1498Szrj
11*38fd1498Szrj GCC is distributed in the hope that it will be useful, but WITHOUT
12*38fd1498Szrj ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13*38fd1498Szrj FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14*38fd1498Szrj for more details.
15*38fd1498Szrj
16*38fd1498Szrj You should have received a copy of the GNU General Public License
17*38fd1498Szrj along with GCC; see the file COPYING3. If not see
18*38fd1498Szrj <http://www.gnu.org/licenses/>. */
19*38fd1498Szrj
20*38fd1498Szrj #include "config.h"
21*38fd1498Szrj #include "system.h"
22*38fd1498Szrj #include "coretypes.h"
23*38fd1498Szrj #include "backend.h"
24*38fd1498Szrj #include "tree.h"
25*38fd1498Szrj #include "gimple.h"
26*38fd1498Szrj #include "cfghooks.h"
27*38fd1498Szrj #include "tree-pass.h" /* ??? for TODO_update_ssa but this isn't a pass. */
28*38fd1498Szrj #include "ssa.h"
29*38fd1498Szrj #include "gimple-pretty-print.h"
30*38fd1498Szrj #include "fold-const.h"
31*38fd1498Szrj #include "cfganal.h"
32*38fd1498Szrj #include "gimplify.h"
33*38fd1498Szrj #include "gimple-iterator.h"
34*38fd1498Szrj #include "gimplify-me.h"
35*38fd1498Szrj #include "tree-cfg.h"
36*38fd1498Szrj #include "tree-ssa-loop-ivopts.h"
37*38fd1498Szrj #include "tree-ssa-loop-manip.h"
38*38fd1498Szrj #include "tree-ssa-loop-niter.h"
39*38fd1498Szrj #include "tree-ssa-loop.h"
40*38fd1498Szrj #include "tree-into-ssa.h"
41*38fd1498Szrj #include "tree-ssa.h"
42*38fd1498Szrj #include "cfgloop.h"
43*38fd1498Szrj #include "tree-scalar-evolution.h"
44*38fd1498Szrj #include "params.h"
45*38fd1498Szrj #include "tree-inline.h"
46*38fd1498Szrj
/* All bitmaps for rewriting into loop-closed SSA go on this obstack,
   so that we can free them all at once.  */
static bitmap_obstack loop_renamer_obstack;
50*38fd1498Szrj
/* Creates an induction variable with value BASE + STEP * iteration in LOOP.
   It is expected that neither BASE nor STEP are shared with other expressions
   (unless the sharing rules allow this).  Use VAR as a base var_decl for it
   (if NULL, a new temporary will be created).  The increment will occur at
   INCR_POS (after it if AFTER is true, before it otherwise).  INCR_POS and
   AFTER can be computed using standard_iv_increment_position.  The ssa versions
   of the variable before and after increment will be stored in VAR_BEFORE and
   VAR_AFTER (unless they are NULL).  */

void
create_iv (tree base, tree step, tree var, struct loop *loop,
	   gimple_stmt_iterator *incr_pos, bool after,
	   tree *var_before, tree *var_after)
{
  gassign *stmt;
  gphi *phi;
  tree initial, step1;
  gimple_seq stmts;
  tree vb, va;
  enum tree_code incr_op = PLUS_EXPR;
  edge pe = loop_preheader_edge (loop);

  /* Create the two SSA versions of the IV: VB before the increment,
     VA after it.  */
  if (var != NULL_TREE)
    {
      vb = make_ssa_name (var);
      va = make_ssa_name (var);
    }
  else
    {
      vb = make_temp_ssa_name (TREE_TYPE (base), NULL, "ivtmp");
      va = make_temp_ssa_name (TREE_TYPE (base), NULL, "ivtmp");
    }
  if (var_before)
    *var_before = vb;
  if (var_after)
    *var_after = va;

  /* For easier readability of the created code, produce MINUS_EXPRs
     when suitable.  */
  if (TREE_CODE (step) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (step)))
	{
	  /* For an unsigned step, prefer the smaller of STEP and -STEP as
	     the printed constant (e.g. "i - 1" rather than "i + 0xff..ff").  */
	  step1 = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	  if (tree_int_cst_lt (step1, step))
	    {
	      incr_op = MINUS_EXPR;
	      step = step1;
	    }
	}
      else
	{
	  bool ovf;

	  /* For a signed negative step, emit "i - (-step)" when the
	     negation cannot overflow.  */
	  if (!tree_expr_nonnegative_warnv_p (step, &ovf)
	      && may_negate_without_overflow_p (step))
	    {
	      incr_op = MINUS_EXPR;
	      step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	    }
	}
    }
  if (POINTER_TYPE_P (TREE_TYPE (base)))
    {
      if (TREE_CODE (base) == ADDR_EXPR)
	mark_addressable (TREE_OPERAND (base, 0));
      /* Pointer IVs only advance via POINTER_PLUS_EXPR, so fold a chosen
	 MINUS back into a negated offset of ptrofftype.  */
      step = convert_to_ptrofftype (step);
      if (incr_op == MINUS_EXPR)
	step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
      incr_op = POINTER_PLUS_EXPR;
    }
  /* Gimplify the step if necessary.  We put the computations in front of the
     loop (i.e. the step should be loop invariant).  */
  step = force_gimple_operand (step, &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (pe, stmts);

  /* The increment statement VA = VB <incr_op> STEP at INCR_POS.  */
  stmt = gimple_build_assign (va, incr_op, vb, step);
  if (after)
    gsi_insert_after (incr_pos, stmt, GSI_NEW_STMT);
  else
    gsi_insert_before (incr_pos, stmt, GSI_NEW_STMT);

  /* Gimplify the initial value onto the preheader edge as well.  */
  initial = force_gimple_operand (base, &stmts, true, var);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (pe, stmts);

  /* Tie the IV together with a header PHI: the initial value arrives over
     the preheader edge, the incremented value over the latch edge.  */
  phi = create_phi_node (vb, loop->header);
  add_phi_arg (phi, initial, loop_preheader_edge (loop), UNKNOWN_LOCATION);
  add_phi_arg (phi, va, loop_latch_edge (loop), UNKNOWN_LOCATION);
}
142*38fd1498Szrj
143*38fd1498Szrj /* Return the innermost superloop LOOP of USE_LOOP that is a superloop of
144*38fd1498Szrj both DEF_LOOP and USE_LOOP. */
145*38fd1498Szrj
146*38fd1498Szrj static inline struct loop *
find_sibling_superloop(struct loop * use_loop,struct loop * def_loop)147*38fd1498Szrj find_sibling_superloop (struct loop *use_loop, struct loop *def_loop)
148*38fd1498Szrj {
149*38fd1498Szrj unsigned ud = loop_depth (use_loop);
150*38fd1498Szrj unsigned dd = loop_depth (def_loop);
151*38fd1498Szrj gcc_assert (ud > 0 && dd > 0);
152*38fd1498Szrj if (ud > dd)
153*38fd1498Szrj use_loop = superloop_at_depth (use_loop, dd);
154*38fd1498Szrj if (ud < dd)
155*38fd1498Szrj def_loop = superloop_at_depth (def_loop, ud);
156*38fd1498Szrj while (loop_outer (use_loop) != loop_outer (def_loop))
157*38fd1498Szrj {
158*38fd1498Szrj use_loop = loop_outer (use_loop);
159*38fd1498Szrj def_loop = loop_outer (def_loop);
160*38fd1498Szrj gcc_assert (use_loop && def_loop);
161*38fd1498Szrj }
162*38fd1498Szrj return use_loop;
163*38fd1498Szrj }
164*38fd1498Szrj
/* DEF_BB is a basic block containing a DEF that needs rewriting into
   loop-closed SSA form.  USE_BLOCKS is the set of basic blocks containing
   uses of DEF that "escape" from the loop containing DEF_BB (i.e. blocks in
   USE_BLOCKS are dominated by DEF_BB but not in the loop father of DEF_B).
   ALL_EXITS[I] is the set of all basic blocks that exit loop I.

   Compute the subset of LOOP_EXITS that exit the loop containing DEF_BB
   or one of its loop fathers, in which DEF is live.  This set is returned
   in the bitmap LIVE_EXITS.

   Instead of computing the complete livein set of the def, we use the loop
   nesting tree as a form of poor man's structure analysis.  This greatly
   speeds up the analysis, which is important because this function may be
   called on all SSA names that need rewriting, one at a time.  */

static void
compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
			 bitmap *loop_exits, basic_block def_bb)
{
  unsigned i;
  bitmap_iterator bi;
  struct loop *def_loop = def_bb->loop_father;
  unsigned def_loop_depth = loop_depth (def_loop);
  bitmap def_loop_exits;

  /* Normally the work list size is bounded by the number of basic
     blocks in the largest loop.  We don't know this number, but we
     can be fairly sure that it will be relatively small.  */
  auto_vec<basic_block> worklist (MAX (8, n_basic_blocks_for_fn (cfun) / 128));

  /* Seed the worklist: each use block, canonicalized to the header of the
     sibling superloop when the use is not nested inside DEF_LOOP.  */
  EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
    {
      basic_block use_bb = BASIC_BLOCK_FOR_FN (cfun, i);
      struct loop *use_loop = use_bb->loop_father;
      gcc_checking_assert (def_loop != use_loop
			   && ! flow_loop_nested_p (def_loop, use_loop));
      if (! flow_loop_nested_p (use_loop, def_loop))
	use_bb = find_sibling_superloop (use_loop, def_loop)->header;
      if (bitmap_set_bit (live_exits, use_bb->index))
	worklist.safe_push (use_bb);
    }

  /* Iterate until the worklist is empty.  */
  while (! worklist.is_empty ())
    {
      edge e;
      edge_iterator ei;

      /* Pull a block off the worklist.  */
      basic_block bb = worklist.pop ();

      /* Make sure we have at least enough room in the work list
	 for all predecessors of this block.  */
      worklist.reserve (EDGE_COUNT (bb->preds));

      /* For each predecessor block.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  basic_block pred = e->src;
	  struct loop *pred_loop = pred->loop_father;
	  unsigned pred_loop_depth = loop_depth (pred_loop);
	  bool pred_visited;

	  /* We should have met DEF_BB along the way.  */
	  gcc_assert (pred != ENTRY_BLOCK_PTR_FOR_FN (cfun));

	  if (pred_loop_depth >= def_loop_depth)
	    {
	      if (pred_loop_depth > def_loop_depth)
		pred_loop = superloop_at_depth (pred_loop, def_loop_depth);
	      /* If we've reached DEF_LOOP, our train ends here.  */
	      if (pred_loop == def_loop)
		continue;
	    }
	  else if (! flow_loop_nested_p (pred_loop, def_loop))
	    pred = find_sibling_superloop (pred_loop, def_loop)->header;

	  /* Add PRED to the LIVEIN set.  PRED_VISITED is true if
	     we had already added PRED to LIVEIN before.  */
	  pred_visited = !bitmap_set_bit (live_exits, pred->index);

	  /* If we have visited PRED before, don't add it to the worklist.
	     If BB dominates PRED, then we're probably looking at a loop.
	     We're only interested in looking up in the dominance tree
	     because DEF_BB dominates all the uses.  */
	  if (pred_visited || dominated_by_p (CDI_DOMINATORS, pred, bb))
	    continue;

	  worklist.quick_push (pred);
	}
    }

  /* Restrict the live-in blocks found above to actual exit destinations of
     DEF_LOOP and its superloops.  */
  def_loop_exits = BITMAP_ALLOC (&loop_renamer_obstack);
  for (struct loop *loop = def_loop;
       loop != current_loops->tree_root;
       loop = loop_outer (loop))
    bitmap_ior_into (def_loop_exits, loop_exits[loop->num]);
  bitmap_and_into (live_exits, def_loop_exits);
  BITMAP_FREE (def_loop_exits);
}
265*38fd1498Szrj
/* Add a loop-closing PHI for VAR in basic block EXIT.  */

static void
add_exit_phi (basic_block exit, tree var)
{
  gphi *phi;
  edge e;
  edge_iterator ei;

  /* Check that at least one of the edges entering the EXIT block exits
     the loop, or a superloop of that loop, that VAR is defined in.  */
  if (flag_checking)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (var);
      basic_block def_bb = gimple_bb (def_stmt);
      FOR_EACH_EDGE (e, ei, exit->preds)
	{
	  struct loop *aloop = find_common_loop (def_bb->loop_father,
						 e->src->loop_father);
	  if (!flow_bb_inside_loop_p (aloop, e->dest))
	    break;
	}
      /* E is NULL only if the loop above ran to completion, i.e. no
	 entering edge actually leaves the relevant loop.  */
      gcc_assert (e);
    }

  /* The PHI gets a fresh SSA version of VAR as its result; all incoming
     edges feed the original VAR into it.  */
  phi = create_phi_node (NULL_TREE, exit);
  create_new_def_for (var, phi, gimple_phi_result_ptr (phi));
  FOR_EACH_EDGE (e, ei, exit->preds)
    add_phi_arg (phi, var, e, UNKNOWN_LOCATION);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, ";; Created LCSSA PHI: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }
}
302*38fd1498Szrj
/* Add exit phis for VAR that is used in LIVEIN.
   Exits of the loops are stored in LOOP_EXITS.  */

static void
add_exit_phis_var (tree var, bitmap use_blocks, bitmap *loop_exits)
{
  unsigned index;
  bitmap_iterator bi;
  basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
  bitmap live_exits = BITMAP_ALLOC (&loop_renamer_obstack);

  /* The defining block must never be recorded as a use block.  */
  gcc_checking_assert (! bitmap_bit_p (use_blocks, def_bb->index));

  /* Find the loop-exit blocks at which VAR is live ...  */
  compute_live_loop_exits (live_exits, use_blocks, loop_exits, def_bb);

  /* ... and close VAR with a PHI at each of them.  */
  EXECUTE_IF_SET_IN_BITMAP (live_exits, 0, index, bi)
    {
      add_exit_phi (BASIC_BLOCK_FOR_FN (cfun, index), var);
    }

  BITMAP_FREE (live_exits);
}
325*38fd1498Szrj
/* Add exit phis for the names marked in NAMES_TO_RENAME.
   Exits of the loops are stored in EXITS.  Sets of blocks where the ssa
   names are used are stored in USE_BLOCKS.  */

static void
add_exit_phis (bitmap names_to_rename, bitmap *use_blocks, bitmap *loop_exits)
{
  unsigned i;
  bitmap_iterator bi;

  /* USE_BLOCKS is indexed by SSA name version, matching NAMES_TO_RENAME.  */
  EXECUTE_IF_SET_IN_BITMAP (names_to_rename, 0, i, bi)
    {
      add_exit_phis_var (ssa_name (i), use_blocks[i], loop_exits);
    }
}
341*38fd1498Szrj
342*38fd1498Szrj /* Fill the array of bitmaps LOOP_EXITS with all loop exit edge targets. */
343*38fd1498Szrj
344*38fd1498Szrj static void
get_loops_exits(bitmap * loop_exits)345*38fd1498Szrj get_loops_exits (bitmap *loop_exits)
346*38fd1498Szrj {
347*38fd1498Szrj struct loop *loop;
348*38fd1498Szrj unsigned j;
349*38fd1498Szrj edge e;
350*38fd1498Szrj
351*38fd1498Szrj FOR_EACH_LOOP (loop, 0)
352*38fd1498Szrj {
353*38fd1498Szrj vec<edge> exit_edges = get_loop_exit_edges (loop);
354*38fd1498Szrj loop_exits[loop->num] = BITMAP_ALLOC (&loop_renamer_obstack);
355*38fd1498Szrj FOR_EACH_VEC_ELT (exit_edges, j, e)
356*38fd1498Szrj bitmap_set_bit (loop_exits[loop->num], e->dest->index);
357*38fd1498Szrj exit_edges.release ();
358*38fd1498Szrj }
359*38fd1498Szrj }
360*38fd1498Szrj
/* For USE in BB, if it is used outside of the loop it is defined in,
   mark it for rewrite.  Record basic block BB where it is used
   to USE_BLOCKS.  Record the ssa name index to NEED_PHIS bitmap.
   Note that for USEs in phis, BB should be the src of the edge corresponding to
   the use, rather than the bb containing the phi.  */

static void
find_uses_to_rename_use (basic_block bb, tree use, bitmap *use_blocks,
			 bitmap need_phis)
{
  unsigned ver;
  basic_block def_bb;
  struct loop *def_loop;

  if (TREE_CODE (use) != SSA_NAME)
    return;

  ver = SSA_NAME_VERSION (use);
  def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
  /* Names without a defining block (e.g. default definitions) need no
     loop-closing PHI.  */
  if (!def_bb)
    return;
  def_loop = def_bb->loop_father;

  /* If the definition is not inside a loop, it is not interesting.  */
  if (!loop_outer (def_loop))
    return;

  /* If the use is not outside of the loop it is defined in, it is not
     interesting.  */
  if (flow_bb_inside_loop_p (def_loop, bb))
    return;

  /* If we're seeing VER for the first time, we still have to allocate
     a bitmap for its uses.  */
  if (bitmap_set_bit (need_phis, ver))
    use_blocks[ver] = BITMAP_ALLOC (&loop_renamer_obstack);
  bitmap_set_bit (use_blocks[ver], bb->index);
}
399*38fd1498Szrj
400*38fd1498Szrj /* For uses matching USE_FLAGS in STMT, mark names that are used outside of the
401*38fd1498Szrj loop they are defined to rewrite. Record the set of blocks in which the ssa
402*38fd1498Szrj names are used to USE_BLOCKS, and the ssa names themselves to NEED_PHIS. */
403*38fd1498Szrj
404*38fd1498Szrj static void
find_uses_to_rename_stmt(gimple * stmt,bitmap * use_blocks,bitmap need_phis,int use_flags)405*38fd1498Szrj find_uses_to_rename_stmt (gimple *stmt, bitmap *use_blocks, bitmap need_phis,
406*38fd1498Szrj int use_flags)
407*38fd1498Szrj {
408*38fd1498Szrj ssa_op_iter iter;
409*38fd1498Szrj tree var;
410*38fd1498Szrj basic_block bb = gimple_bb (stmt);
411*38fd1498Szrj
412*38fd1498Szrj if (is_gimple_debug (stmt))
413*38fd1498Szrj return;
414*38fd1498Szrj
415*38fd1498Szrj /* FOR_EACH_SSA_TREE_OPERAND iterator does not allows SSA_OP_VIRTUAL_USES
416*38fd1498Szrj only. */
417*38fd1498Szrj if (use_flags == SSA_OP_VIRTUAL_USES)
418*38fd1498Szrj {
419*38fd1498Szrj tree vuse = gimple_vuse (stmt);
420*38fd1498Szrj if (vuse != NULL_TREE)
421*38fd1498Szrj find_uses_to_rename_use (bb, gimple_vuse (stmt), use_blocks, need_phis);
422*38fd1498Szrj }
423*38fd1498Szrj else
424*38fd1498Szrj FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, use_flags)
425*38fd1498Szrj find_uses_to_rename_use (bb, var, use_blocks, need_phis);
426*38fd1498Szrj }
427*38fd1498Szrj
/* Marks names matching USE_FLAGS that are used in BB and outside of the loop
   they are defined in for rewrite.  Records the set of blocks in which the ssa
   names are used to USE_BLOCKS.  Record the SSA names that will
   need exit PHIs in NEED_PHIS.  */

static void
find_uses_to_rename_bb (basic_block bb, bitmap *use_blocks, bitmap need_phis,
			int use_flags)
{
  edge e;
  edge_iterator ei;
  bool do_virtuals = (use_flags & SSA_OP_VIRTUAL_USES) != 0;
  bool do_nonvirtuals = (use_flags & SSA_OP_USE) != 0;

  /* PHI arguments in successor blocks are uses "located" in BB: attribute
     each argument to the source block of its incoming edge.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    for (gphi_iterator bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi);
	 gsi_next (&bsi))
      {
	gphi *phi = bsi.phi ();
	bool virtual_p = virtual_operand_p (gimple_phi_result (phi));
	if ((virtual_p && do_virtuals)
	    || (!virtual_p && do_nonvirtuals))
	  find_uses_to_rename_use (bb, PHI_ARG_DEF_FROM_EDGE (phi, e),
				   use_blocks, need_phis);
      }

  /* Then scan the ordinary statements of BB itself.  */
  for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
       gsi_next (&bsi))
    find_uses_to_rename_stmt (gsi_stmt (bsi), use_blocks, need_phis,
			      use_flags);
}
459*38fd1498Szrj
460*38fd1498Szrj /* Marks names matching USE_FLAGS that are used outside of the loop they are
461*38fd1498Szrj defined in for rewrite. Records the set of blocks in which the ssa names are
462*38fd1498Szrj used to USE_BLOCKS. Record the SSA names that will need exit PHIs in
463*38fd1498Szrj NEED_PHIS. If CHANGED_BBS is not NULL, scan only blocks in this set. */
464*38fd1498Szrj
465*38fd1498Szrj static void
find_uses_to_rename(bitmap changed_bbs,bitmap * use_blocks,bitmap need_phis,int use_flags)466*38fd1498Szrj find_uses_to_rename (bitmap changed_bbs, bitmap *use_blocks, bitmap need_phis,
467*38fd1498Szrj int use_flags)
468*38fd1498Szrj {
469*38fd1498Szrj basic_block bb;
470*38fd1498Szrj unsigned index;
471*38fd1498Szrj bitmap_iterator bi;
472*38fd1498Szrj
473*38fd1498Szrj if (changed_bbs)
474*38fd1498Szrj EXECUTE_IF_SET_IN_BITMAP (changed_bbs, 0, index, bi)
475*38fd1498Szrj {
476*38fd1498Szrj bb = BASIC_BLOCK_FOR_FN (cfun, index);
477*38fd1498Szrj if (bb)
478*38fd1498Szrj find_uses_to_rename_bb (bb, use_blocks, need_phis, use_flags);
479*38fd1498Szrj }
480*38fd1498Szrj else
481*38fd1498Szrj FOR_EACH_BB_FN (bb, cfun)
482*38fd1498Szrj find_uses_to_rename_bb (bb, use_blocks, need_phis, use_flags);
483*38fd1498Szrj }
484*38fd1498Szrj
/* Mark uses of DEF that are used outside of the loop they are defined in for
   rewrite.  Record the set of blocks in which the ssa names are used to
   USE_BLOCKS.  Record the SSA names that will need exit PHIs in NEED_PHIS.  */

static void
find_uses_to_rename_def (tree def, bitmap *use_blocks, bitmap need_phis)
{
  gimple *use_stmt;
  imm_use_iterator imm_iter;

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
    {
      if (is_gimple_debug (use_stmt))
	continue;

      basic_block use_bb = gimple_bb (use_stmt);

      use_operand_p use_p;
      FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	{
	  /* A use in a PHI argument is logically located at the source of
	     the corresponding incoming edge, not in the PHI's block.  */
	  if (gimple_code (use_stmt) == GIMPLE_PHI)
	    {
	      edge e = gimple_phi_arg_edge (as_a <gphi *> (use_stmt),
					    PHI_ARG_INDEX_FROM_USE (use_p));
	      use_bb = e->src;
	    }
	  find_uses_to_rename_use (use_bb, USE_FROM_PTR (use_p), use_blocks,
				   need_phis);
	}
    }
}
516*38fd1498Szrj
/* Marks names matching USE_FLAGS that are defined in LOOP and used outside of
   it for rewrite.  Records the set of blocks in which the ssa names are used to
   USE_BLOCKS.  Record the SSA names that will need exit PHIs in NEED_PHIS.  */

static void
find_uses_to_rename_in_loop (struct loop *loop, bitmap *use_blocks,
			     bitmap need_phis, int use_flags)
{
  bool do_virtuals = (use_flags & SSA_OP_VIRTUAL_USES) != 0;
  bool do_nonvirtuals = (use_flags & SSA_OP_USE) != 0;
  /* We scan DEFS here (and follow their immediate uses), so translate the
     requested use flags into the corresponding def flags.  */
  int def_flags = ((do_virtuals ? SSA_OP_VIRTUAL_DEFS : 0)
		   | (do_nonvirtuals ? SSA_OP_DEF : 0));


  basic_block *bbs = get_loop_body (loop);

  for (unsigned int i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* PHI results are definitions too.  */
      for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	{
	  gphi *phi = bsi.phi ();
	  tree res = gimple_phi_result (phi);
	  bool virtual_p = virtual_operand_p (res);
	  if ((virtual_p && do_virtuals)
	      || (!virtual_p && do_nonvirtuals))
	    find_uses_to_rename_def (res, use_blocks, need_phis);
	}

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);
	  /* FOR_EACH_SSA_TREE_OPERAND iterator does not allows
	     SSA_OP_VIRTUAL_DEFS only.  */
	  if (def_flags == SSA_OP_VIRTUAL_DEFS)
	    {
	      tree vdef = gimple_vdef (stmt);
	      if (vdef != NULL)
		find_uses_to_rename_def (vdef, use_blocks, need_phis);
	    }
	  else
	    {
	      tree var;
	      ssa_op_iter iter;
	      FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, def_flags)
		find_uses_to_rename_def (var, use_blocks, need_phis);
	    }
	}
    }

  XDELETEVEC (bbs);
}
572*38fd1498Szrj
573*38fd1498Szrj /* Rewrites the program into a loop closed ssa form -- i.e. inserts extra
574*38fd1498Szrj phi nodes to ensure that no variable is used outside the loop it is
575*38fd1498Szrj defined in.
576*38fd1498Szrj
577*38fd1498Szrj This strengthening of the basic ssa form has several advantages:
578*38fd1498Szrj
579*38fd1498Szrj 1) Updating it during unrolling/peeling/versioning is trivial, since
580*38fd1498Szrj we do not need to care about the uses outside of the loop.
581*38fd1498Szrj The same applies to virtual operands which are also rewritten into
582*38fd1498Szrj loop closed SSA form. Note that virtual operands are always live
583*38fd1498Szrj until function exit.
584*38fd1498Szrj 2) The behavior of all uses of an induction variable is the same.
585*38fd1498Szrj Without this, you need to distinguish the case when the variable
586*38fd1498Szrj is used outside of the loop it is defined in, for example
587*38fd1498Szrj
588*38fd1498Szrj for (i = 0; i < 100; i++)
589*38fd1498Szrj {
590*38fd1498Szrj for (j = 0; j < 100; j++)
591*38fd1498Szrj {
592*38fd1498Szrj k = i + j;
593*38fd1498Szrj use1 (k);
594*38fd1498Szrj }
595*38fd1498Szrj use2 (k);
596*38fd1498Szrj }
597*38fd1498Szrj
598*38fd1498Szrj Looking from the outer loop with the normal SSA form, the first use of k
599*38fd1498Szrj is not well-behaved, while the second one is an induction variable with
600*38fd1498Szrj base 99 and step 1.
601*38fd1498Szrj
602*38fd1498Szrj If LOOP is non-null, only rewrite uses that have defs in LOOP. Otherwise,
603*38fd1498Szrj if CHANGED_BBS is not NULL, we look for uses outside loops only in the
604*38fd1498Szrj basic blocks in this set.
605*38fd1498Szrj
606*38fd1498Szrj USE_FLAGS allows us to specify whether we want virtual, non-virtual or
607*38fd1498Szrj both variables rewritten.
608*38fd1498Szrj
609*38fd1498Szrj UPDATE_FLAG is used in the call to update_ssa. See
610*38fd1498Szrj TODO_update_ssa* for documentation. */
611*38fd1498Szrj
612*38fd1498Szrj void
rewrite_into_loop_closed_ssa_1(bitmap changed_bbs,unsigned update_flag,int use_flags,struct loop * loop)613*38fd1498Szrj rewrite_into_loop_closed_ssa_1 (bitmap changed_bbs, unsigned update_flag,
614*38fd1498Szrj int use_flags, struct loop *loop)
615*38fd1498Szrj {
616*38fd1498Szrj bitmap *use_blocks;
617*38fd1498Szrj bitmap names_to_rename;
618*38fd1498Szrj
619*38fd1498Szrj loops_state_set (LOOP_CLOSED_SSA);
620*38fd1498Szrj if (number_of_loops (cfun) <= 1)
621*38fd1498Szrj return;
622*38fd1498Szrj
623*38fd1498Szrj /* If the pass has caused the SSA form to be out-of-date, update it
624*38fd1498Szrj now. */
625*38fd1498Szrj if (update_flag != 0)
626*38fd1498Szrj update_ssa (update_flag);
627*38fd1498Szrj else if (flag_checking)
628*38fd1498Szrj verify_ssa (true, true);
629*38fd1498Szrj
630*38fd1498Szrj bitmap_obstack_initialize (&loop_renamer_obstack);
631*38fd1498Szrj
632*38fd1498Szrj names_to_rename = BITMAP_ALLOC (&loop_renamer_obstack);
633*38fd1498Szrj
634*38fd1498Szrj /* Uses of names to rename. We don't have to initialize this array,
635*38fd1498Szrj because we know that we will only have entries for the SSA names
636*38fd1498Szrj in NAMES_TO_RENAME. */
637*38fd1498Szrj use_blocks = XNEWVEC (bitmap, num_ssa_names);
638*38fd1498Szrj
639*38fd1498Szrj if (loop != NULL)
640*38fd1498Szrj {
641*38fd1498Szrj gcc_assert (changed_bbs == NULL);
642*38fd1498Szrj find_uses_to_rename_in_loop (loop, use_blocks, names_to_rename,
643*38fd1498Szrj use_flags);
644*38fd1498Szrj }
645*38fd1498Szrj else
646*38fd1498Szrj {
647*38fd1498Szrj gcc_assert (loop == NULL);
648*38fd1498Szrj find_uses_to_rename (changed_bbs, use_blocks, names_to_rename, use_flags);
649*38fd1498Szrj }
650*38fd1498Szrj
651*38fd1498Szrj if (!bitmap_empty_p (names_to_rename))
652*38fd1498Szrj {
653*38fd1498Szrj /* An array of bitmaps where LOOP_EXITS[I] is the set of basic blocks
654*38fd1498Szrj that are the destination of an edge exiting loop number I. */
655*38fd1498Szrj bitmap *loop_exits = XNEWVEC (bitmap, number_of_loops (cfun));
656*38fd1498Szrj get_loops_exits (loop_exits);
657*38fd1498Szrj
658*38fd1498Szrj /* Add the PHI nodes on exits of the loops for the names we need to
659*38fd1498Szrj rewrite. */
660*38fd1498Szrj add_exit_phis (names_to_rename, use_blocks, loop_exits);
661*38fd1498Szrj
662*38fd1498Szrj free (loop_exits);
663*38fd1498Szrj
664*38fd1498Szrj /* Fix up all the names found to be used outside their original
665*38fd1498Szrj loops. */
666*38fd1498Szrj update_ssa (TODO_update_ssa);
667*38fd1498Szrj }
668*38fd1498Szrj
669*38fd1498Szrj bitmap_obstack_release (&loop_renamer_obstack);
670*38fd1498Szrj free (use_blocks);
671*38fd1498Szrj }
672*38fd1498Szrj
/* Rewrites the non-virtual defs and uses into a loop closed ssa form.  If
   CHANGED_BBS is not NULL, we look for uses outside loops only in the basic
   blocks in this set.  UPDATE_FLAG is used in the call to update_ssa.  See
   TODO_update_ssa* for documentation.  */

void
rewrite_into_loop_closed_ssa (bitmap changed_bbs, unsigned update_flag)
{
  /* Whole-function variant: no single-loop restriction.  */
  rewrite_into_loop_closed_ssa_1 (changed_bbs, update_flag, SSA_OP_USE, NULL);
}
683*38fd1498Szrj
/* Rewrites virtual defs and uses with def in LOOP into loop closed ssa
   form.  */

void
rewrite_virtuals_into_loop_closed_ssa (struct loop *loop)
{
  /* Delegate to the shared worker, restricting it to virtual operands of
     the given LOOP only; CHANGED_BBS and UPDATE_FLAG are unused here.  */
  rewrite_into_loop_closed_ssa_1 (NULL, 0, SSA_OP_VIRTUAL_USES, loop);
}
692*38fd1498Szrj
/* Check invariants of the loop closed ssa form for the def in DEF_BB.
   Asserts that every non-debug use of DEF occurs inside the loop that
   contains DEF_BB, which is the loop-closed SSA invariant.  */

static void
check_loop_closed_ssa_def (basic_block def_bb, tree def)
{
  use_operand_p use_p;
  imm_use_iterator iterator;
  FOR_EACH_IMM_USE_FAST (use_p, iterator, def)
    {
      /* Debug statements are not required to satisfy the invariant.  */
      if (is_gimple_debug (USE_STMT (use_p)))
	continue;

      /* For a PHI node, the use conceptually happens on the incoming
	 edge, so check the predecessor block that corresponds to this
	 argument rather than the block holding the PHI itself.  */
      basic_block use_bb = gimple_bb (USE_STMT (use_p));
      if (is_a <gphi *> (USE_STMT (use_p)))
	use_bb = EDGE_PRED (use_bb, PHI_ARG_INDEX_FROM_USE (use_p))->src;

      gcc_assert (flow_bb_inside_loop_p (def_bb->loop_father, use_bb));
    }
}
712*38fd1498Szrj
/* Checks invariants of loop closed ssa form in BB.  Verifies every real
   (non-virtual) SSA name defined in BB, whether by a PHI node or by an
   ordinary statement.  */

static void
check_loop_closed_ssa_bb (basic_block bb)
{
  /* Check the results of real PHI nodes; virtual PHIs are exempt from
     the loop-closed invariant.  */
  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
       gsi_next (&bsi))
    {
      gphi *phi = bsi.phi ();

      if (!virtual_operand_p (PHI_RESULT (phi)))
	check_loop_closed_ssa_def (bb, PHI_RESULT (phi));
    }

  /* Check each SSA def of every non-debug statement in BB.  */
  for (gimple_stmt_iterator bsi = gsi_start_nondebug_bb (bb); !gsi_end_p (bsi);
       gsi_next_nondebug (&bsi))
    {
      ssa_op_iter iter;
      tree var;
      gimple *stmt = gsi_stmt (bsi);

      FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_DEF)
	check_loop_closed_ssa_def (bb, var);
    }
}
738*38fd1498Szrj
739*38fd1498Szrj /* Checks that invariants of the loop closed ssa form are preserved.
740*38fd1498Szrj Call verify_ssa when VERIFY_SSA_P is true. Note all loops are checked
741*38fd1498Szrj if LOOP is NULL, otherwise, only LOOP is checked. */
742*38fd1498Szrj
743*38fd1498Szrj DEBUG_FUNCTION void
verify_loop_closed_ssa(bool verify_ssa_p,struct loop * loop)744*38fd1498Szrj verify_loop_closed_ssa (bool verify_ssa_p, struct loop *loop)
745*38fd1498Szrj {
746*38fd1498Szrj if (number_of_loops (cfun) <= 1)
747*38fd1498Szrj return;
748*38fd1498Szrj
749*38fd1498Szrj if (verify_ssa_p)
750*38fd1498Szrj verify_ssa (false, true);
751*38fd1498Szrj
752*38fd1498Szrj timevar_push (TV_VERIFY_LOOP_CLOSED);
753*38fd1498Szrj
754*38fd1498Szrj if (loop == NULL)
755*38fd1498Szrj {
756*38fd1498Szrj basic_block bb;
757*38fd1498Szrj
758*38fd1498Szrj FOR_EACH_BB_FN (bb, cfun)
759*38fd1498Szrj if (bb->loop_father && bb->loop_father->num > 0)
760*38fd1498Szrj check_loop_closed_ssa_bb (bb);
761*38fd1498Szrj }
762*38fd1498Szrj else
763*38fd1498Szrj {
764*38fd1498Szrj basic_block *bbs = get_loop_body (loop);
765*38fd1498Szrj
766*38fd1498Szrj for (unsigned i = 0; i < loop->num_nodes; ++i)
767*38fd1498Szrj check_loop_closed_ssa_bb (bbs[i]);
768*38fd1498Szrj
769*38fd1498Szrj free (bbs);
770*38fd1498Szrj }
771*38fd1498Szrj
772*38fd1498Szrj timevar_pop (TV_VERIFY_LOOP_CLOSED);
773*38fd1498Szrj }
774*38fd1498Szrj
/* Split loop exit edge EXIT.  The things are a bit complicated by a need to
   preserve the loop closed ssa form.  The newly created block is returned.  */

basic_block
split_loop_exit_edge (edge exit)
{
  basic_block dest = exit->dest;
  basic_block bb = split_edge (exit);
  gphi *phi, *new_phi;
  tree new_name, name;
  use_operand_p op_p;
  gphi_iterator psi;
  source_location locus;

  /* For each PHI in the old destination, reroute the SSA-name argument
     coming through the new block via a fresh single-argument PHI in BB,
     so the name stays loop-closed.  */
  for (psi = gsi_start_phis (dest); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = psi.phi ();
      /* After split_edge, BB's single successor edge is the one that now
	 carries the former EXIT argument of PHI.  */
      op_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (bb));
      locus = gimple_phi_arg_location_from_edge (phi, single_succ_edge (bb));

      name = USE_FROM_PTR (op_p);

      /* If the argument of the PHI node is a constant, we do not need
	 to keep it inside loop.  */
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      /* Otherwise create an auxiliary phi node that will copy the value
	 of the SSA name out of the loop.  */
      new_name = duplicate_ssa_name (name, NULL);
      new_phi = create_phi_node (new_name, bb);
      add_phi_arg (new_phi, name, exit, locus);
      /* Make the downstream PHI use the copy instead of the original.  */
      SET_USE (op_p, new_name);
    }

  return bb;
}
812*38fd1498Szrj
/* Returns the basic block in that statements should be emitted for induction
   variables incremented at the end of the LOOP.  That position is simply the
   loop latch block.  */

basic_block
ip_end_pos (struct loop *loop)
{
  return loop->latch;
}
821*38fd1498Szrj
822*38fd1498Szrj /* Returns the basic block in that statements should be emitted for induction
823*38fd1498Szrj variables incremented just before exit condition of a LOOP. */
824*38fd1498Szrj
825*38fd1498Szrj basic_block
ip_normal_pos(struct loop * loop)826*38fd1498Szrj ip_normal_pos (struct loop *loop)
827*38fd1498Szrj {
828*38fd1498Szrj gimple *last;
829*38fd1498Szrj basic_block bb;
830*38fd1498Szrj edge exit;
831*38fd1498Szrj
832*38fd1498Szrj if (!single_pred_p (loop->latch))
833*38fd1498Szrj return NULL;
834*38fd1498Szrj
835*38fd1498Szrj bb = single_pred (loop->latch);
836*38fd1498Szrj last = last_stmt (bb);
837*38fd1498Szrj if (!last
838*38fd1498Szrj || gimple_code (last) != GIMPLE_COND)
839*38fd1498Szrj return NULL;
840*38fd1498Szrj
841*38fd1498Szrj exit = EDGE_SUCC (bb, 0);
842*38fd1498Szrj if (exit->dest == loop->latch)
843*38fd1498Szrj exit = EDGE_SUCC (bb, 1);
844*38fd1498Szrj
845*38fd1498Szrj if (flow_bb_inside_loop_p (loop, exit->dest))
846*38fd1498Szrj return NULL;
847*38fd1498Szrj
848*38fd1498Szrj return bb;
849*38fd1498Szrj }
850*38fd1498Szrj
851*38fd1498Szrj /* Stores the standard position for induction variable increment in LOOP
852*38fd1498Szrj (just before the exit condition if it is available and latch block is empty,
853*38fd1498Szrj end of the latch block otherwise) to BSI. INSERT_AFTER is set to true if
854*38fd1498Szrj the increment should be inserted after *BSI. */
855*38fd1498Szrj
856*38fd1498Szrj void
standard_iv_increment_position(struct loop * loop,gimple_stmt_iterator * bsi,bool * insert_after)857*38fd1498Szrj standard_iv_increment_position (struct loop *loop, gimple_stmt_iterator *bsi,
858*38fd1498Szrj bool *insert_after)
859*38fd1498Szrj {
860*38fd1498Szrj basic_block bb = ip_normal_pos (loop), latch = ip_end_pos (loop);
861*38fd1498Szrj gimple *last = last_stmt (latch);
862*38fd1498Szrj
863*38fd1498Szrj if (!bb
864*38fd1498Szrj || (last && gimple_code (last) != GIMPLE_LABEL))
865*38fd1498Szrj {
866*38fd1498Szrj *bsi = gsi_last_bb (latch);
867*38fd1498Szrj *insert_after = true;
868*38fd1498Szrj }
869*38fd1498Szrj else
870*38fd1498Szrj {
871*38fd1498Szrj *bsi = gsi_last_bb (bb);
872*38fd1498Szrj *insert_after = false;
873*38fd1498Szrj }
874*38fd1498Szrj }
875*38fd1498Szrj
876*38fd1498Szrj /* Copies phi node arguments for duplicated blocks. The index of the first
877*38fd1498Szrj duplicated block is FIRST_NEW_BLOCK. */
878*38fd1498Szrj
879*38fd1498Szrj static void
copy_phi_node_args(unsigned first_new_block)880*38fd1498Szrj copy_phi_node_args (unsigned first_new_block)
881*38fd1498Szrj {
882*38fd1498Szrj unsigned i;
883*38fd1498Szrj
884*38fd1498Szrj for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
885*38fd1498Szrj BASIC_BLOCK_FOR_FN (cfun, i)->flags |= BB_DUPLICATED;
886*38fd1498Szrj
887*38fd1498Szrj for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
888*38fd1498Szrj add_phi_args_after_copy_bb (BASIC_BLOCK_FOR_FN (cfun, i));
889*38fd1498Szrj
890*38fd1498Szrj for (i = first_new_block; i < (unsigned) last_basic_block_for_fn (cfun); i++)
891*38fd1498Szrj BASIC_BLOCK_FOR_FN (cfun, i)->flags &= ~BB_DUPLICATED;
892*38fd1498Szrj }
893*38fd1498Szrj
894*38fd1498Szrj
895*38fd1498Szrj /* The same as cfgloopmanip.c:duplicate_loop_to_header_edge, but also
896*38fd1498Szrj updates the PHI nodes at start of the copied region. In order to
897*38fd1498Szrj achieve this, only loops whose exits all lead to the same location
898*38fd1498Szrj are handled.
899*38fd1498Szrj
900*38fd1498Szrj Notice that we do not completely update the SSA web after
901*38fd1498Szrj duplication. The caller is responsible for calling update_ssa
902*38fd1498Szrj after the loop has been duplicated. */
903*38fd1498Szrj
904*38fd1498Szrj bool
gimple_duplicate_loop_to_header_edge(struct loop * loop,edge e,unsigned int ndupl,sbitmap wont_exit,edge orig,vec<edge> * to_remove,int flags)905*38fd1498Szrj gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
906*38fd1498Szrj unsigned int ndupl, sbitmap wont_exit,
907*38fd1498Szrj edge orig, vec<edge> *to_remove,
908*38fd1498Szrj int flags)
909*38fd1498Szrj {
910*38fd1498Szrj unsigned first_new_block;
911*38fd1498Szrj
912*38fd1498Szrj if (!loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
913*38fd1498Szrj return false;
914*38fd1498Szrj if (!loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS))
915*38fd1498Szrj return false;
916*38fd1498Szrj
917*38fd1498Szrj first_new_block = last_basic_block_for_fn (cfun);
918*38fd1498Szrj if (!duplicate_loop_to_header_edge (loop, e, ndupl, wont_exit,
919*38fd1498Szrj orig, to_remove, flags))
920*38fd1498Szrj return false;
921*38fd1498Szrj
922*38fd1498Szrj /* Readd the removed phi args for e. */
923*38fd1498Szrj flush_pending_stmts (e);
924*38fd1498Szrj
925*38fd1498Szrj /* Copy the phi node arguments. */
926*38fd1498Szrj copy_phi_node_args (first_new_block);
927*38fd1498Szrj
928*38fd1498Szrj scev_reset ();
929*38fd1498Szrj
930*38fd1498Szrj return true;
931*38fd1498Szrj }
932*38fd1498Szrj
933*38fd1498Szrj /* Returns true if we can unroll LOOP FACTOR times. Number
934*38fd1498Szrj of iterations of the loop is returned in NITER. */
935*38fd1498Szrj
936*38fd1498Szrj bool
can_unroll_loop_p(struct loop * loop,unsigned factor,struct tree_niter_desc * niter)937*38fd1498Szrj can_unroll_loop_p (struct loop *loop, unsigned factor,
938*38fd1498Szrj struct tree_niter_desc *niter)
939*38fd1498Szrj {
940*38fd1498Szrj edge exit;
941*38fd1498Szrj
942*38fd1498Szrj /* Check whether unrolling is possible. We only want to unroll loops
943*38fd1498Szrj for that we are able to determine number of iterations. We also
944*38fd1498Szrj want to split the extra iterations of the loop from its end,
945*38fd1498Szrj therefore we require that the loop has precisely one
946*38fd1498Szrj exit. */
947*38fd1498Szrj
948*38fd1498Szrj exit = single_dom_exit (loop);
949*38fd1498Szrj if (!exit)
950*38fd1498Szrj return false;
951*38fd1498Szrj
952*38fd1498Szrj if (!number_of_iterations_exit (loop, exit, niter, false)
953*38fd1498Szrj || niter->cmp == ERROR_MARK
954*38fd1498Szrj /* Scalar evolutions analysis might have copy propagated
955*38fd1498Szrj the abnormal ssa names into these expressions, hence
956*38fd1498Szrj emitting the computations based on them during loop
957*38fd1498Szrj unrolling might create overlapping life ranges for
958*38fd1498Szrj them, and failures in out-of-ssa. */
959*38fd1498Szrj || contains_abnormal_ssa_name_p (niter->may_be_zero)
960*38fd1498Szrj || contains_abnormal_ssa_name_p (niter->control.base)
961*38fd1498Szrj || contains_abnormal_ssa_name_p (niter->control.step)
962*38fd1498Szrj || contains_abnormal_ssa_name_p (niter->bound))
963*38fd1498Szrj return false;
964*38fd1498Szrj
965*38fd1498Szrj /* And of course, we must be able to duplicate the loop. */
966*38fd1498Szrj if (!can_duplicate_loop_p (loop))
967*38fd1498Szrj return false;
968*38fd1498Szrj
969*38fd1498Szrj /* The final loop should be small enough. */
970*38fd1498Szrj if (tree_num_loop_insns (loop, &eni_size_weights) * factor
971*38fd1498Szrj > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS))
972*38fd1498Szrj return false;
973*38fd1498Szrj
974*38fd1498Szrj return true;
975*38fd1498Szrj }
976*38fd1498Szrj
/* Determines the conditions that control execution of LOOP unrolled FACTOR
   times.  DESC is number of iterations of LOOP.  ENTER_COND is set to
   condition that must be true if the main loop can be entered.
   EXIT_BASE, EXIT_STEP, EXIT_CMP and EXIT_BOUND are set to values describing
   how the exit from the unrolled loop should be controlled.  */

static void
determine_exit_conditions (struct loop *loop, struct tree_niter_desc *desc,
			   unsigned factor, tree *enter_cond,
			   tree *exit_base, tree *exit_step,
			   enum tree_code *exit_cmp, tree *exit_bound)
{
  gimple_seq stmts;
  tree base = desc->control.base;
  tree step = desc->control.step;
  tree bound = desc->bound;
  tree type = TREE_TYPE (step);
  tree bigstep, delta;
  tree min = lower_bound_in_type (type, type);
  tree max = upper_bound_in_type (type, type);
  enum tree_code cmp = desc->cmp;
  tree cond = boolean_true_node, assum;

  /* For pointers, do the arithmetics in the type of step.  */
  base = fold_convert (type, base);
  bound = fold_convert (type, bound);

  /* Initialize the outputs to "loop never entered" defaults; they are
     overwritten below once the real conditions are built.  */
  *enter_cond = boolean_false_node;
  *exit_base = NULL_TREE;
  *exit_step = NULL_TREE;
  *exit_cmp = ERROR_MARK;
  *exit_bound = NULL_TREE;
  gcc_assert (cmp != ERROR_MARK);

  /* We only need to be correct when we answer question
     "Do at least FACTOR more iterations remain?" in the unrolled loop.
     Thus, transforming BASE + STEP * i <> BOUND to
     BASE + STEP * i < BOUND is ok.  */
  if (cmp == NE_EXPR)
    {
      if (tree_int_cst_sign_bit (step))
	cmp = GT_EXPR;
      else
	cmp = LT_EXPR;
    }
  else if (cmp == LT_EXPR)
    {
      gcc_assert (!tree_int_cst_sign_bit (step));
    }
  else if (cmp == GT_EXPR)
    {
      gcc_assert (tree_int_cst_sign_bit (step));
    }
  else
    gcc_unreachable ();

  /* The main body of the loop may be entered iff:

     1) desc->may_be_zero is false.
     2) it is possible to check that there are at least FACTOR iterations
	of the loop, i.e., BOUND - step * FACTOR does not overflow.
     3) # of iterations is at least FACTOR  */

  /* Condition 1: conjoin !may_be_zero unless it is trivially false.  */
  if (!integer_zerop (desc->may_be_zero))
    cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
			invert_truthvalue (desc->may_be_zero),
			cond);

  /* BIGSTEP is the combined step of one unrolled iteration; DELTA is the
     distance covered by the FACTOR - 1 extra copies.  */
  bigstep = fold_build2 (MULT_EXPR, type, step,
			 build_int_cst_type (type, factor));
  delta = fold_build2 (MINUS_EXPR, type, bigstep, step);
  /* Condition 2: BOUND - DELTA must not overflow the type.  */
  if (cmp == LT_EXPR)
    assum = fold_build2 (GE_EXPR, boolean_type_node,
			 bound,
			 fold_build2 (PLUS_EXPR, type, min, delta));
  else
    assum = fold_build2 (LE_EXPR, boolean_type_node,
			 bound,
			 fold_build2 (PLUS_EXPR, type, max, delta));
  cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, assum, cond);

  /* Condition 3: BASE cmp (BOUND - DELTA), i.e. at least FACTOR
     iterations remain at the start.  */
  bound = fold_build2 (MINUS_EXPR, type, bound, delta);
  assum = fold_build2 (cmp, boolean_type_node, base, bound);
  cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, assum, cond);

  /* Gimplify the condition and emit any needed statements on the
     preheader edge, where they dominate the loop.  */
  cond = force_gimple_operand (unshare_expr (cond), &stmts, false, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  /* cond now may be a gimple comparison, which would be OK, but also any
     other gimple rhs (say a && b).  In this case we need to force it to
     operand.  */
  if (!is_gimple_condexpr (cond))
    {
      cond = force_gimple_operand (cond, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }
  *enter_cond = cond;

  /* Likewise gimplify the exit-test operands on the preheader edge.  */
  base = force_gimple_operand (unshare_expr (base), &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  bound = force_gimple_operand (unshare_expr (bound), &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  *exit_base = base;
  *exit_step = bigstep;
  *exit_cmp = cmp;
  *exit_bound = bound;
}
1088*38fd1498Szrj
1089*38fd1498Szrj /* Scales the frequencies of all basic blocks in LOOP that are strictly
1090*38fd1498Szrj dominated by BB by NUM/DEN. */
1091*38fd1498Szrj
1092*38fd1498Szrj static void
scale_dominated_blocks_in_loop(struct loop * loop,basic_block bb,profile_count num,profile_count den)1093*38fd1498Szrj scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
1094*38fd1498Szrj profile_count num, profile_count den)
1095*38fd1498Szrj {
1096*38fd1498Szrj basic_block son;
1097*38fd1498Szrj
1098*38fd1498Szrj if (!den.nonzero_p () && !(num == profile_count::zero ()))
1099*38fd1498Szrj return;
1100*38fd1498Szrj
1101*38fd1498Szrj for (son = first_dom_son (CDI_DOMINATORS, bb);
1102*38fd1498Szrj son;
1103*38fd1498Szrj son = next_dom_son (CDI_DOMINATORS, son))
1104*38fd1498Szrj {
1105*38fd1498Szrj if (!flow_bb_inside_loop_p (loop, son))
1106*38fd1498Szrj continue;
1107*38fd1498Szrj scale_bbs_frequencies_profile_count (&son, 1, num, den);
1108*38fd1498Szrj scale_dominated_blocks_in_loop (loop, son, num, den);
1109*38fd1498Szrj }
1110*38fd1498Szrj }
1111*38fd1498Szrj
1112*38fd1498Szrj /* Return estimated niter for LOOP after unrolling by FACTOR times. */
1113*38fd1498Szrj
1114*38fd1498Szrj gcov_type
niter_for_unrolled_loop(struct loop * loop,unsigned factor)1115*38fd1498Szrj niter_for_unrolled_loop (struct loop *loop, unsigned factor)
1116*38fd1498Szrj {
1117*38fd1498Szrj gcc_assert (factor != 0);
1118*38fd1498Szrj bool profile_p = false;
1119*38fd1498Szrj gcov_type est_niter = expected_loop_iterations_unbounded (loop, &profile_p);
1120*38fd1498Szrj /* Note that this is really CEIL (est_niter + 1, factor) - 1, where the
1121*38fd1498Szrj "+ 1" converts latch iterations to loop iterations and the "- 1"
1122*38fd1498Szrj converts back. */
1123*38fd1498Szrj gcov_type new_est_niter = est_niter / factor;
1124*38fd1498Szrj
1125*38fd1498Szrj if (est_niter == -1)
1126*38fd1498Szrj return -1;
1127*38fd1498Szrj
1128*38fd1498Szrj /* Without profile feedback, loops for which we do not know a better estimate
1129*38fd1498Szrj are assumed to roll 10 times. When we unroll such loop, it appears to
1130*38fd1498Szrj roll too little, and it may even seem to be cold. To avoid this, we
1131*38fd1498Szrj ensure that the created loop appears to roll at least 5 times (but at
1132*38fd1498Szrj most as many times as before unrolling). Don't do adjustment if profile
1133*38fd1498Szrj feedback is present. */
1134*38fd1498Szrj if (new_est_niter < 5 && !profile_p)
1135*38fd1498Szrj {
1136*38fd1498Szrj if (est_niter < 5)
1137*38fd1498Szrj new_est_niter = est_niter;
1138*38fd1498Szrj else
1139*38fd1498Szrj new_est_niter = 5;
1140*38fd1498Szrj }
1141*38fd1498Szrj
1142*38fd1498Szrj if (loop->any_upper_bound)
1143*38fd1498Szrj {
1144*38fd1498Szrj /* As above, this is really CEIL (upper_bound + 1, factor) - 1. */
1145*38fd1498Szrj widest_int bound = wi::udiv_floor (loop->nb_iterations_upper_bound,
1146*38fd1498Szrj factor);
1147*38fd1498Szrj if (wi::ltu_p (bound, new_est_niter))
1148*38fd1498Szrj new_est_niter = bound.to_uhwi ();
1149*38fd1498Szrj }
1150*38fd1498Szrj
1151*38fd1498Szrj return new_est_niter;
1152*38fd1498Szrj }
1153*38fd1498Szrj
1154*38fd1498Szrj /* Unroll LOOP FACTOR times. DESC describes number of iterations of LOOP.
1155*38fd1498Szrj EXIT is the exit of the loop to that DESC corresponds.
1156*38fd1498Szrj
1157*38fd1498Szrj If N is number of iterations of the loop and MAY_BE_ZERO is the condition
1158*38fd1498Szrj under that loop exits in the first iteration even if N != 0,
1159*38fd1498Szrj
1160*38fd1498Szrj while (1)
1161*38fd1498Szrj {
1162*38fd1498Szrj x = phi (init, next);
1163*38fd1498Szrj
1164*38fd1498Szrj pre;
1165*38fd1498Szrj if (st)
1166*38fd1498Szrj break;
1167*38fd1498Szrj post;
1168*38fd1498Szrj }
1169*38fd1498Szrj
1170*38fd1498Szrj becomes (with possibly the exit conditions formulated a bit differently,
1171*38fd1498Szrj avoiding the need to create a new iv):
1172*38fd1498Szrj
1173*38fd1498Szrj if (MAY_BE_ZERO || N < FACTOR)
1174*38fd1498Szrj goto rest;
1175*38fd1498Szrj
1176*38fd1498Szrj do
1177*38fd1498Szrj {
1178*38fd1498Szrj x = phi (init, next);
1179*38fd1498Szrj
1180*38fd1498Szrj pre;
1181*38fd1498Szrj post;
1182*38fd1498Szrj pre;
1183*38fd1498Szrj post;
1184*38fd1498Szrj ...
1185*38fd1498Szrj pre;
1186*38fd1498Szrj post;
1187*38fd1498Szrj N -= FACTOR;
1188*38fd1498Szrj
1189*38fd1498Szrj } while (N >= FACTOR);
1190*38fd1498Szrj
1191*38fd1498Szrj rest:
1192*38fd1498Szrj init' = phi (init, x);
1193*38fd1498Szrj
1194*38fd1498Szrj while (1)
1195*38fd1498Szrj {
1196*38fd1498Szrj x = phi (init', next);
1197*38fd1498Szrj
1198*38fd1498Szrj pre;
1199*38fd1498Szrj if (st)
1200*38fd1498Szrj break;
1201*38fd1498Szrj post;
1202*38fd1498Szrj }
1203*38fd1498Szrj
1204*38fd1498Szrj Before the loop is unrolled, TRANSFORM is called for it (only for the
1205*38fd1498Szrj unrolled loop, but not for its versioned copy). DATA is passed to
1206*38fd1498Szrj TRANSFORM. */
1207*38fd1498Szrj
1208*38fd1498Szrj /* Probability in % that the unrolled loop is entered. Just a guess. */
1209*38fd1498Szrj #define PROB_UNROLLED_LOOP_ENTERED 90
1210*38fd1498Szrj
1211*38fd1498Szrj void
tree_transform_and_unroll_loop(struct loop * loop,unsigned factor,edge exit,struct tree_niter_desc * desc,transform_callback transform,void * data)1212*38fd1498Szrj tree_transform_and_unroll_loop (struct loop *loop, unsigned factor,
1213*38fd1498Szrj edge exit, struct tree_niter_desc *desc,
1214*38fd1498Szrj transform_callback transform,
1215*38fd1498Szrj void *data)
1216*38fd1498Szrj {
1217*38fd1498Szrj gcond *exit_if;
1218*38fd1498Szrj tree ctr_before, ctr_after;
1219*38fd1498Szrj tree enter_main_cond, exit_base, exit_step, exit_bound;
1220*38fd1498Szrj enum tree_code exit_cmp;
1221*38fd1498Szrj gphi *phi_old_loop, *phi_new_loop, *phi_rest;
1222*38fd1498Szrj gphi_iterator psi_old_loop, psi_new_loop;
1223*38fd1498Szrj tree init, next, new_init;
1224*38fd1498Szrj struct loop *new_loop;
1225*38fd1498Szrj basic_block rest, exit_bb;
1226*38fd1498Szrj edge old_entry, new_entry, old_latch, precond_edge, new_exit;
1227*38fd1498Szrj edge new_nonexit, e;
1228*38fd1498Szrj gimple_stmt_iterator bsi;
1229*38fd1498Szrj use_operand_p op;
1230*38fd1498Szrj bool ok;
1231*38fd1498Szrj unsigned i;
1232*38fd1498Szrj profile_probability prob, prob_entry, scale_unrolled;
1233*38fd1498Szrj profile_count freq_e, freq_h;
1234*38fd1498Szrj gcov_type new_est_niter = niter_for_unrolled_loop (loop, factor);
1235*38fd1498Szrj unsigned irr = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;
1236*38fd1498Szrj auto_vec<edge> to_remove;
1237*38fd1498Szrj
1238*38fd1498Szrj determine_exit_conditions (loop, desc, factor,
1239*38fd1498Szrj &enter_main_cond, &exit_base, &exit_step,
1240*38fd1498Szrj &exit_cmp, &exit_bound);
1241*38fd1498Szrj
1242*38fd1498Szrj /* Let us assume that the unrolled loop is quite likely to be entered. */
1243*38fd1498Szrj if (integer_nonzerop (enter_main_cond))
1244*38fd1498Szrj prob_entry = profile_probability::always ();
1245*38fd1498Szrj else
1246*38fd1498Szrj prob_entry = profile_probability::guessed_always ()
1247*38fd1498Szrj .apply_scale (PROB_UNROLLED_LOOP_ENTERED, 100);
1248*38fd1498Szrj
1249*38fd1498Szrj /* The values for scales should keep profile consistent, and somewhat close
1250*38fd1498Szrj to correct.
1251*38fd1498Szrj
1252*38fd1498Szrj TODO: The current value of SCALE_REST makes it appear that the loop that
1253*38fd1498Szrj is created by splitting the remaining iterations of the unrolled loop is
1254*38fd1498Szrj executed the same number of times as the original loop, and with the same
1255*38fd1498Szrj frequencies, which is obviously wrong. This does not appear to cause
1256*38fd1498Szrj problems, so we do not bother with fixing it for now. To make the profile
1257*38fd1498Szrj correct, we would need to change the probability of the exit edge of the
1258*38fd1498Szrj loop, and recompute the distribution of frequencies in its body because
1259*38fd1498Szrj of this change (scale the frequencies of blocks before and after the exit
1260*38fd1498Szrj by appropriate factors). */
1261*38fd1498Szrj scale_unrolled = prob_entry;
1262*38fd1498Szrj
1263*38fd1498Szrj new_loop = loop_version (loop, enter_main_cond, NULL, prob_entry,
1264*38fd1498Szrj prob_entry.invert (), scale_unrolled,
1265*38fd1498Szrj profile_probability::guessed_always (),
1266*38fd1498Szrj true);
1267*38fd1498Szrj gcc_assert (new_loop != NULL);
1268*38fd1498Szrj update_ssa (TODO_update_ssa);
1269*38fd1498Szrj
1270*38fd1498Szrj /* Prepare the cfg and update the phi nodes. Move the loop exit to the
1271*38fd1498Szrj loop latch (and make its condition dummy, for the moment). */
1272*38fd1498Szrj rest = loop_preheader_edge (new_loop)->src;
1273*38fd1498Szrj precond_edge = single_pred_edge (rest);
1274*38fd1498Szrj split_edge (loop_latch_edge (loop));
1275*38fd1498Szrj exit_bb = single_pred (loop->latch);
1276*38fd1498Szrj
1277*38fd1498Szrj /* Since the exit edge will be removed, the frequency of all the blocks
1278*38fd1498Szrj in the loop that are dominated by it must be scaled by
1279*38fd1498Szrj 1 / (1 - exit->probability). */
1280*38fd1498Szrj if (exit->probability.initialized_p ())
1281*38fd1498Szrj scale_dominated_blocks_in_loop (loop, exit->src,
1282*38fd1498Szrj /* We are scaling up here so probability
1283*38fd1498Szrj does not fit. */
1284*38fd1498Szrj loop->header->count,
1285*38fd1498Szrj loop->header->count
1286*38fd1498Szrj - loop->header->count.apply_probability
1287*38fd1498Szrj (exit->probability));
1288*38fd1498Szrj
1289*38fd1498Szrj bsi = gsi_last_bb (exit_bb);
1290*38fd1498Szrj exit_if = gimple_build_cond (EQ_EXPR, integer_zero_node,
1291*38fd1498Szrj integer_zero_node,
1292*38fd1498Szrj NULL_TREE, NULL_TREE);
1293*38fd1498Szrj
1294*38fd1498Szrj gsi_insert_after (&bsi, exit_if, GSI_NEW_STMT);
1295*38fd1498Szrj new_exit = make_edge (exit_bb, rest, EDGE_FALSE_VALUE | irr);
1296*38fd1498Szrj rescan_loop_exit (new_exit, true, false);
1297*38fd1498Szrj
1298*38fd1498Szrj /* Set the probability of new exit to the same of the old one. Fix
1299*38fd1498Szrj the frequency of the latch block, by scaling it back by
1300*38fd1498Szrj 1 - exit->probability. */
1301*38fd1498Szrj new_exit->probability = exit->probability;
1302*38fd1498Szrj new_nonexit = single_pred_edge (loop->latch);
1303*38fd1498Szrj new_nonexit->probability = exit->probability.invert ();
1304*38fd1498Szrj new_nonexit->flags = EDGE_TRUE_VALUE;
1305*38fd1498Szrj if (new_nonexit->probability.initialized_p ())
1306*38fd1498Szrj scale_bbs_frequencies (&loop->latch, 1, new_nonexit->probability);
1307*38fd1498Szrj
1308*38fd1498Szrj old_entry = loop_preheader_edge (loop);
1309*38fd1498Szrj new_entry = loop_preheader_edge (new_loop);
1310*38fd1498Szrj old_latch = loop_latch_edge (loop);
1311*38fd1498Szrj for (psi_old_loop = gsi_start_phis (loop->header),
1312*38fd1498Szrj psi_new_loop = gsi_start_phis (new_loop->header);
1313*38fd1498Szrj !gsi_end_p (psi_old_loop);
1314*38fd1498Szrj gsi_next (&psi_old_loop), gsi_next (&psi_new_loop))
1315*38fd1498Szrj {
1316*38fd1498Szrj phi_old_loop = psi_old_loop.phi ();
1317*38fd1498Szrj phi_new_loop = psi_new_loop.phi ();
1318*38fd1498Szrj
1319*38fd1498Szrj init = PHI_ARG_DEF_FROM_EDGE (phi_old_loop, old_entry);
1320*38fd1498Szrj op = PHI_ARG_DEF_PTR_FROM_EDGE (phi_new_loop, new_entry);
1321*38fd1498Szrj gcc_assert (operand_equal_for_phi_arg_p (init, USE_FROM_PTR (op)));
1322*38fd1498Szrj next = PHI_ARG_DEF_FROM_EDGE (phi_old_loop, old_latch);
1323*38fd1498Szrj
1324*38fd1498Szrj /* Prefer using original variable as a base for the new ssa name.
1325*38fd1498Szrj This is necessary for virtual ops, and useful in order to avoid
1326*38fd1498Szrj losing debug info for real ops. */
1327*38fd1498Szrj if (TREE_CODE (next) == SSA_NAME
1328*38fd1498Szrj && useless_type_conversion_p (TREE_TYPE (next),
1329*38fd1498Szrj TREE_TYPE (init)))
1330*38fd1498Szrj new_init = copy_ssa_name (next);
1331*38fd1498Szrj else if (TREE_CODE (init) == SSA_NAME
1332*38fd1498Szrj && useless_type_conversion_p (TREE_TYPE (init),
1333*38fd1498Szrj TREE_TYPE (next)))
1334*38fd1498Szrj new_init = copy_ssa_name (init);
1335*38fd1498Szrj else if (useless_type_conversion_p (TREE_TYPE (next), TREE_TYPE (init)))
1336*38fd1498Szrj new_init = make_temp_ssa_name (TREE_TYPE (next), NULL, "unrinittmp");
1337*38fd1498Szrj else
1338*38fd1498Szrj new_init = make_temp_ssa_name (TREE_TYPE (init), NULL, "unrinittmp");
1339*38fd1498Szrj
1340*38fd1498Szrj phi_rest = create_phi_node (new_init, rest);
1341*38fd1498Szrj
1342*38fd1498Szrj add_phi_arg (phi_rest, init, precond_edge, UNKNOWN_LOCATION);
1343*38fd1498Szrj add_phi_arg (phi_rest, next, new_exit, UNKNOWN_LOCATION);
1344*38fd1498Szrj SET_USE (op, new_init);
1345*38fd1498Szrj }
1346*38fd1498Szrj
1347*38fd1498Szrj remove_path (exit);
1348*38fd1498Szrj
1349*38fd1498Szrj /* Transform the loop. */
1350*38fd1498Szrj if (transform)
1351*38fd1498Szrj (*transform) (loop, data);
1352*38fd1498Szrj
1353*38fd1498Szrj /* Unroll the loop and remove the exits in all iterations except for the
1354*38fd1498Szrj last one. */
1355*38fd1498Szrj auto_sbitmap wont_exit (factor);
1356*38fd1498Szrj bitmap_ones (wont_exit);
1357*38fd1498Szrj bitmap_clear_bit (wont_exit, factor - 1);
1358*38fd1498Szrj
1359*38fd1498Szrj ok = gimple_duplicate_loop_to_header_edge
1360*38fd1498Szrj (loop, loop_latch_edge (loop), factor - 1,
1361*38fd1498Szrj wont_exit, new_exit, &to_remove, DLTHE_FLAG_UPDATE_FREQ);
1362*38fd1498Szrj gcc_assert (ok);
1363*38fd1498Szrj
1364*38fd1498Szrj FOR_EACH_VEC_ELT (to_remove, i, e)
1365*38fd1498Szrj {
1366*38fd1498Szrj ok = remove_path (e);
1367*38fd1498Szrj gcc_assert (ok);
1368*38fd1498Szrj }
1369*38fd1498Szrj update_ssa (TODO_update_ssa);
1370*38fd1498Szrj
1371*38fd1498Szrj /* Ensure that the frequencies in the loop match the new estimated
1372*38fd1498Szrj number of iterations, and change the probability of the new
1373*38fd1498Szrj exit edge. */
1374*38fd1498Szrj
1375*38fd1498Szrj freq_h = loop->header->count;
1376*38fd1498Szrj freq_e = (loop_preheader_edge (loop))->count ();
1377*38fd1498Szrj if (freq_h.nonzero_p ())
1378*38fd1498Szrj {
1379*38fd1498Szrj /* Avoid dropping loop body profile counter to 0 because of zero count
1380*38fd1498Szrj in loop's preheader. */
1381*38fd1498Szrj if (freq_h.nonzero_p () && !(freq_e == profile_count::zero ()))
1382*38fd1498Szrj freq_e = freq_e.force_nonzero ();
1383*38fd1498Szrj scale_loop_frequencies (loop, freq_e.probability_in (freq_h));
1384*38fd1498Szrj }
1385*38fd1498Szrj
1386*38fd1498Szrj exit_bb = single_pred (loop->latch);
1387*38fd1498Szrj new_exit = find_edge (exit_bb, rest);
1388*38fd1498Szrj new_exit->probability = profile_probability::always ()
1389*38fd1498Szrj .apply_scale (1, new_est_niter + 1);
1390*38fd1498Szrj
1391*38fd1498Szrj rest->count += new_exit->count ();
1392*38fd1498Szrj
1393*38fd1498Szrj new_nonexit = single_pred_edge (loop->latch);
1394*38fd1498Szrj prob = new_nonexit->probability;
1395*38fd1498Szrj new_nonexit->probability = new_exit->probability.invert ();
1396*38fd1498Szrj prob = new_nonexit->probability / prob;
1397*38fd1498Szrj if (prob.initialized_p ())
1398*38fd1498Szrj scale_bbs_frequencies (&loop->latch, 1, prob);
1399*38fd1498Szrj
1400*38fd1498Szrj /* Finally create the new counter for number of iterations and add the new
1401*38fd1498Szrj exit instruction. */
1402*38fd1498Szrj bsi = gsi_last_nondebug_bb (exit_bb);
1403*38fd1498Szrj exit_if = as_a <gcond *> (gsi_stmt (bsi));
1404*38fd1498Szrj create_iv (exit_base, exit_step, NULL_TREE, loop,
1405*38fd1498Szrj &bsi, false, &ctr_before, &ctr_after);
1406*38fd1498Szrj gimple_cond_set_code (exit_if, exit_cmp);
1407*38fd1498Szrj gimple_cond_set_lhs (exit_if, ctr_after);
1408*38fd1498Szrj gimple_cond_set_rhs (exit_if, exit_bound);
1409*38fd1498Szrj update_stmt (exit_if);
1410*38fd1498Szrj
1411*38fd1498Szrj checking_verify_flow_info ();
1412*38fd1498Szrj checking_verify_loop_structure ();
1413*38fd1498Szrj checking_verify_loop_closed_ssa (true, loop);
1414*38fd1498Szrj checking_verify_loop_closed_ssa (true, new_loop);
1415*38fd1498Szrj }
1416*38fd1498Szrj
1417*38fd1498Szrj /* Wrapper over tree_transform_and_unroll_loop for case we do not
1418*38fd1498Szrj want to transform the loop before unrolling. The meaning
1419*38fd1498Szrj of the arguments is the same as for tree_transform_and_unroll_loop. */
1420*38fd1498Szrj
1421*38fd1498Szrj void
tree_unroll_loop(struct loop * loop,unsigned factor,edge exit,struct tree_niter_desc * desc)1422*38fd1498Szrj tree_unroll_loop (struct loop *loop, unsigned factor,
1423*38fd1498Szrj edge exit, struct tree_niter_desc *desc)
1424*38fd1498Szrj {
1425*38fd1498Szrj tree_transform_and_unroll_loop (loop, factor, exit, desc,
1426*38fd1498Szrj NULL, NULL);
1427*38fd1498Szrj }
1428*38fd1498Szrj
1429*38fd1498Szrj /* Rewrite the phi node at position PSI in function of the main
1430*38fd1498Szrj induction variable MAIN_IV and insert the generated code at GSI. */
1431*38fd1498Szrj
1432*38fd1498Szrj static void
rewrite_phi_with_iv(loop_p loop,gphi_iterator * psi,gimple_stmt_iterator * gsi,tree main_iv)1433*38fd1498Szrj rewrite_phi_with_iv (loop_p loop,
1434*38fd1498Szrj gphi_iterator *psi,
1435*38fd1498Szrj gimple_stmt_iterator *gsi,
1436*38fd1498Szrj tree main_iv)
1437*38fd1498Szrj {
1438*38fd1498Szrj affine_iv iv;
1439*38fd1498Szrj gassign *stmt;
1440*38fd1498Szrj gphi *phi = psi->phi ();
1441*38fd1498Szrj tree atype, mtype, val, res = PHI_RESULT (phi);
1442*38fd1498Szrj
1443*38fd1498Szrj if (virtual_operand_p (res) || res == main_iv)
1444*38fd1498Szrj {
1445*38fd1498Szrj gsi_next (psi);
1446*38fd1498Szrj return;
1447*38fd1498Szrj }
1448*38fd1498Szrj
1449*38fd1498Szrj if (!simple_iv (loop, loop, res, &iv, true))
1450*38fd1498Szrj {
1451*38fd1498Szrj gsi_next (psi);
1452*38fd1498Szrj return;
1453*38fd1498Szrj }
1454*38fd1498Szrj
1455*38fd1498Szrj remove_phi_node (psi, false);
1456*38fd1498Szrj
1457*38fd1498Szrj atype = TREE_TYPE (res);
1458*38fd1498Szrj mtype = POINTER_TYPE_P (atype) ? sizetype : atype;
1459*38fd1498Szrj val = fold_build2 (MULT_EXPR, mtype, unshare_expr (iv.step),
1460*38fd1498Szrj fold_convert (mtype, main_iv));
1461*38fd1498Szrj val = fold_build2 (POINTER_TYPE_P (atype)
1462*38fd1498Szrj ? POINTER_PLUS_EXPR : PLUS_EXPR,
1463*38fd1498Szrj atype, unshare_expr (iv.base), val);
1464*38fd1498Szrj val = force_gimple_operand_gsi (gsi, val, false, NULL_TREE, true,
1465*38fd1498Szrj GSI_SAME_STMT);
1466*38fd1498Szrj stmt = gimple_build_assign (res, val);
1467*38fd1498Szrj gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1468*38fd1498Szrj }
1469*38fd1498Szrj
1470*38fd1498Szrj /* Rewrite all the phi nodes of LOOP in function of the main induction
1471*38fd1498Szrj variable MAIN_IV. */
1472*38fd1498Szrj
1473*38fd1498Szrj static void
rewrite_all_phi_nodes_with_iv(loop_p loop,tree main_iv)1474*38fd1498Szrj rewrite_all_phi_nodes_with_iv (loop_p loop, tree main_iv)
1475*38fd1498Szrj {
1476*38fd1498Szrj unsigned i;
1477*38fd1498Szrj basic_block *bbs = get_loop_body_in_dom_order (loop);
1478*38fd1498Szrj gphi_iterator psi;
1479*38fd1498Szrj
1480*38fd1498Szrj for (i = 0; i < loop->num_nodes; i++)
1481*38fd1498Szrj {
1482*38fd1498Szrj basic_block bb = bbs[i];
1483*38fd1498Szrj gimple_stmt_iterator gsi = gsi_after_labels (bb);
1484*38fd1498Szrj
1485*38fd1498Szrj if (bb->loop_father != loop)
1486*38fd1498Szrj continue;
1487*38fd1498Szrj
1488*38fd1498Szrj for (psi = gsi_start_phis (bb); !gsi_end_p (psi); )
1489*38fd1498Szrj rewrite_phi_with_iv (loop, &psi, &gsi, main_iv);
1490*38fd1498Szrj }
1491*38fd1498Szrj
1492*38fd1498Szrj free (bbs);
1493*38fd1498Szrj }
1494*38fd1498Szrj
/* Bases all the induction variables in LOOP on a single induction variable
   (with base 0 and step 1), whose final value is compared with *NIT.  When the
   IV type precision has to be larger than *NIT type precision, *NIT is
   converted to the larger type, the conversion code is inserted before the
   loop, and *NIT is updated to the new definition.  When BUMP_IN_LATCH is true,
   the induction variable is incremented in the loop latch, otherwise it is
   incremented in the loop header.  Return the induction variable that was
   created.  */

tree
canonicalize_loop_ivs (struct loop *loop, tree *nit, bool bump_in_latch)
{
  unsigned precision = TYPE_PRECISION (TREE_TYPE (*nit));
  unsigned original_precision = precision;
  tree type, var_before;
  gimple_stmt_iterator gsi;
  gphi_iterator psi;
  gcond *stmt;
  edge exit = single_dom_exit (loop);
  gimple_seq stmts;
  bool unsigned_p = false;

  /* Scan the header phis to find the widest integral/pointer IV type,
     accumulating the required precision and signedness for the new IV.  */
  for (psi = gsi_start_phis (loop->header);
       !gsi_end_p (psi); gsi_next (&psi))
    {
      gphi *phi = psi.phi ();
      tree res = PHI_RESULT (phi);
      bool uns;

      type = TREE_TYPE (res);
      /* Ignore virtual operands, non-integral/non-pointer results, and
	 anything narrower than what we have already seen.  */
      if (virtual_operand_p (res)
	  || (!INTEGRAL_TYPE_P (type)
	      && !POINTER_TYPE_P (type))
	  || TYPE_PRECISION (type) < precision)
	continue;

      /* Pointers are treated as unsigned.  */
      uns = POINTER_TYPE_P (type) | TYPE_UNSIGNED (type);

      /* A strictly wider type resets the signedness choice; an
	 equally-wide type only widens it towards unsigned.  */
      if (TYPE_PRECISION (type) > precision)
	unsigned_p = uns;
      else
	unsigned_p |= uns;

      precision = TYPE_PRECISION (type);
    }

  /* Round the precision up to one the target has a mode for, and build
     the type of the canonical IV.  */
  scalar_int_mode mode = smallest_int_mode_for_size (precision);
  precision = GET_MODE_PRECISION (mode);
  type = build_nonstandard_integer_type (precision, unsigned_p);

  /* If the IV type ended up wider than *NIT's type, convert *NIT and
     emit the conversion code on the preheader edge.  */
  if (original_precision != precision)
    {
      *nit = fold_convert (type, *nit);
      *nit = force_gimple_operand (*nit, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }

  /* Create the canonical IV {0, +, 1}, incremented either in the latch
     or in the header as requested.  */
  if (bump_in_latch)
    gsi = gsi_last_bb (loop->latch);
  else
    gsi = gsi_last_nondebug_bb (loop->header);
  create_iv (build_int_cst_type (type, 0), build_int_cst (type, 1), NULL_TREE,
	     loop, &gsi, bump_in_latch, &var_before, NULL);

  /* Re-express every other IV of LOOP in terms of the new one.  */
  rewrite_all_phi_nodes_with_iv (loop, var_before);

  stmt = as_a <gcond *> (last_stmt (exit->src));
  /* Make the loop exit if the control condition is not satisfied.  */
  if (exit->flags & EDGE_TRUE_VALUE)
    {
      edge te, fe;

      /* The exit was taken on the true edge; swap the edge flags so the
	 rewritten condition below exits on false.  */
      extract_true_false_edges_from_block (exit->src, &te, &fe);
      te->flags = EDGE_FALSE_VALUE;
      fe->flags = EDGE_TRUE_VALUE;
    }
  /* Rewrite the exit test to "var_before < *nit".  */
  gimple_cond_set_code (stmt, LT_EXPR);
  gimple_cond_set_lhs (stmt, var_before);
  gimple_cond_set_rhs (stmt, *nit);
  update_stmt (stmt);

  return var_before;
}
1579