1 /* Induction variable canonicalization and loop peeling.
2    Copyright (C) 2004-2015 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This pass detects the loops that iterate a constant number of times,
21    adds a canonical induction variable (step -1, tested against 0)
22    and replaces the exit test.  This enables the less powerful rtl
23    level analysis to use this information.
24 
25    This might spoil the code in some cases (by increasing register pressure).
26    Note that if the new variable is not needed, ivopts will get rid of it,
27    so it is only a problem when there are no other linear induction
28    variables.  In that case the created optimization possibilities are likely
29    to pay off.
30 
31    We also perform
32      - complete unrolling (or peeling) when the loop rolls few enough
33        times
34      - simple peeling (i.e. copying a few initial iterations prior to the
35        loop) when an estimate of the number of iterations is known
36        (typically from the profile info).  */
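
/* As an illustrative sketch (not the exact GIMPLE this pass emits): a loop

     for (i = 0; i < n; i++)
       a[i] = i;

   whose latch is known to run NITER times gets an extra counter that
   starts at NITER + 1, is decremented by one in every iteration and is
   tested against zero in the replaced exit condition, conceptually

     ivtmp = NITER + 1;
     do
       {
	 a[i] = i;
	 i++;
	 ivtmp--;
       }
     while (ivtmp != 0);

   which lets the RTL loop passes read the trip count directly.  */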
37 
38 #include "config.h"
39 #include "system.h"
40 #include "coretypes.h"
41 #include "tm.h"
42 #include "hash-set.h"
43 #include "machmode.h"
44 #include "vec.h"
45 #include "double-int.h"
46 #include "input.h"
47 #include "alias.h"
48 #include "symtab.h"
49 #include "wide-int.h"
50 #include "inchash.h"
51 #include "tree.h"
52 #include "fold-const.h"
53 #include "tm_p.h"
54 #include "profile.h"
55 #include "predict.h"
56 #include "hard-reg-set.h"
57 #include "input.h"
58 #include "function.h"
59 #include "dominance.h"
60 #include "cfg.h"
61 #include "basic-block.h"
62 #include "gimple-pretty-print.h"
63 #include "tree-ssa-alias.h"
64 #include "internal-fn.h"
65 #include "gimple-fold.h"
66 #include "tree-eh.h"
67 #include "gimple-expr.h"
68 #include "is-a.h"
69 #include "gimple.h"
70 #include "gimple-iterator.h"
71 #include "gimple-ssa.h"
72 #include "hash-map.h"
73 #include "plugin-api.h"
74 #include "ipa-ref.h"
75 #include "cgraph.h"
76 #include "tree-cfg.h"
77 #include "tree-phinodes.h"
78 #include "ssa-iterators.h"
79 #include "stringpool.h"
80 #include "tree-ssanames.h"
81 #include "tree-ssa-loop-manip.h"
82 #include "tree-ssa-loop-niter.h"
83 #include "tree-ssa-loop.h"
84 #include "tree-into-ssa.h"
85 #include "cfgloop.h"
86 #include "tree-pass.h"
87 #include "tree-chrec.h"
88 #include "tree-scalar-evolution.h"
89 #include "params.h"
90 #include "flags.h"
91 #include "tree-inline.h"
92 #include "target.h"
93 #include "tree-cfgcleanup.h"
94 #include "builtins.h"
95 
96 /* Specifies types of loops that may be unrolled.  */
97 
98 enum unroll_level
99 {
100   UL_SINGLE_ITER,	/* Only loops that exit immediately in the first
101 			   iteration.  */
102   UL_NO_GROWTH,		/* Only loops whose unrolling will not cause increase
103 			   of code size.  */
104   UL_ALL		/* All suitable loops.  */
105 };
106 
107 /* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
108    is the exit edge whose condition is replaced.  */
109 
110 static void
111 create_canonical_iv (struct loop *loop, edge exit, tree niter)
112 {
113   edge in;
114   tree type, var;
115   gcond *cond;
116   gimple_stmt_iterator incr_at;
117   enum tree_code cmp;
118 
119   if (dump_file && (dump_flags & TDF_DETAILS))
120     {
121       fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
122       print_generic_expr (dump_file, niter, TDF_SLIM);
123       fprintf (dump_file, " iterations.\n");
124     }
125 
126   cond = as_a <gcond *> (last_stmt (exit->src));
127   in = EDGE_SUCC (exit->src, 0);
128   if (in == exit)
129     in = EDGE_SUCC (exit->src, 1);
130 
131   /* Note that we do not need to worry about overflows, since
132      type of niter is always unsigned and all comparisons are
133      just for equality/nonequality -- i.e. everything works
134      with modulo arithmetic.  */
135 
136   type = TREE_TYPE (niter);
137   niter = fold_build2 (PLUS_EXPR, type,
138 		       niter,
139 		       build_int_cst (type, 1));
140   incr_at = gsi_last_bb (in->src);
141   create_iv (niter,
142 	     build_int_cst (type, -1),
143 	     NULL_TREE, loop,
144 	     &incr_at, false, NULL, &var);
145 
146   cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
147   gimple_cond_set_code (cond, cmp);
148   gimple_cond_set_lhs (cond, var);
149   gimple_cond_set_rhs (cond, build_int_cst (type, 0));
150   update_stmt (cond);
151 }
152 
153 /* Describe size of loop as detected by tree_estimate_loop_size.  */
154 struct loop_size
155 {
156   /* Number of instructions in the loop.  */
157   int overall;
158 
159   /* Number of instructions that will likely be optimized out in the
160      peeled iterations of the loop (i.e. computations based on an
161      induction variable that starts at a known constant).  */
162   int eliminated_by_peeling;
163 
164   /* Same statistics for last iteration of loop: it is smaller because
165      instructions after exit are not executed.  */
166   int last_iteration;
167   int last_iteration_eliminated_by_peeling;
168 
169   /* If some IV computation will become constant.  */
170   bool constant_iv;
171 
172   /* Number of call stmts that are not a builtin and are pure or const
173      present on the hot path.  */
174   int num_pure_calls_on_hot_path;
175   /* Number of call stmts that are not a builtin and are not pure nor const
176      present on the hot path.  */
177   int num_non_pure_calls_on_hot_path;
178   /* Number of statements other than calls in the loop.  */
179   int non_call_stmts_on_hot_path;
180   /* Number of branches seen on the hot path.  */
181   int num_branches_on_hot_path;
182 };
183 
184 /* Return true if OP in STMT will be constant after peeling LOOP.  */
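/* For instance (an illustrative case, not code from this file):

     static const int tbl[4] = { 1, 2, 4, 8 };
     for (i = 0; i < 4; i++)
       sum += tbl[i];

   here both the induction variable 'i' and the load 'tbl[i]' have known
   values in each peeled copy, so they are considered constant after
   peeling.  */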
185 
186 static bool
187 constant_after_peeling (tree op, gimple stmt, struct loop *loop)
188 {
189   affine_iv iv;
190 
191   if (is_gimple_min_invariant (op))
192     return true;
193 
194   /* We can still fold accesses to constant arrays when index is known.  */
195   if (TREE_CODE (op) != SSA_NAME)
196     {
197       tree base = op;
198 
199       /* First take a quick look for a constant array inside.  */
200       while (handled_component_p (base))
201 	base = TREE_OPERAND (base, 0);
202       if ((DECL_P (base)
203 	   && ctor_for_folding (base) != error_mark_node)
204 	  || CONSTANT_CLASS_P (base))
205 	{
206 	  /* If so, see if we understand all the indices.  */
207 	  base = op;
208 	  while (handled_component_p (base))
209 	    {
210 	      if (TREE_CODE (base) == ARRAY_REF
211 		  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
212 		return false;
213 	      base = TREE_OPERAND (base, 0);
214 	    }
215 	  return true;
216 	}
217       return false;
218     }
219 
220   /* Induction variables become constants after peeling.  */
221   if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
222     return false;
223   if (!is_gimple_min_invariant (iv.base))
224     return false;
225   if (!is_gimple_min_invariant (iv.step))
226     return false;
227   return true;
228 }
229 
230 /* Computes an estimated number of insns in LOOP.
231    EXIT (if non-NULL) is an exit edge that will be eliminated in all but the
232    last iteration of the loop.
233    EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
234    iteration of the loop.
235    Return results in SIZE, estimating the benefits of complete unrolling exiting by EXIT.
236    Stop estimating once UPPER_BOUND is met and return true in that case.  */
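/* Illustrative reading of the result (made-up numbers): for a body of ten
   insns in which the exit compare and two IV computations (three insns)
   fold away in peeled copies, SIZE->overall is 10 and
   SIZE->eliminated_by_peeling is 3.  The last_iteration fields hold the
   same statistics for the final copy, which roughly excludes statements
   placed after the cancelled exit test.  */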
237 
238 static bool
239 tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size,
240 			 int upper_bound)
241 {
242   basic_block *body = get_loop_body (loop);
243   gimple_stmt_iterator gsi;
244   unsigned int i;
245   bool after_exit;
246   vec<basic_block> path = get_loop_hot_path (loop);
247 
248   size->overall = 0;
249   size->eliminated_by_peeling = 0;
250   size->last_iteration = 0;
251   size->last_iteration_eliminated_by_peeling = 0;
252   size->num_pure_calls_on_hot_path = 0;
253   size->num_non_pure_calls_on_hot_path = 0;
254   size->non_call_stmts_on_hot_path = 0;
255   size->num_branches_on_hot_path = 0;
256   size->constant_iv = 0;
257 
258   if (dump_file && (dump_flags & TDF_DETAILS))
259     fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
260   for (i = 0; i < loop->num_nodes; i++)
261     {
262       if (edge_to_cancel && body[i] != edge_to_cancel->src
263 	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
264 	after_exit = true;
265       else
266 	after_exit = false;
267       if (dump_file && (dump_flags & TDF_DETAILS))
268 	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit);
269 
270       for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
271 	{
272 	  gimple stmt = gsi_stmt (gsi);
273 	  int num = estimate_num_insns (stmt, &eni_size_weights);
274 	  bool likely_eliminated = false;
275 	  bool likely_eliminated_last = false;
276 	  bool likely_eliminated_peeled = false;
277 
278 	  if (dump_file && (dump_flags & TDF_DETAILS))
279 	    {
280 	      fprintf (dump_file, "  size: %3i ", num);
281 	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
282 	    }
283 
284 	  /* Look for reasons why we might optimize this stmt away. */
285 
286 	  if (gimple_has_side_effects (stmt))
287 	    ;
288 	  /* Exit conditional.  */
289 	  else if (exit && body[i] == exit->src
290 		   && stmt == last_stmt (exit->src))
291 	    {
292 	      if (dump_file && (dump_flags & TDF_DETAILS))
293 	        fprintf (dump_file, "   Exit condition will be eliminated "
294 			 "in peeled copies.\n");
295 	      likely_eliminated_peeled = true;
296 	    }
297 	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
298 		   && stmt == last_stmt (edge_to_cancel->src))
299 	    {
300 	      if (dump_file && (dump_flags & TDF_DETAILS))
301 	        fprintf (dump_file, "   Exit condition will be eliminated "
302 			 "in last copy.\n");
303 	      likely_eliminated_last = true;
304 	    }
305 	  /* Sets of IV variables.  */
306 	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
307 	      && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
308 	    {
309 	      if (dump_file && (dump_flags & TDF_DETAILS))
310 	        fprintf (dump_file, "   Induction variable computation will"
311 			 " be folded away.\n");
312 	      likely_eliminated = true;
313 	    }
314 	  /* Assignments of IV variables.  */
315 	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
316 		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
317 		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
318 		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
319 		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
320 		       				  stmt, loop)))
321 	    {
322 	      size->constant_iv = true;
323 	      if (dump_file && (dump_flags & TDF_DETAILS))
324 	        fprintf (dump_file, "   Constant expression will be folded away.\n");
325 	      likely_eliminated = true;
326 	    }
327 	  /* Conditionals.  */
328 	  else if ((gimple_code (stmt) == GIMPLE_COND
329 		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
330 		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
331 		   || (gimple_code (stmt) == GIMPLE_SWITCH
332 		       && constant_after_peeling (gimple_switch_index (
333 						    as_a <gswitch *> (stmt)),
334 						  stmt, loop)))
335 	    {
336 	      if (dump_file && (dump_flags & TDF_DETAILS))
337 	        fprintf (dump_file, "   Constant conditional.\n");
338 	      likely_eliminated = true;
339 	    }
340 
341 	  size->overall += num;
342 	  if (likely_eliminated || likely_eliminated_peeled)
343 	    size->eliminated_by_peeling += num;
344 	  if (!after_exit)
345 	    {
346 	      size->last_iteration += num;
347 	      if (likely_eliminated || likely_eliminated_last)
348 		size->last_iteration_eliminated_by_peeling += num;
349 	    }
350 	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
351 	      - size->last_iteration_eliminated_by_peeling) > upper_bound)
352 	    {
353               free (body);
354 	      path.release ();
355 	      return true;
356 	    }
357 	}
358     }
359   while (path.length ())
360     {
361       basic_block bb = path.pop ();
362       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
363 	{
364 	  gimple stmt = gsi_stmt (gsi);
365 	  if (gimple_code (stmt) == GIMPLE_CALL)
366 	    {
367 	      int flags = gimple_call_flags (stmt);
368 	      tree decl = gimple_call_fndecl (stmt);
369 
370 	      if (decl && DECL_IS_BUILTIN (decl)
371 		  && is_inexpensive_builtin (decl))
372 		;
373 	      else if (flags & (ECF_PURE | ECF_CONST))
374 		size->num_pure_calls_on_hot_path++;
375 	      else
376 		size->num_non_pure_calls_on_hot_path++;
377 	      size->num_branches_on_hot_path ++;
378 	    }
379 	  else if (gimple_code (stmt) != GIMPLE_CALL
380 		   && gimple_code (stmt) != GIMPLE_DEBUG)
381 	    size->non_call_stmts_on_hot_path++;
382 	  if (((gimple_code (stmt) == GIMPLE_COND
383 	        && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
384 		    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)))
385 	       || (gimple_code (stmt) == GIMPLE_SWITCH
386 		   && !constant_after_peeling (gimple_switch_index (
387 						 as_a <gswitch *> (stmt)),
388 					       stmt, loop)))
389 	      && (!exit || bb != exit->src))
390 	    size->num_branches_on_hot_path++;
391 	}
392     }
393   path.release ();
394   if (dump_file && (dump_flags & TDF_DETAILS))
395     fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
396     	     size->eliminated_by_peeling, size->last_iteration,
397 	     size->last_iteration_eliminated_by_peeling);
398 
399   free (body);
400   return false;
401 }
402 
403 /* Estimate number of insns of completely unrolled loop.
404    It is (NUNROLL + 1) * size of loop body, taking into account
405    the fact that in the last copy everything after the exit conditional
406    is dead and that some instructions will be eliminated after
407    peeling.
408 
409    The loop body is likely to simplify further; since this is difficult
410    to guess, we just decrease the result by 1/3.  */
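/* For example (made-up numbers, purely to illustrate the formula): with
   SIZE->overall = 10, SIZE->eliminated_by_peeling = 4,
   SIZE->last_iteration = 8, SIZE->last_iteration_eliminated_by_peeling = 6
   and NUNROLL = 3, the estimate is 3 * (10 - 4) + (8 - 6) = 20 insns,
   scaled down by 2/3 to 13.  */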
411 
412 static unsigned HOST_WIDE_INT
413 estimated_unrolled_size (struct loop_size *size,
414 			 unsigned HOST_WIDE_INT nunroll)
415 {
416   HOST_WIDE_INT unr_insns = ((nunroll)
417   			     * (HOST_WIDE_INT) (size->overall
418 			     			- size->eliminated_by_peeling));
419   if (!nunroll)
420     unr_insns = 0;
421   unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;
422 
423   unr_insns = unr_insns * 2 / 3;
424   if (unr_insns <= 0)
425     unr_insns = 1;
426 
427   return unr_insns;
428 }
429 
430 /* Loop LOOP is known to not loop.  See if there is an edge in the loop
431    body that can be removed to make the loop always exit while at
432    the same time not making any code potentially executed
433    during the last iteration dead.
434 
435    After complete unrolling we still may get rid of the conditional
436    on the exit in the last copy even if we have no idea what it does.
437    This is quite a common case for loops of the form
438 
439      int a[5];
440      for (i=0;i<b;i++)
441        a[i]=0;
442 
443    Here we prove the loop iterates at most 5 times, but we do not know
444    it from the induction variable.
445 
446    For now we handle only the simple case where there is an exit condition
447    just before the latch block and the latch block contains no statements
448    with side effects that may otherwise terminate the execution of the loop
449    (such as by EH, by terminating the program, or by longjmp).
450 
451    In the general case we may want to cancel the paths leading to statements
452    loop-niter identified as having undefined effect in the last iteration.
453    The other cases are hopefully rare and will be cleaned up later.  */
454 
455 static edge
456 loop_edge_to_cancel (struct loop *loop)
457 {
458   vec<edge> exits;
459   unsigned i;
460   edge edge_to_cancel;
461   gimple_stmt_iterator gsi;
462 
463   /* We want the loop latch to have only one predecessor.  */
464   if (EDGE_COUNT (loop->latch->preds) > 1)
465     return NULL;
466 
467   exits = get_loop_exit_edges (loop);
468 
469   FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
470     {
471        /* Find the edge other than the loop exit
472           leaving the conditional.  */
473        if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
474          continue;
475        if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
476          edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
477        else
478          edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);
479 
480       /* We only can handle conditionals.  */
481       if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
482 	continue;
483 
484       /* We should never have conditionals in the loop latch. */
485       gcc_assert (edge_to_cancel->dest != loop->header);
486 
487       /* Check that it leads to loop latch.  */
488       if (edge_to_cancel->dest != loop->latch)
489         continue;
490 
491       exits.release ();
492 
493       /* Verify that the code in loop latch does nothing that may end program
494          execution without really reaching the exit.  This may include
495 	 non-pure/const function calls, EH statements, volatile ASMs etc.  */
496       for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
497 	if (gimple_has_side_effects (gsi_stmt (gsi)))
498 	   return NULL;
499       return edge_to_cancel;
500     }
501   exits.release ();
502   return NULL;
503 }
504 
505 /* Remove all tests for exits that are known to be taken after LOOP was
506    peeled NPEELED times.  Put a call to __builtin_unreachable before every
507    statement known not to be executed.  */
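/* For instance (an illustrative scenario): if loop-niter recorded a bound
   showing that a statement cannot be reached once the first NPEELED
   iterations have been peeled off, a __builtin_unreachable () call is
   placed before it.  Likewise, an exit that the recorded bound guarantees
   to be taken within the peeled copies has its condition folded to a
   constant so that the remaining loop body always exits there.  */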
508 
509 static bool
510 remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
511 {
512   struct nb_iter_bound *elt;
513   bool changed = false;
514 
515   for (elt = loop->bounds; elt; elt = elt->next)
516     {
517       /* If statement is known to be undefined after peeling, turn it
518 	 into unreachable (or trap when debugging experience is supposed
519 	 to be good).  */
520       if (!elt->is_exit
521 	  && wi::ltu_p (elt->bound, npeeled))
522 	{
523 	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
524 	  gcall *stmt = gimple_build_call
525 	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
526 	  gimple_set_location (stmt, gimple_location (elt->stmt));
527 	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
528 	  split_block (gimple_bb (stmt), stmt);
529 	  changed = true;
530 	  if (dump_file && (dump_flags & TDF_DETAILS))
531 	    {
532 	      fprintf (dump_file, "Forced statement unreachable: ");
533 	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
534 	    }
535 	}
536       /* If we know the exit will be taken after peeling, update.  */
537       else if (elt->is_exit
538 	       && wi::leu_p (elt->bound, npeeled))
539 	{
540 	  basic_block bb = gimple_bb (elt->stmt);
541 	  edge exit_edge = EDGE_SUCC (bb, 0);
542 
543 	  if (dump_file && (dump_flags & TDF_DETAILS))
544 	    {
545 	      fprintf (dump_file, "Forced exit to be taken: ");
546 	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
547 	    }
548 	  if (!loop_exit_edge_p (loop, exit_edge))
549 	    exit_edge = EDGE_SUCC (bb, 1);
550 	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
551 	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
552 	  if (exit_edge->flags & EDGE_TRUE_VALUE)
553 	    gimple_cond_make_true (cond_stmt);
554 	  else
555 	    gimple_cond_make_false (cond_stmt);
556 	  update_stmt (cond_stmt);
557 	  changed = true;
558 	}
559     }
560   return changed;
561 }
562 
563 /* Remove all exits that are known never to be taken because of the loop bound
564    discovered.  */
565 
566 static bool
567 remove_redundant_iv_tests (struct loop *loop)
568 {
569   struct nb_iter_bound *elt;
570   bool changed = false;
571 
572   if (!loop->any_upper_bound)
573     return false;
574   for (elt = loop->bounds; elt; elt = elt->next)
575     {
576       /* Exit is pointless if it won't be taken before loop reaches
577 	 upper bound.  */
578       if (elt->is_exit && loop->any_upper_bound
579           && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
580 	{
581 	  basic_block bb = gimple_bb (elt->stmt);
582 	  edge exit_edge = EDGE_SUCC (bb, 0);
583 	  struct tree_niter_desc niter;
584 
585 	  if (!loop_exit_edge_p (loop, exit_edge))
586 	    exit_edge = EDGE_SUCC (bb, 1);
587 
588 	  /* Only when we know the actual number of iterations, not
589 	     just a bound, can we remove the exit.  */
590 	  if (!number_of_iterations_exit (loop, exit_edge,
591 					  &niter, false, false)
592 	      || !integer_onep (niter.assumptions)
593 	      || !integer_zerop (niter.may_be_zero)
594 	      || !niter.niter
595 	      || TREE_CODE (niter.niter) != INTEGER_CST
596 	      || !wi::ltu_p (loop->nb_iterations_upper_bound,
597 			     wi::to_widest (niter.niter)))
598 	    continue;
599 
600 	  if (dump_file && (dump_flags & TDF_DETAILS))
601 	    {
602 	      fprintf (dump_file, "Removed pointless exit: ");
603 	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
604 	    }
605 	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
606 	  if (exit_edge->flags & EDGE_TRUE_VALUE)
607 	    gimple_cond_make_false (cond_stmt);
608 	  else
609 	    gimple_cond_make_true (cond_stmt);
610 	  update_stmt (cond_stmt);
611 	  changed = true;
612 	}
613     }
614   return changed;
615 }
616 
617 /* Stores loops that will be unlooped after we process the whole loop tree.  */
618 static vec<loop_p> loops_to_unloop;
619 static vec<int> loops_to_unloop_nunroll;
620 
621 /* Cancel all fully unrolled loops by putting __builtin_unreachable
622    on the latch edge.
623    We do it after all unrolling since unlooping moves basic blocks
624    across loop boundaries, trashing loop closed SSA form as well
625    as SCEV info that needs to be intact during unrolling.
626 
627    IRRED_INVALIDATED is used to bookkeep whether information about
628    irreducible regions may become invalid as a result
629    of the transformation.
630    LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
631    when we need to go into loop closed SSA form.  */
632 
633 static void
634 unloop_loops (bitmap loop_closed_ssa_invalidated,
635 	      bool *irred_invalidated)
636 {
637   while (loops_to_unloop.length ())
638     {
639       struct loop *loop = loops_to_unloop.pop ();
640       int n_unroll = loops_to_unloop_nunroll.pop ();
641       basic_block latch = loop->latch;
642       edge latch_edge = loop_latch_edge (loop);
643       int flags = latch_edge->flags;
644       location_t locus = latch_edge->goto_locus;
645       gcall *stmt;
646       gimple_stmt_iterator gsi;
647 
648       remove_exits_and_undefined_stmts (loop, n_unroll);
649 
650       /* Unloop destroys the latch edge.  */
651       unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);
652 
653       /* Create new basic block for the latch edge destination and wire
654 	 it in.  */
655       stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
656       latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags);
657       latch_edge->probability = 0;
658       latch_edge->count = 0;
659       latch_edge->flags |= flags;
660       latch_edge->goto_locus = locus;
661 
662       latch_edge->dest->loop_father = current_loops->tree_root;
663       latch_edge->dest->count = 0;
664       latch_edge->dest->frequency = 0;
665       set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);
666 
667       gsi = gsi_start_bb (latch_edge->dest);
668       gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
669     }
670   loops_to_unloop.release ();
671   loops_to_unloop_nunroll.release ();
672 }
673 
674 /* Tries to unroll LOOP completely, i.e. NITER times.
675    UL determines which loops we are allowed to unroll.
676    EXIT is the exit of the loop that should be eliminated.
677    MAXITER specifies a bound on the number of iterations, -1 if it is
678    not known or too large for HOST_WIDE_INT.  The location
679    LOCUS corresponding to the loop is used when emitting
680    a summary of the unroll to the dump file.  */
681 
682 static bool
683 try_unroll_loop_completely (struct loop *loop,
684 			    edge exit, tree niter,
685 			    enum unroll_level ul,
686 			    HOST_WIDE_INT maxiter,
687 			    location_t locus)
688 {
689   unsigned HOST_WIDE_INT n_unroll = 0, ninsns, unr_insns;
690   struct loop_size size;
691   bool n_unroll_found = false;
692   edge edge_to_cancel = NULL;
693   int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;
694 
695   /* See if we proved number of iterations to be low constant.
696 
697      EXIT is an edge that will be removed in all but last iteration of
698      the loop.
699 
700      EDGE_TO_CANCEL is an edge that will be removed from the last iteration
701      of the unrolled sequence and is expected to make the final loop not
702      rolling.
703 
704      If the number of executions of the loop is determined by a standard
705      induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
706      leaving the IV test.  */
707   if (tree_fits_uhwi_p (niter))
708     {
709       n_unroll = tree_to_uhwi (niter);
710       n_unroll_found = true;
711       edge_to_cancel = EDGE_SUCC (exit->src, 0);
712       if (edge_to_cancel == exit)
713 	edge_to_cancel = EDGE_SUCC (exit->src, 1);
714     }
715   /* We do not know the number of iterations and thus we can not eliminate
716      the EXIT edge.  */
717   else
718     exit = NULL;
719 
720   /* See if we can improve our estimate by using recorded loop bounds.  */
721   if (maxiter >= 0
722       && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
723     {
724       n_unroll = maxiter;
725       n_unroll_found = true;
726       /* Loop terminates before the IV variable test, so we can not
727 	 remove it in the last iteration.  */
728       edge_to_cancel = NULL;
729     }
730 
731   if (!n_unroll_found)
732     return false;
733 
734   if (n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
735     {
736       if (dump_file && (dump_flags & TDF_DETAILS))
737 	fprintf (dump_file, "Not unrolling loop %d "
738 		 "(--param max-completely-peeled-times limit reached).\n",
739 		 loop->num);
740       return false;
741     }
742 
743   if (!edge_to_cancel)
744     edge_to_cancel = loop_edge_to_cancel (loop);
745 
746   if (n_unroll)
747     {
748       sbitmap wont_exit;
749       edge e;
750       unsigned i;
751       bool large;
752       vec<edge> to_remove = vNULL;
753       if (ul == UL_SINGLE_ITER)
754 	return false;
755 
756       large = tree_estimate_loop_size
757 		 (loop, exit, edge_to_cancel, &size,
758 		  PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
759       ninsns = size.overall;
760       if (large)
761 	{
762 	  if (dump_file && (dump_flags & TDF_DETAILS))
763 	    fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
764 		     loop->num);
765 	  return false;
766 	}
767 
768       unr_insns = estimated_unrolled_size (&size, n_unroll);
769       if (dump_file && (dump_flags & TDF_DETAILS))
770 	{
771 	  fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
772 	  fprintf (dump_file, "  Estimated size after unrolling: %d\n",
773 		   (int) unr_insns);
774 	}
775 
776       /* If the code is going to shrink, we don't need to be extra cautious
777 	 on guessing if the unrolling is going to be profitable.  */
778       if (unr_insns
779 	  /* If there is an IV variable that will become constant, we save
780 	     one instruction in the loop prologue that we do not account
781 	     for otherwise.  */
782 	  <= ninsns + (size.constant_iv != false))
783 	;
784       /* We unroll only inner loops, because we do not consider it profitable
785 	 otherwise.  We can still cancel the loopback edge of a loop that does
786 	 not roll; this is always a good idea.  */
787       else if (ul == UL_NO_GROWTH)
788 	{
789 	  if (dump_file && (dump_flags & TDF_DETAILS))
790 	    fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
791 		     loop->num);
792 	  return false;
793 	}
794       /* Outer loops tend to be less interesting candidates for complete
795 	 unrolling unless we can do a lot of propagation into the inner loop
796 	 body.  For now we disable outer loop unrolling when the code would
797 	 grow.  */
798       else if (loop->inner)
799 	{
800 	  if (dump_file && (dump_flags & TDF_DETAILS))
801 	    fprintf (dump_file, "Not unrolling loop %d: "
802 		     "it is not innermost and code would grow.\n",
803 		     loop->num);
804 	  return false;
805 	}
806       /* If there is a call on a hot path through the loop, then
807 	 there is most probably not much to optimize.  */
808       else if (size.num_non_pure_calls_on_hot_path)
809 	{
810 	  if (dump_file && (dump_flags & TDF_DETAILS))
811 	    fprintf (dump_file, "Not unrolling loop %d: "
812 		     "contains call and code would grow.\n",
813 		     loop->num);
814 	  return false;
815 	}
816       /* If there is a pure/const call in the loop, then we
817 	 can still optimize the unrolled loop body if it contains
818 	 some other interesting code than the calls and the code
819 	 storing or accumulating the return value.  */
820       else if (size.num_pure_calls_on_hot_path
821 	       /* One IV increment, one test, one ivtmp store
822 		  and one useful stmt.  That is about the minimal loop
823 		  doing a pure call.  */
824 	       && (size.non_call_stmts_on_hot_path
825 		   <= 3 + size.num_pure_calls_on_hot_path))
826 	{
827 	  if (dump_file && (dump_flags & TDF_DETAILS))
828 	    fprintf (dump_file, "Not unrolling loop %d: "
829 		     "contains just pure calls and code would grow.\n",
830 		     loop->num);
831 	  return false;
832 	}
833       /* Complete unrolling is a major win when control flow is removed and
834 	 one big basic block is created.  If the loop contains control flow
835 	 the optimization may still be a win because of eliminating the loop
836 	 overhead but it also may blow the branch predictor tables.
837 	 Limit the number of branches on the hot path through the peeled
838 	 sequence.  */
839       else if (size.num_branches_on_hot_path * (int)n_unroll
840 	       > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
841 	{
842 	  if (dump_file && (dump_flags & TDF_DETAILS))
843 	    fprintf (dump_file, "Not unrolling loop %d: "
844 		     " number of branches on hot path in the unrolled sequence"
845 		     " reaches the --param max-peel-branches limit.\n",
846 		     loop->num);
847 	  return false;
848 	}
849       else if (unr_insns
850 	       > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
851 	{
852 	  if (dump_file && (dump_flags & TDF_DETAILS))
853 	    fprintf (dump_file, "Not unrolling loop %d: "
854 		     "(--param max-completely-peeled-insns limit reached).\n",
855 		     loop->num);
856 	  return false;
857 	}
858       dump_printf_loc (report_flags, locus,
859                        "loop turned into non-loop; it never loops.\n");
860 
861       initialize_original_copy_tables ();
862       wont_exit = sbitmap_alloc (n_unroll + 1);
863       bitmap_ones (wont_exit);
864       bitmap_clear_bit (wont_exit, 0);
865 
866       if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
867 						 n_unroll, wont_exit,
868 						 exit, &to_remove,
869 						 DLTHE_FLAG_UPDATE_FREQ
870 						 | DLTHE_FLAG_COMPLETTE_PEEL))
871 	{
872           free_original_copy_tables ();
873 	  free (wont_exit);
874 	  if (dump_file && (dump_flags & TDF_DETAILS))
875 	    fprintf (dump_file, "Failed to duplicate the loop\n");
876 	  return false;
877 	}
878 
879       FOR_EACH_VEC_ELT (to_remove, i, e)
880 	{
881 	  bool ok = remove_path (e);
882 	  gcc_assert (ok);
883 	}
884 
885       to_remove.release ();
886       free (wont_exit);
887       free_original_copy_tables ();
888     }
889 
890 
891   /* Remove the conditional from the last copy of the loop.  */
892   if (edge_to_cancel)
893     {
894       gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
895       if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
896 	gimple_cond_make_false (cond);
897       else
898 	gimple_cond_make_true (cond);
899       update_stmt (cond);
900       /* Do not remove the path.  Doing so may remove an outer loop
901 	 and confuse bookkeeping code in tree_unroll_loops_completely.  */
902     }
903 
904   /* Store the loop for later unlooping and exit removal.  */
905   loops_to_unloop.safe_push (loop);
906   loops_to_unloop_nunroll.safe_push (n_unroll);
907 
908   if (dump_enabled_p ())
909     {
910       if (!n_unroll)
911         dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
912                          "loop turned into non-loop; it never loops\n");
913       else
914         {
915           dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
916                            "loop with %d iterations completely unrolled",
917 			   (int) (n_unroll + 1));
918           if (profile_info)
919             dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
920                          " (header execution count %d)",
921                          (int)loop->header->count);
922           dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
923         }
924     }
925 
926   if (dump_file && (dump_flags & TDF_DETAILS))
927     {
928       if (exit)
929         fprintf (dump_file, "Exit condition of peeled iterations was "
930 		 "eliminated.\n");
931       if (edge_to_cancel)
932         fprintf (dump_file, "Last iteration exit edge was proved true.\n");
933       else
934         fprintf (dump_file, "Latch of last iteration was marked by "
935 		 "__builtin_unreachable ().\n");
936     }
937 
938   return true;
939 }
940 
941 /* Return the estimated number of instructions after peeling.  */
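/* (Illustrative numbers: with SIZE->overall = 10,
   SIZE->eliminated_by_peeling = 4 and NPEEL = 3 this returns
   3 * (10 - 4) = 18.)  */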
942 static unsigned HOST_WIDE_INT
943 estimated_peeled_sequence_size (struct loop_size *size,
944 			        unsigned HOST_WIDE_INT npeel)
945 {
946   return MAX (npeel * (HOST_WIDE_INT) (size->overall
947 			     	       - size->eliminated_by_peeling), 1);
948 }
949 
950 /* If the loop is expected to iterate N times and is
951    small enough, duplicate the loop body N+1 times before
952    the loop itself.  This way the hot path will never
953    enter the loop.
954    Parameters are the same as for try_unroll_loop_completely.  */
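/* As an illustrative example: if profile feedback says a loop usually
   iterates twice, three copies of the body are placed in front of the
   loop, so the common case runs straight-line code and only the rare
   longer runs enter the loop itself.  */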
955 
956 static bool
957 try_peel_loop (struct loop *loop,
958 	       edge exit, tree niter,
959 	       HOST_WIDE_INT maxiter)
960 {
961   int npeel;
962   struct loop_size size;
963   int peeled_size;
964   sbitmap wont_exit;
965   unsigned i;
966   vec<edge> to_remove = vNULL;
967   edge e;
968 
969   /* If the iteration bound is known and large, then we can safely eliminate
970      the check in peeled copies.  */
971   if (TREE_CODE (niter) != INTEGER_CST)
972     exit = NULL;
973 
974   if (!flag_peel_loops || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0)
975     return false;
976 
977   /* Peel only innermost loops.  */
978   if (loop->inner)
979     {
980       if (dump_file)
981         fprintf (dump_file, "Not peeling: outer loop\n");
982       return false;
983     }
984 
985   if (!optimize_loop_for_speed_p (loop))
986     {
987       if (dump_file)
988         fprintf (dump_file, "Not peeling: cold loop\n");
989       return false;
990     }
991 
992   /* Check if there is an estimate on the number of iterations.  */
993   npeel = estimated_loop_iterations_int (loop);
994   if (npeel < 0)
995     {
996       if (dump_file)
997         fprintf (dump_file, "Not peeling: number of iterations is not "
998 	         "estimated\n");
999       return false;
1000     }
1001   if (maxiter >= 0 && maxiter <= npeel)
1002     {
1003       if (dump_file)
1004         fprintf (dump_file, "Not peeling: upper bound is known so can "
1005 		 "unroll completely\n");
1006       return false;
1007     }
1008 
1009   /* We want to peel the estimated number of iterations + 1 (so we never
1010      enter the loop on the quick path).  Check against PARAM_MAX_PEEL_TIMES
1011      and be sure to avoid overflows.  */
1012   if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
1013     {
1014       if (dump_file)
1015         fprintf (dump_file, "Not peeling: rolls too much "
1016 		 "(%i + 1 > --param max-peel-times)\n", npeel);
1017       return false;
1018     }
1019   npeel++;
1020 
1021   /* Check peeled loops size.  */
1022   tree_estimate_loop_size (loop, exit, NULL, &size,
1023 			   PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
1024   if ((peeled_size = estimated_peeled_sequence_size (&size, npeel))
1025       > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
1026     {
1027       if (dump_file)
1028         fprintf (dump_file, "Not peeling: peeled sequence size is too large "
1029 		 "(%i insns > --param max-peel-insns)", peeled_size);
1030       return false;
1031     }
1032 
1033   /* Duplicate possibly eliminating the exits.  */
1034   initialize_original_copy_tables ();
1035   wont_exit = sbitmap_alloc (npeel + 1);
1036   bitmap_ones (wont_exit);
1037   bitmap_clear_bit (wont_exit, 0);
1038   if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1039 					     npeel, wont_exit,
1040 					     exit, &to_remove,
1041 					     DLTHE_FLAG_UPDATE_FREQ
1042 					     | DLTHE_FLAG_COMPLETTE_PEEL))
1043     {
1044       free_original_copy_tables ();
1045       free (wont_exit);
1046       return false;
1047     }
1048   FOR_EACH_VEC_ELT (to_remove, i, e)
1049     {
1050       bool ok = remove_path (e);
1051       gcc_assert (ok);
1052     }
1053   free (wont_exit);
1054   free_original_copy_tables ();
1055   if (dump_file && (dump_flags & TDF_DETAILS))
1056     {
1057       fprintf (dump_file, "Peeled loop %d, %i times.\n",
1058 	       loop->num, npeel);
1059     }
1060   if (loop->any_upper_bound)
1061     loop->nb_iterations_upper_bound -= npeel;
1062   loop->nb_iterations_estimate = 0;
1063   /* Make sure to mark loop cold so we do not try to peel it more.  */
1064   scale_loop_profile (loop, 1, 0);
1065   loop->header->count = 0;
1066   return true;
1067 }
1068 /* Adds a canonical induction variable to LOOP if suitable.
1069    CREATE_IV is true if we may create a new iv.  UL determines
1070    which loops we are allowed to completely unroll.  If TRY_EVAL is true, we try
1071    to determine the number of iterations of a loop by direct evaluation.
1072    Returns true if cfg is changed.   */
1073 
1074 static bool
1075 canonicalize_loop_induction_variables (struct loop *loop,
1076 				       bool create_iv, enum unroll_level ul,
1077 				       bool try_eval)
1078 {
1079   edge exit = NULL;
1080   tree niter;
1081   HOST_WIDE_INT maxiter;
1082   bool modified = false;
1083   location_t locus = UNKNOWN_LOCATION;
1084 
1085   niter = number_of_latch_executions (loop);
1086   exit = single_exit (loop);
1087   if (TREE_CODE (niter) == INTEGER_CST)
1088     locus = gimple_location (last_stmt (exit->src));
1089   else
1090     {
1091       /* If the loop has more than one exit, try checking all of them
1092 	 for # of iterations determinable through scev.  */
1093       if (!exit)
1094 	niter = find_loop_niter (loop, &exit);
1095 
1096       /* Finally if everything else fails, try brute force evaluation.  */
1097       if (try_eval
1098 	  && (chrec_contains_undetermined (niter)
1099 	      || TREE_CODE (niter) != INTEGER_CST))
1100 	niter = find_loop_niter_by_eval (loop, &exit);
1101 
1102       if (exit)
1103         locus = gimple_location (last_stmt (exit->src));
1104 
1105       if (TREE_CODE (niter) != INTEGER_CST)
1106 	exit = NULL;
1107     }
1108 
1109   /* We work exceptionally hard here to estimate the bound
1110      by find_loop_niter_by_eval.  Be sure to keep it for future.  */
1111   if (niter && TREE_CODE (niter) == INTEGER_CST)
1112     {
1113       record_niter_bound (loop, wi::to_widest (niter),
1114 			  exit == single_likely_exit (loop), true);
1115     }
1116 
1117   /* Force re-computation of loop bounds so we can remove redundant exits.  */
1118   maxiter = max_loop_iterations_int (loop);
1119 
1120   if (dump_file && (dump_flags & TDF_DETAILS)
1121       && TREE_CODE (niter) == INTEGER_CST)
1122     {
1123       fprintf (dump_file, "Loop %d iterates ", loop->num);
1124       print_generic_expr (dump_file, niter, TDF_SLIM);
1125       fprintf (dump_file, " times.\n");
1126     }
1127   if (dump_file && (dump_flags & TDF_DETAILS)
1128       && maxiter >= 0)
1129     {
1130       fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
1131 	       (int)maxiter);
1132     }
1133 
1134   /* Remove exits that are known never to be taken based on the loop bound.
1135      Needs to be called after the call to max_loop_iterations_int that
1136      populates the loop bounds.  */
1137   modified |= remove_redundant_iv_tests (loop);
1138 
1139   if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
1140     return true;
1141 
1142   if (create_iv
1143       && niter && !chrec_contains_undetermined (niter)
1144       && exit && just_once_each_iteration_p (loop, exit->src))
1145     create_canonical_iv (loop, exit, niter);
1146 
1147   if (ul == UL_ALL)
1148     modified |= try_peel_loop (loop, exit, niter, maxiter);
1149 
1150   return modified;
1151 }
1152 
1153 /* The main entry point of the pass.  Adds canonical induction variables
1154    to the suitable loops.  */
1155 
1156 unsigned int
1157 canonicalize_induction_variables (void)
1158 {
1159   struct loop *loop;
1160   bool changed = false;
1161   bool irred_invalidated = false;
1162   bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
1163 
1164   free_numbers_of_iterations_estimates ();
1165   estimate_numbers_of_iterations ();
1166 
1167   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1168     {
1169       changed |= canonicalize_loop_induction_variables (loop,
1170 							true, UL_SINGLE_ITER,
1171 							true);
1172     }
1173   gcc_assert (!need_ssa_update_p (cfun));
1174 
1175   unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
1176   if (irred_invalidated
1177       && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1178     mark_irreducible_loops ();
1179 
1180   /* Clean up the information about numbers of iterations, since brute force
1181      evaluation could reveal new information.  */
1182   scev_reset ();
1183 
1184   if (!bitmap_empty_p (loop_closed_ssa_invalidated))
1185     {
1186       gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
1187       rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1188     }
1189   BITMAP_FREE (loop_closed_ssa_invalidated);
1190 
1191   if (changed)
1192     return TODO_cleanup_cfg;
1193   return 0;
1194 }
1195 
1196 /* Propagate constant SSA_NAMEs defined in basic block BB.  */
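/* For example (illustrative GIMPLE, not actual compiler output): after
   unrolling, a copy of the body may contain a degenerate PHI such as
   i_7 = PHI <5(3)> or an assignment i_8 = 5; both definitions are removed
   and the constant 5 is substituted into all uses of i_7 / i_8.  */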
1197 
1198 static void
1199 propagate_constants_for_unrolling (basic_block bb)
1200 {
1201   /* Look for degenerate PHI nodes with constant argument.  */
1202   for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
1203     {
1204       gphi *phi = gsi.phi ();
1205       tree result = gimple_phi_result (phi);
1206       tree arg = gimple_phi_arg_def (phi, 0);
1207 
1208       if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (result)
1209 	  && gimple_phi_num_args (phi) == 1
1210 	  && TREE_CODE (arg) == INTEGER_CST)
1211 	{
1212 	  replace_uses_by (result, arg);
1213 	  gsi_remove (&gsi, true);
1214 	  release_ssa_name (result);
1215 	}
1216       else
1217 	gsi_next (&gsi);
1218     }
1219 
1220   /* Look for assignments to SSA names with constant RHS.  */
1221   for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
1222     {
1223       gimple stmt = gsi_stmt (gsi);
1224       tree lhs;
1225 
1226       if (is_gimple_assign (stmt)
1227 	  && gimple_assign_rhs_code (stmt) == INTEGER_CST
1228 	  && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
1229 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
1230 	{
1231 	  replace_uses_by (lhs, gimple_assign_rhs1 (stmt));
1232 	  gsi_remove (&gsi, true);
1233 	  release_ssa_name (lhs);
1234 	}
1235       else
1236 	gsi_next (&gsi);
1237     }
1238 }
1239 
1240 /* Process loops from innermost to outer, stopping at the innermost
1241    loop we unrolled.  */
1242 
1243 static bool
1244 tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
1245 				vec<loop_p, va_heap>& father_stack,
1246 				struct loop *loop)
1247 {
1248   struct loop *loop_father;
1249   bool changed = false;
1250   struct loop *inner;
1251   enum unroll_level ul;
1252 
1253   /* Process inner loops first.  */
1254   for (inner = loop->inner; inner != NULL; inner = inner->next)
1255     changed |= tree_unroll_loops_completely_1 (may_increase_size,
1256 					       unroll_outer, father_stack,
1257 					       inner);
1258 
1259   /* If we changed an inner loop we cannot process outer loops in this
1260      iteration because SSA form is not up-to-date.  Continue with
1261      siblings of outer loops instead.  */
1262   if (changed)
1263     return true;
1264 
1265   /* Don't unroll #pragma omp simd loops until the vectorizer
1266      attempts to vectorize those.  */
1267   if (loop->force_vectorize)
1268     return false;
1269 
1270   /* Try to unroll this loop.  */
1271   loop_father = loop_outer (loop);
1272   if (!loop_father)
1273     return false;
1274 
1275   if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
1276       /* Unroll outermost loops only if asked to do so or they do
1277 	 not cause code growth.  */
1278       && (unroll_outer || loop_outer (loop_father)))
1279     ul = UL_ALL;
1280   else
1281     ul = UL_NO_GROWTH;
1282 
1283   if (canonicalize_loop_induction_variables
1284         (loop, false, ul, !flag_tree_loop_ivcanon))
1285     {
1286       /* If we'll continue unrolling, we need to propagate constants
1287 	 within the new basic blocks to fold away induction variable
1288 	 computations; otherwise, the size might blow up before the
1289 	 iteration is complete and the IR eventually cleaned up.  */
1290       if (loop_outer (loop_father) && !loop_father->aux)
1291 	{
1292 	  father_stack.safe_push (loop_father);
1293 	  loop_father->aux = loop_father;
1294 	}
1295 
1296       return true;
1297     }
1298 
1299   return false;
1300 }
1301 
1302 /* Unroll LOOPS completely if they iterate just a few times.  Unless
1303    MAY_INCREASE_SIZE is true, perform the unrolling only if the
1304    size of the code does not increase.  */
1305 
1306 unsigned int
1307 tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
1308 {
1309   auto_vec<loop_p, 16> father_stack;
1310   bool changed;
1311   int iteration = 0;
1312   bool irred_invalidated = false;
1313 
1314   do
1315     {
1316       changed = false;
1317       bitmap loop_closed_ssa_invalidated = NULL;
1318 
1319       if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
1320 	loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
1321 
1322       free_numbers_of_iterations_estimates ();
1323       estimate_numbers_of_iterations ();
1324 
1325       changed = tree_unroll_loops_completely_1 (may_increase_size,
1326 						unroll_outer, father_stack,
1327 						current_loops->tree_root);
1328       if (changed)
1329 	{
1330 	  struct loop **iter;
1331 	  unsigned i;
1332 
1333 	  /* Be sure to skip unlooped loops while processing the father_stack
1334 	     array.  */
1335 	  FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
1336 	    (*iter)->aux = NULL;
1337 	  FOR_EACH_VEC_ELT (father_stack, i, iter)
1338 	    if (!(*iter)->aux)
1339 	      *iter = NULL;
1340           unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
1341 
1342 	  /* We can not use TODO_update_ssa_no_phi because VOPS gets confused.  */
1343 	  if (loop_closed_ssa_invalidated
1344 	      && !bitmap_empty_p (loop_closed_ssa_invalidated))
1345             rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
1346 					  TODO_update_ssa);
1347 	  else
1348 	    update_ssa (TODO_update_ssa);
1349 
1350 	  /* Propagate the constants within the new basic blocks.  */
1351 	  FOR_EACH_VEC_ELT (father_stack, i, iter)
1352 	    if (*iter)
1353 	      {
1354 		unsigned j;
1355 		basic_block *body = get_loop_body_in_dom_order (*iter);
1356 		for (j = 0; j < (*iter)->num_nodes; j++)
1357 		  propagate_constants_for_unrolling (body[j]);
1358 		free (body);
1359 		(*iter)->aux = NULL;
1360 	      }
1361 	  father_stack.truncate (0);
1362 
1363 	  /* This will take care of removing completely unrolled loops
1364 	     from the loop structures so we can continue unrolling the now
1365 	     innermost loops.  */
1366 	  if (cleanup_tree_cfg ())
1367 	    update_ssa (TODO_update_ssa_only_virtuals);
1368 
1369 	  /* Clean up the information about numbers of iterations, since
1370 	     complete unrolling might have invalidated it.  */
1371 	  scev_reset ();
1372 #ifdef ENABLE_CHECKING
1373 	  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
1374 	    verify_loop_closed_ssa (true);
1375 #endif
1376 	}
1377       if (loop_closed_ssa_invalidated)
1378         BITMAP_FREE (loop_closed_ssa_invalidated);
1379     }
1380   while (changed
1381 	 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
1382 
1383   father_stack.release ();
1384 
1385   if (irred_invalidated
1386       && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1387     mark_irreducible_loops ();
1388 
1389   return 0;
1390 }
1391 
1392 /* Canonical induction variable creation pass.  */
1393 
1394 namespace {
1395 
1396 const pass_data pass_data_iv_canon =
1397 {
1398   GIMPLE_PASS, /* type */
1399   "ivcanon", /* name */
1400   OPTGROUP_LOOP, /* optinfo_flags */
1401   TV_TREE_LOOP_IVCANON, /* tv_id */
1402   ( PROP_cfg | PROP_ssa ), /* properties_required */
1403   0, /* properties_provided */
1404   0, /* properties_destroyed */
1405   0, /* todo_flags_start */
1406   0, /* todo_flags_finish */
1407 };
1408 
1409 class pass_iv_canon : public gimple_opt_pass
1410 {
1411 public:
1412   pass_iv_canon (gcc::context *ctxt)
1413     : gimple_opt_pass (pass_data_iv_canon, ctxt)
1414   {}
1415 
1416   /* opt_pass methods: */
1417   virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
1418   virtual unsigned int execute (function *fun);
1419 
1420 }; // class pass_iv_canon
1421 
1422 unsigned int
1423 pass_iv_canon::execute (function *fun)
1424 {
1425   if (number_of_loops (fun) <= 1)
1426     return 0;
1427 
1428   return canonicalize_induction_variables ();
1429 }
1430 
1431 } // anon namespace
1432 
1433 gimple_opt_pass *
1434 make_pass_iv_canon (gcc::context *ctxt)
1435 {
1436   return new pass_iv_canon (ctxt);
1437 }
1438 
1439 /* Complete unrolling of loops.  */
1440 
1441 namespace {
1442 
1443 const pass_data pass_data_complete_unroll =
1444 {
1445   GIMPLE_PASS, /* type */
1446   "cunroll", /* name */
1447   OPTGROUP_LOOP, /* optinfo_flags */
1448   TV_COMPLETE_UNROLL, /* tv_id */
1449   ( PROP_cfg | PROP_ssa ), /* properties_required */
1450   0, /* properties_provided */
1451   0, /* properties_destroyed */
1452   0, /* todo_flags_start */
1453   0, /* todo_flags_finish */
1454 };
1455 
1456 class pass_complete_unroll : public gimple_opt_pass
1457 {
1458 public:
1459   pass_complete_unroll (gcc::context *ctxt)
1460     : gimple_opt_pass (pass_data_complete_unroll, ctxt)
1461   {}
1462 
1463   /* opt_pass methods: */
1464   virtual unsigned int execute (function *);
1465 
1466 }; // class pass_complete_unroll
1467 
1468 unsigned int
1469 pass_complete_unroll::execute (function *fun)
1470 {
1471   if (number_of_loops (fun) <= 1)
1472     return 0;
1473 
1474   return tree_unroll_loops_completely (flag_unroll_loops
1475 				       || flag_peel_loops
1476 				       || optimize >= 3, true);
1477 }
1478 
1479 } // anon namespace
1480 
1481 gimple_opt_pass *
1482 make_pass_complete_unroll (gcc::context *ctxt)
1483 {
1484   return new pass_complete_unroll (ctxt);
1485 }
1486 
1487 /* Complete unrolling of inner loops.  */
1488 
1489 namespace {
1490 
1491 const pass_data pass_data_complete_unrolli =
1492 {
1493   GIMPLE_PASS, /* type */
1494   "cunrolli", /* name */
1495   OPTGROUP_LOOP, /* optinfo_flags */
1496   TV_COMPLETE_UNROLL, /* tv_id */
1497   ( PROP_cfg | PROP_ssa ), /* properties_required */
1498   0, /* properties_provided */
1499   0, /* properties_destroyed */
1500   0, /* todo_flags_start */
1501   0, /* todo_flags_finish */
1502 };
1503 
1504 class pass_complete_unrolli : public gimple_opt_pass
1505 {
1506 public:
1507   pass_complete_unrolli (gcc::context *ctxt)
1508     : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
1509   {}
1510 
1511   /* opt_pass methods: */
1512   virtual bool gate (function *) { return optimize >= 2; }
1513   virtual unsigned int execute (function *);
1514 
1515 }; // class pass_complete_unrolli
1516 
1517 unsigned int
1518 pass_complete_unrolli::execute (function *fun)
1519 {
1520   unsigned ret = 0;
1521 
1522   loop_optimizer_init (LOOPS_NORMAL
1523 		       | LOOPS_HAVE_RECORDED_EXITS);
1524   if (number_of_loops (fun) > 1)
1525     {
1526       scev_initialize ();
1527       ret = tree_unroll_loops_completely (optimize >= 3, false);
1528       free_numbers_of_iterations_estimates ();
1529       scev_finalize ();
1530     }
1531   loop_optimizer_finalize ();
1532 
1533   return ret;
1534 }
1535 
1536 } // anon namespace
1537 
1538 gimple_opt_pass *
1539 make_pass_complete_unrolli (gcc::context *ctxt)
1540 {
1541   return new pass_complete_unrolli (ctxt);
1542 }
1543 
1544 
1545