xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/cfgloopmanip.c (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 /* Loop manipulation code for GNU compiler.
2    Copyright (C) 2002-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "cfganal.h"
29 #include "cfgloop.h"
30 #include "gimple-iterator.h"
31 #include "gimplify-me.h"
32 #include "tree-ssa-loop-manip.h"
33 #include "dumpfile.h"
34 
35 static void copy_loops_to (struct loop **, int,
36 			   struct loop *);
37 static void loop_redirect_edge (edge, basic_block);
38 static void remove_bbs (basic_block *, int);
39 static bool rpe_enum_p (const_basic_block, const void *);
40 static int find_path (edge, basic_block **);
41 static void fix_loop_placements (struct loop *, bool *);
42 static bool fix_bb_placement (basic_block);
43 static void fix_bb_placements (basic_block, bool *, bitmap);
44 
45 /* Checks whether basic block BB is dominated by DATA.  */
46 static bool
47 rpe_enum_p (const_basic_block bb, const void *data)
48 {
49   return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) data);
50 }
51 
52 /* Remove basic blocks BBS.  NBBS is the number of the basic blocks.  */
53 
54 static void
55 remove_bbs (basic_block *bbs, int nbbs)
56 {
57   int i;
58 
59   for (i = 0; i < nbbs; i++)
60     delete_basic_block (bbs[i]);
61 }
62 
63 /* Find path -- i.e. the basic blocks dominated by edge E and put them
64    into array BBS, that will be allocated large enough to contain them.
65    E->dest must have exactly one predecessor for this to work (it is
66    easy to achieve and we do not put it here because we do not want to
67    alter anything by this function).  The number of basic blocks in the
68    path is returned.  */
69 static int
70 find_path (edge e, basic_block **bbs)
71 {
72   gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
73 
74   /* Find bbs in the path.  */
75   *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
76   return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
77 			     n_basic_blocks_for_fn (cfun), e->dest);
78 }
79 
80 /* Fix placement of basic block BB inside loop hierarchy --
81    Let L be a loop to that BB belongs.  Then every successor of BB must either
82      1) belong to some superloop of loop L, or
83      2) be a header of loop K such that K->outer is superloop of L
84    Returns true if we had to move BB into other loop to enforce this condition,
85    false if the placement of BB was already correct (provided that placements
86    of its successors are correct).  */
87 static bool
88 fix_bb_placement (basic_block bb)
89 {
90   edge e;
91   edge_iterator ei;
92   struct loop *loop = current_loops->tree_root, *act;
93 
94   FOR_EACH_EDGE (e, ei, bb->succs)
95     {
96       if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
97 	continue;
98 
99       act = e->dest->loop_father;
100       if (act->header == e->dest)
101 	act = loop_outer (act);
102 
103       if (flow_loop_nested_p (loop, act))
104 	loop = act;
105     }
106 
107   if (loop == bb->loop_father)
108     return false;
109 
110   remove_bb_from_loops (bb);
111   add_bb_to_loop (bb, loop);
112 
113   return true;
114 }
115 
/* Fix placement of LOOP inside loop tree, i.e. find the innermost superloop
   of LOOP to that leads at least one exit edge of LOOP, and set it
   as the immediate superloop of LOOP.  Return true if the immediate superloop
   of LOOP changed.

   IRRED_INVALIDATED is set to true if a change in the loop structures might
   invalidate the information about irreducible regions.  */

static bool
fix_loop_placement (struct loop *loop, bool *irred_invalidated)
{
  unsigned i;
  edge e;
  vec<edge> exits = get_loop_exit_edges (loop);
  struct loop *father = current_loops->tree_root, *act;
  bool ret = false;

  /* FATHER becomes the deepest loop that still contains the destination
     of every exit edge of LOOP -- that is the correct new parent.  */
  FOR_EACH_VEC_ELT (exits, i, e)
    {
      act = find_common_loop (loop, e->dest->loop_father);
      if (flow_loop_nested_p (father, act))
	father = act;
    }

  if (father != loop_outer (loop))
    {
      /* LOOP's blocks stop being counted in every superloop strictly
	 between the old parent and FATHER; fix the node counts before
	 re-linking LOOP in the tree.  */
      for (act = loop_outer (loop); act != father; act = loop_outer (act))
	act->num_nodes -= loop->num_nodes;
      flow_loop_tree_node_remove (loop);
      flow_loop_tree_node_add (father, loop);

      /* The exit edges of LOOP no longer exits its original immediate
	 superloops; remove them from the appropriate exit lists.  */
      FOR_EACH_VEC_ELT (exits, i, e)
	{
	  /* We may need to recompute irreducible loops.  */
	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;
	  rescan_loop_exit (e, false, false);
	}

      ret = true;
    }

  exits.release ();
  return ret;
}
163 
/* Fix placements of basic blocks inside loop hierarchy stored in loops; i.e.
   enforce condition stated in description of fix_bb_placement. We
   start from basic block FROM that had some of its successors removed, so that
   his placement no longer has to be correct, and iteratively fix placement of
   its predecessors that may change if placement of FROM changed.  Also fix
   placement of subloops of FROM->loop_father, that might also be altered due
   to this change; the condition for them is similar, except that instead of
   successors we consider edges coming out of the loops.

   If the changes may invalidate the information about irreducible regions,
   IRRED_INVALIDATED is set to true.

   If LOOP_CLOSED_SSA_INVALIDATED is non-zero then all basic blocks with
   changed loop_father are collected there. */

static void
fix_bb_placements (basic_block from,
		   bool *irred_invalidated,
		   bitmap loop_closed_ssa_invalidated)
{
  basic_block *queue, *qtop, *qbeg, *qend;
  struct loop *base_loop, *target_loop;
  edge e;

  /* We pass through blocks back-reachable from FROM, testing whether some
     of their successors moved to outer loop.  It may be necessary to
     iterate several times, but it is finite, as we stop unless we move
     the basic block up the loop structure.  The whole story is a bit
     more complicated due to presence of subloops, those are moved using
     fix_loop_placement.  */

  base_loop = from->loop_father;
  /* If we are already in the outermost loop, the basic blocks cannot be moved
     outside of it.  If FROM is the header of the base loop, it cannot be moved
     outside of it, either.  In both cases, we can end now.  */
  if (base_loop == current_loops->tree_root
      || from == base_loop->header)
    return;

  auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
  bitmap_clear (in_queue);
  bitmap_set_bit (in_queue, from->index);
  /* Prevent us from going out of the base_loop.  */
  bitmap_set_bit (in_queue, base_loop->header->index);

  /* The worklist is a circular buffer of base_loop->num_nodes + 1 slots;
     this suffices because IN_QUEUE guarantees each block appears in the
     buffer at most once at a time.  */
  queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
  qtop = queue + base_loop->num_nodes + 1;
  qbeg = queue;
  qend = queue + 1;
  *qbeg = from;

  while (qbeg != qend)
    {
      edge_iterator ei;
      /* Pop the next block from the circular buffer.  */
      from = *qbeg;
      qbeg++;
      if (qbeg == qtop)
	qbeg = queue;
      bitmap_clear_bit (in_queue, from->index);

      if (from->loop_father->header == from)
	{
	  /* Subloop header, maybe move the loop upward.  */
	  if (!fix_loop_placement (from->loop_father, irred_invalidated))
	    continue;
	  target_loop = loop_outer (from->loop_father);
	  if (loop_closed_ssa_invalidated)
	    {
	      /* Every block of the moved subloop changed loop_father;
		 record them all for the loop-closed-SSA update.  */
	      basic_block *bbs = get_loop_body (from->loop_father);
	      for (unsigned i = 0; i < from->loop_father->num_nodes; ++i)
		bitmap_set_bit (loop_closed_ssa_invalidated, bbs[i]->index);
	      free (bbs);
	    }
	}
      else
	{
	  /* Ordinary basic block.  */
	  if (!fix_bb_placement (from))
	    continue;
	  target_loop = from->loop_father;
	  if (loop_closed_ssa_invalidated)
	    bitmap_set_bit (loop_closed_ssa_invalidated, from->index);
	}

      FOR_EACH_EDGE (e, ei, from->succs)
	{
	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;
	}

      /* Something has changed, insert predecessors into queue.  */
      FOR_EACH_EDGE (e, ei, from->preds)
	{
	  basic_block pred = e->src;
	  struct loop *nca;

	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;

	  if (bitmap_bit_p (in_queue, pred->index))
	    continue;

	  /* If it is subloop, then it either was not moved, or
	     the path up the loop tree from base_loop do not contain
	     it.  */
	  nca = find_common_loop (pred->loop_father, base_loop);
	  if (pred->loop_father != base_loop
	      && (nca == base_loop
		  || nca != pred->loop_father))
	    pred = pred->loop_father->header;
	  else if (!flow_loop_nested_p (target_loop, pred->loop_father))
	    {
	      /* If PRED is already higher in the loop hierarchy than the
		 TARGET_LOOP to that we moved FROM, the change of the position
		 of FROM does not affect the position of PRED, so there is no
		 point in processing it.  */
	      continue;
	    }

	  if (bitmap_bit_p (in_queue, pred->index))
	    continue;

	  /* Schedule the basic block.  */
	  *qend = pred;
	  qend++;
	  if (qend == qtop)
	    qend = queue;
	  bitmap_set_bit (in_queue, pred->index);
	}
    }
  free (queue);
}
296 
/* Removes path beginning at edge E, i.e. remove basic blocks dominated by E
   and update loop structures and dominators.  Return true if we were able
   to remove the path, false otherwise (and nothing is affected then).

   IRRED_INVALIDATED, if non-NULL, is set to true when information about
   irreducible regions may have become stale; if NULL, the irreducible
   regions are recomputed here when needed.  LOOP_CLOSED_SSA_INVALIDATED,
   if non-NULL, collects blocks whose loop_father changed.  */
bool
remove_path (edge e, bool *irred_invalidated,
	     bitmap loop_closed_ssa_invalidated)
{
  edge ae;
  basic_block *rem_bbs, *bord_bbs, from, bb;
  vec<basic_block> dom_bbs;
  int i, nrem, n_bord_bbs;
  bool local_irred_invalidated = false;
  edge_iterator ei;
  struct loop *l, *f;

  /* If the caller does not track irreducibility, track it locally so we
     can recompute the regions ourselves at the end.  */
  if (! irred_invalidated)
    irred_invalidated = &local_irred_invalidated;

  if (!can_remove_branch_p (e))
    return false;

  /* Keep track of whether we need to update information about irreducible
     regions.  This is the case if the removed area is a part of the
     irreducible region, or if the set of basic blocks that belong to a loop
     that is inside an irreducible region is changed, or if such a loop is
     removed.  */
  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
    *irred_invalidated = true;

  /* We need to check whether basic blocks are dominated by the edge
     e, but we only have basic block dominators.  This is easy to
     fix -- when e->dest has exactly one predecessor, this corresponds
     to blocks dominated by e->dest, if not, split the edge.  */
  if (!single_pred_p (e->dest))
    e = single_pred_edge (split_edge (e));

  /* It may happen that by removing path we remove one or more loops
     we belong to.  In this case first unloop the loops, then proceed
     normally.   We may assume that e->dest is not a header of any loop,
     as it now has exactly one predecessor.  */
  for (l = e->src->loop_father; loop_outer (l); l = f)
    {
      f = loop_outer (l);
      if (dominated_by_p (CDI_DOMINATORS, l->latch, e->dest))
        unloop (l, irred_invalidated, loop_closed_ssa_invalidated);
    }

  /* Identify the path.  */
  nrem = find_path (e, &rem_bbs);

  n_bord_bbs = 0;
  bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  auto_sbitmap seen (last_basic_block_for_fn (cfun));
  bitmap_clear (seen);

  /* Find "border" blocks -- i.e. those with predecessor in removed path.  */
  for (i = 0; i < nrem; i++)
    bitmap_set_bit (seen, rem_bbs[i]->index);
  if (!*irred_invalidated)
    FOR_EACH_EDGE (ae, ei, e->src->succs)
      if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && !bitmap_bit_p (seen, ae->dest->index)
	  && ae->flags & EDGE_IRREDUCIBLE_LOOP)
	{
	  *irred_invalidated = true;
	  break;
	}

  for (i = 0; i < nrem; i++)
    {
      bb = rem_bbs[i];
      FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
	if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	    && !bitmap_bit_p (seen, ae->dest->index))
	  {
	    bitmap_set_bit (seen, ae->dest->index);
	    bord_bbs[n_bord_bbs++] = ae->dest;

	    if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
	      *irred_invalidated = true;
	  }
    }

  /* Remove the path.  */
  from = e->src;
  remove_branch (e);
  dom_bbs.create (0);

  /* Cancel loops contained in the path.  */
  for (i = 0; i < nrem; i++)
    if (rem_bbs[i]->loop_father->header == rem_bbs[i])
      cancel_loop_tree (rem_bbs[i]->loop_father);

  remove_bbs (rem_bbs, nrem);
  free (rem_bbs);

  /* Find blocks whose dominators may be affected.  */
  bitmap_clear (seen);
  for (i = 0; i < n_bord_bbs; i++)
    {
      basic_block ldom;

      bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
      if (bitmap_bit_p (seen, bb->index))
	continue;
      bitmap_set_bit (seen, bb->index);

      /* Dominator sons of BB that FROM does not dominate may get a new
	 immediate dominator once the path is gone.  */
      for (ldom = first_dom_son (CDI_DOMINATORS, bb);
	   ldom;
	   ldom = next_dom_son (CDI_DOMINATORS, ldom))
	if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
	  dom_bbs.safe_push (ldom);
    }

  /* Recount dominators.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
  dom_bbs.release ();
  free (bord_bbs);

  /* Fix placements of basic blocks inside loops and the placement of
     loops in the loop tree.  */
  fix_bb_placements (from, irred_invalidated, loop_closed_ssa_invalidated);
  fix_loop_placements (from->loop_father, irred_invalidated);

  if (local_irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return true;
}
427 
428 /* Creates place for a new LOOP in loops structure of FN.  */
429 
430 void
431 place_new_loop (struct function *fn, struct loop *loop)
432 {
433   loop->num = number_of_loops (fn);
434   vec_safe_push (loops_for_fn (fn)->larray, loop);
435 }
436 
/* Given LOOP structure with filled header and latch, find the body of the
   corresponding loop and add it to loops tree.  Insert the LOOP as a son of
   outer.  */

void
add_loop (struct loop *loop, struct loop *outer)
{
  basic_block *bbs;
  int i, n;
  struct loop *subloop;
  edge e;
  edge_iterator ei;

  /* Add it to loop structure.  */
  place_new_loop (cfun, loop);
  flow_loop_tree_node_add (outer, loop);

  /* Find its nodes.  */
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));

  for (i = 0; i < n; i++)
    {
      /* Blocks that directly belonged to OUTER now belong to LOOP;
	 add_bb_to_loop updates num_nodes of LOOP and its superloops.  */
      if (bbs[i]->loop_father == outer)
	{
	  remove_bb_from_loops (bbs[i]);
	  add_bb_to_loop (bbs[i], loop);
	  continue;
	}

      /* The block stays in its subloop; only account for it in LOOP
	 itself (superloops already counted it).  */
      loop->num_nodes++;

      /* If we find a direct subloop of OUTER, move it to LOOP.  */
      subloop = bbs[i]->loop_father;
      if (loop_outer (subloop) == outer
	  && subloop->header == bbs[i])
	{
	  flow_loop_tree_node_remove (subloop);
	  flow_loop_tree_node_add (loop, subloop);
	}
    }

  /* Update the information about loop exit edges.  */
  for (i = 0; i < n; i++)
    {
      FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	{
	  rescan_loop_exit (e, false, false);
	}
    }

  free (bbs);
}
490 
491 /* Scale profile of loop by P.  */
492 
493 void
494 scale_loop_frequencies (struct loop *loop, profile_probability p)
495 {
496   basic_block *bbs;
497 
498   bbs = get_loop_body (loop);
499   scale_bbs_frequencies (bbs, loop->num_nodes, p);
500   free (bbs);
501 }
502 
/* Scale profile in LOOP by P.
   If ITERATION_BOUND is non-zero, scale even further if loop is predicted
   to iterate too many times.
   Before calling this function, preheader block profile should be already
   scaled to final count.  This is necessary because loop iterations are
   determined by comparing header edge count to latch edge count and thus
   they need to be scaled synchronously.  */

void
scale_loop_profile (struct loop *loop, profile_probability p,
		    gcov_type iteration_bound)
{
  edge e, preheader_e;
  edge_iterator ei;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, ";; Scaling loop %i with scale ",
	       loop->num);
      p.dump (dump_file);
      fprintf (dump_file, " bounding iterations to %i\n",
	       (int)iteration_bound);
    }

  /* Scale the probabilities.  */
  scale_loop_frequencies (loop, p);

  if (iteration_bound == 0)
    return;

  gcov_type iterations = expected_loop_iterations_unbounded (loop, NULL, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, ";; guessed iterations after scaling %i\n",
	       (int)iterations);
    }

  /* See if loop is predicted to iterate too many times.  */
  if (iterations <= iteration_bound)
    return;

  preheader_e = loop_preheader_edge (loop);

  /* We could handle also loops without preheaders, but bounding is
     currently used only by optimizers that have preheaders constructed.  */
  gcc_checking_assert (preheader_e);
  profile_count count_in = preheader_e->count ();

  if (count_in > profile_count::zero ()
      && loop->header->count.initialized_p ())
    {
      profile_count count_delta = profile_count::zero ();

      e = single_exit (loop);
      if (e)
	{
	  /* Find the non-exit edge leaving the exit-condition block.
	     NOTE(review): if every other successor were ABNORMAL/FAKE,
	     OTHER_E would be left past the end of the iteration -- this
	     appears to rely on the exit block always having a normal
	     fall-through edge; confirm against callers.  */
	  edge other_e;
	  FOR_EACH_EDGE (other_e, ei, e->src->succs)
	    if (!(other_e->flags & (EDGE_ABNORMAL | EDGE_FAKE))
		&& e != other_e)
	      break;

	  /* Probability of exit must be 1/iterations.  */
	  count_delta = e->count ();
	  e->probability = profile_probability::always ()
				    .apply_scale (1, iteration_bound);
	  other_e->probability = e->probability.invert ();

	  /* In code below we only handle the following two updates.  */
	  if (other_e->dest != loop->header
	      && other_e->dest != loop->latch
	      && (dump_file && (dump_flags & TDF_DETAILS)))
	    {
	      fprintf (dump_file, ";; giving up on update of paths from "
		       "exit condition to latch\n");
	    }
	}
      else
        if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, ";; Loop has multiple exit edges; "
	      		      "giving up on exit condition update\n");

      /* Roughly speaking we want to reduce the loop body profile by the
	 difference of loop iterations.  We however can do better if
	 we look at the actual profile, if it is available.  */
      p = profile_probability::always ();

      count_in = count_in.apply_scale (iteration_bound, 1);
      p = count_in.probability_in (loop->header->count);
      if (!(p > profile_probability::never ()))
	p = profile_probability::very_unlikely ();

      if (p == profile_probability::always ()
	  || !p.initialized_p ())
	return;

      /* If latch exists, change its count, since we changed
	 probability of exit.  Theoretically we should update everything from
	 source of exit edge to latch, but for vectorizer this is enough.  */
      if (loop->latch && loop->latch != e->src)
	loop->latch->count += count_delta;

      /* Scale the probabilities.  */
      scale_loop_frequencies (loop, p);

      /* Change latch's count back.  */
      if (loop->latch && loop->latch != e->src)
	loop->latch->count -= count_delta;

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, ";; guessed iterations are now %i\n",
		 (int)expected_loop_iterations_unbounded (loop, NULL, true));
    }
}
618 
619 /* Recompute dominance information for basic blocks outside LOOP.  */
620 
621 static void
622 update_dominators_in_loop (struct loop *loop)
623 {
624   vec<basic_block> dom_bbs = vNULL;
625   basic_block *body;
626   unsigned i;
627 
628   auto_sbitmap seen (last_basic_block_for_fn (cfun));
629   bitmap_clear (seen);
630   body = get_loop_body (loop);
631 
632   for (i = 0; i < loop->num_nodes; i++)
633     bitmap_set_bit (seen, body[i]->index);
634 
635   for (i = 0; i < loop->num_nodes; i++)
636     {
637       basic_block ldom;
638 
639       for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
640 	   ldom;
641 	   ldom = next_dom_son (CDI_DOMINATORS, ldom))
642 	if (!bitmap_bit_p (seen, ldom->index))
643 	  {
644 	    bitmap_set_bit (seen, ldom->index);
645 	    dom_bbs.safe_push (ldom);
646 	  }
647     }
648 
649   iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
650   free (body);
651   dom_bbs.release ();
652 }
653 
/* Creates an if region as shown above. CONDITION is used to create
   the test for the if.

   |
   |     -------------                 -------------
   |     |  pred_bb  |                 |  pred_bb  |
   |     -------------                 -------------
   |           |                             |
   |           |                             | ENTRY_EDGE
   |           | ENTRY_EDGE                  V
   |           |             ====>     -------------
   |           |                       |  cond_bb  |
   |           |                       | CONDITION |
   |           |                       -------------
   |           V                        /         \
   |     -------------         e_false /           \ e_true
   |     |  succ_bb  |                V             V
   |     -------------         -----------       -----------
   |                           | false_bb |      | true_bb |
   |                           -----------       -----------
   |                                   \           /
   |                                    \         /
   |                                     V       V
   |                                   -------------
   |                                   |  join_bb  |
   |                                   -------------
   |                                         | exit_edge (result)
   |                                         V
   |                                    -----------
   |                                    | succ_bb |
   |                                    -----------
   |
 */

edge
create_empty_if_region_on_edge (edge entry_edge, tree condition)
{

  basic_block cond_bb, true_bb, false_bb, join_bb;
  edge e_true, e_false, exit_edge;
  gcond *cond_stmt;
  tree simple_cond;
  gimple_stmt_iterator gsi;

  cond_bb = split_edge (entry_edge);

  /* Insert condition in cond_bb.  */
  gsi = gsi_last_bb (cond_bb);
  /* Gimplify CONDITION first; it may emit statements into cond_bb.  */
  simple_cond =
    force_gimple_operand_gsi (&gsi, condition, true, NULL,
			      false, GSI_NEW_STMT);
  cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  /* Build the diamond by splitting cond_bb's single successor edge twice
     and adding the false arm as a new edge.  */
  join_bb = split_edge (single_succ_edge (cond_bb));

  e_true = single_succ_edge (cond_bb);
  true_bb = split_edge (e_true);

  e_false = make_edge (cond_bb, join_bb, 0);
  false_bb = split_edge (e_false);

  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;
  e_false->flags &= ~EDGE_FALLTHRU;
  e_false->flags |= EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, cond_bb, entry_edge->src);
  set_immediate_dominator (CDI_DOMINATORS, true_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, false_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);

  exit_edge = single_succ_edge (join_bb);

  if (single_pred_p (exit_edge->dest))
    set_immediate_dominator (CDI_DOMINATORS, exit_edge->dest, join_bb);

  return exit_edge;
}
734 
/* create_empty_loop_on_edge
   |
   |    - pred_bb -                   ------ pred_bb ------
   |   |           |                 | iv0 = initial_value |
   |    -----|-----                   ---------|-----------
   |         |                       ______    | entry_edge
   |         | entry_edge           /      |   |
   |         |             ====>   |      -V---V- loop_header -------------
   |         V                     |     | iv_before = phi (iv0, iv_after) |
   |    - succ_bb -                |      ---|-----------------------------
   |   |           |               |         |
   |    -----------                |      ---V--- loop_body ---------------
   |                               |     | iv_after = iv_before + stride   |
   |                               |     | if (iv_before < upper_bound)    |
   |                               |      ---|--------------\--------------
   |                               |         |               \ exit_e
   |                               |         V                \
   |                               |       - loop_latch -      V- succ_bb -
   |                               |      |              |     |           |
   |                               |       /-------------       -----------
   |                                \ ___ /

   Creates an empty loop as shown above, the IV_BEFORE is the SSA_NAME
   that is used before the increment of IV. IV_BEFORE should be used for
   adding code to the body that uses the IV.  OUTER is the outer loop in
   which the new loop should be inserted.

   Both INITIAL_VALUE and UPPER_BOUND expressions are gimplified and
   inserted on the loop entry edge.  This implies that this function
   should be used only when the UPPER_BOUND expression is a loop
   invariant.  */

struct loop *
create_empty_loop_on_edge (edge entry_edge,
			   tree initial_value,
			   tree stride, tree upper_bound,
			   tree iv,
			   tree *iv_before,
			   tree *iv_after,
			   struct loop *outer)
{
  basic_block loop_header, loop_latch, succ_bb, pred_bb;
  struct loop *loop;
  gimple_stmt_iterator gsi;
  gimple_seq stmts;
  gcond *cond_expr;
  tree exit_test;
  edge exit_e;

  gcc_assert (entry_edge && initial_value && stride && upper_bound && iv);

  /* Create header, latch and wire up the loop.  */
  pred_bb = entry_edge->src;
  loop_header = split_edge (entry_edge);
  loop_latch = split_edge (single_succ_edge (loop_header));
  succ_bb = single_succ (loop_latch);
  make_edge (loop_header, succ_bb, 0);
  redirect_edge_succ_nodup (single_succ_edge (loop_latch), loop_header);

  /* Set immediate dominator information.  */
  set_immediate_dominator (CDI_DOMINATORS, loop_header, pred_bb);
  set_immediate_dominator (CDI_DOMINATORS, loop_latch, loop_header);
  set_immediate_dominator (CDI_DOMINATORS, succ_bb, loop_header);

  /* Initialize a loop structure and put it in a loop hierarchy.  */
  loop = alloc_loop ();
  loop->header = loop_header;
  loop->latch = loop_latch;
  add_loop (loop, outer);

  /* TODO: Fix counts.  */
  scale_loop_frequencies (loop, profile_probability::even ());

  /* Update dominators.  */
  update_dominators_in_loop (loop);

  /* Modify edge flags.  */
  exit_e = single_exit (loop);
  exit_e->flags = EDGE_LOOP_EXIT | EDGE_FALSE_VALUE;
  single_pred_edge (loop_latch)->flags = EDGE_TRUE_VALUE;

  /* Construct IV code in loop.  */
  initial_value = force_gimple_operand (initial_value, &stmts, true, iv);
  if (stmts)
    {
      gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
      gsi_commit_edge_inserts ();
    }

  /* UPPER_BOUND must be loop invariant; it is materialized on the
     preheader edge, outside the loop body.  */
  upper_bound = force_gimple_operand (upper_bound, &stmts, true, NULL);
  if (stmts)
    {
      gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
      gsi_commit_edge_inserts ();
    }

  gsi = gsi_last_bb (loop_header);
  create_iv (initial_value, stride, iv, loop, &gsi, false,
	     iv_before, iv_after);

  /* Insert loop exit condition.  */
  cond_expr = gimple_build_cond
    (LT_EXPR, *iv_before, upper_bound, NULL_TREE, NULL_TREE);

  exit_test = gimple_cond_lhs (cond_expr);
  exit_test = force_gimple_operand_gsi (&gsi, exit_test, true, NULL,
					false, GSI_NEW_STMT);
  gimple_cond_set_lhs (cond_expr, exit_test);
  gsi = gsi_last_bb (exit_e->src);
  gsi_insert_after (&gsi, cond_expr, GSI_NEW_STMT);

  split_block_after_labels (loop_header);

  return loop;
}
850 
/* Make area between HEADER_EDGE and LATCH_EDGE a loop by connecting
   latch to header and update loop tree and dominators
   accordingly. Everything between them plus LATCH_EDGE destination must
   be dominated by HEADER_EDGE destination, and back-reachable from
   LATCH_EDGE source.  HEADER_EDGE is redirected to basic block SWITCH_BB,
   FALSE_EDGE of SWITCH_BB to original destination of HEADER_EDGE and
   TRUE_EDGE of SWITCH_BB to original destination of LATCH_EDGE.
   Returns the newly created loop.  Frequencies and counts in the new loop
   are scaled by FALSE_SCALE and in the old one by TRUE_SCALE.  */

struct loop *
loopify (edge latch_edge, edge header_edge,
	 basic_block switch_bb, edge true_edge, edge false_edge,
	 bool redirect_all_edges, profile_probability true_scale,
	 profile_probability false_scale)
{
  basic_block succ_bb = latch_edge->dest;
  basic_block pred_bb = header_edge->src;
  struct loop *loop = alloc_loop ();
  struct loop *outer = loop_outer (succ_bb->loop_father);
  profile_count cnt;

  loop->header = header_edge->dest;
  loop->latch = latch_edge->src;

  /* Remember the original header-edge count before redirections; it
     becomes SWITCH_BB's count below.  */
  cnt = header_edge->count ();

  /* Redirect edges.  */
  loop_redirect_edge (latch_edge, loop->header);
  loop_redirect_edge (true_edge, succ_bb);

  /* During loop versioning, one of the switch_bb edge is already properly
     set. Do not redirect it again unless redirect_all_edges is true.  */
  if (redirect_all_edges)
    {
      loop_redirect_edge (header_edge, switch_bb);
      loop_redirect_edge (false_edge, loop->header);

      /* Update dominators.  */
      set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb);
      set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb);
    }

  set_immediate_dominator (CDI_DOMINATORS, succ_bb, switch_bb);

  /* Compute new loop.  */
  add_loop (loop, outer);

  /* Add switch_bb to appropriate loop.  */
  if (switch_bb->loop_father)
    remove_bb_from_loops (switch_bb);
  add_bb_to_loop (switch_bb, outer);

  /* Fix counts.  */
  if (redirect_all_edges)
    {
      switch_bb->count = cnt;
    }
  scale_loop_frequencies (loop, false_scale);
  scale_loop_frequencies (succ_bb->loop_father, true_scale);
  update_dominators_in_loop (loop);

  return loop;
}
915 
916 /* Remove the latch edge of a LOOP and update loops to indicate that
917    the LOOP was removed.  After this function, original loop latch will
918    have no successor, which caller is expected to fix somehow.
919 
920    If this may cause the information about irreducible regions to become
921    invalid, IRRED_INVALIDATED is set to true.
922 
923    LOOP_CLOSED_SSA_INVALIDATED, if non-NULL, is a bitmap where we store
   basic blocks that had non-trivial update on their loop_father.  */
925 
void
unloop (struct loop *loop, bool *irred_invalidated,
	bitmap loop_closed_ssa_invalidated)
{
  basic_block *body;
  struct loop *ploop;
  unsigned i, n;
  basic_block latch = loop->latch;
  bool dummy = false;

  /* Removing the latch edge of a loop inside an irreducible region may
     invalidate the irreducible-region markings.  */
  if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)
    *irred_invalidated = true;

  /* This is relatively straightforward.  The dominators are unchanged, as
     loop header dominates loop latch, so the only thing we have to care of
     is the placement of loops and basic blocks inside the loop tree.  We
     move them all to the loop->outer, and then let fix_bb_placements do
     its work.  */

  body = get_loop_body (loop);
  n = loop->num_nodes;
  /* Re-parent the blocks that belong directly to LOOP into its outer
     loop (blocks of subloops keep their loop_father).  */
  for (i = 0; i < n; i++)
    if (body[i]->loop_father == loop)
      {
	remove_bb_from_loops (body[i]);
	add_bb_to_loop (body[i], loop_outer (loop));
      }
  free (body);

  /* Re-parent the immediate subloops into the outer loop as well.  */
  while (loop->inner)
    {
      ploop = loop->inner;
      flow_loop_tree_node_remove (ploop);
      flow_loop_tree_node_add (loop_outer (loop), ploop);
    }

  /* Remove the loop and free its data.  */
  delete_loop (loop);

  /* The latch is left without a successor; the caller must fix this up.  */
  remove_edge (single_succ_edge (latch));

  /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if
     there is an irreducible region inside the cancelled loop, the flags will
     be still correct.  */
  fix_bb_placements (latch, &dummy, loop_closed_ssa_invalidated);
}
972 
973 /* Fix placement of superloops of LOOP inside loop tree, i.e. ensure that
974    condition stated in description of fix_loop_placement holds for them.
975    It is used in case when we removed some edges coming out of LOOP, which
976    may cause the right placement of LOOP inside loop tree to change.
977 
978    IRRED_INVALIDATED is set to true if a change in the loop structures might
979    invalidate the information about irreducible regions.  */
980 
981 static void
982 fix_loop_placements (struct loop *loop, bool *irred_invalidated)
983 {
984   struct loop *outer;
985 
986   while (loop_outer (loop))
987     {
988       outer = loop_outer (loop);
989       if (!fix_loop_placement (loop, irred_invalidated))
990 	break;
991 
992       /* Changing the placement of a loop in the loop tree may alter the
993 	 validity of condition 2) of the description of fix_bb_placement
994 	 for its preheader, because the successor is the header and belongs
995 	 to the loop.  So call fix_bb_placements to fix up the placement
996 	 of the preheader and (possibly) of its predecessors.  */
997       fix_bb_placements (loop_preheader_edge (loop)->src,
998 			 irred_invalidated, NULL);
999       loop = outer;
1000     }
1001 }
1002 
1003 /* Duplicate loop bounds and other information we store about
1004    the loop into its duplicate.  */
1005 
1006 void
1007 copy_loop_info (struct loop *loop, struct loop *target)
1008 {
1009   gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
1010   target->any_upper_bound = loop->any_upper_bound;
1011   target->nb_iterations_upper_bound = loop->nb_iterations_upper_bound;
1012   target->any_likely_upper_bound = loop->any_likely_upper_bound;
1013   target->nb_iterations_likely_upper_bound
1014     = loop->nb_iterations_likely_upper_bound;
1015   target->any_estimate = loop->any_estimate;
1016   target->nb_iterations_estimate = loop->nb_iterations_estimate;
1017   target->estimate_state = loop->estimate_state;
1018   target->safelen = loop->safelen;
1019   target->constraints = loop->constraints;
1020   target->can_be_parallel = loop->can_be_parallel;
1021   target->warned_aggressive_loop_optimizations
1022     |= loop->warned_aggressive_loop_optimizations;
1023   target->dont_vectorize = loop->dont_vectorize;
1024   target->force_vectorize = loop->force_vectorize;
1025   target->in_oacc_kernels_region = loop->in_oacc_kernels_region;
1026   target->unroll = loop->unroll;
1027   target->owned_clique = loop->owned_clique;
1028 }
1029 
1030 /* Copies copy of LOOP as subloop of TARGET loop, placing newly
1031    created loop into loops structure.  If AFTER is non-null
1032    the new loop is added at AFTER->next, otherwise in front of TARGETs
1033    sibling list.  */
1034 struct loop *
1035 duplicate_loop (struct loop *loop, struct loop *target, struct loop *after)
1036 {
1037   struct loop *cloop;
1038   cloop = alloc_loop ();
1039   place_new_loop (cfun, cloop);
1040 
1041   copy_loop_info (loop, cloop);
1042 
1043   /* Mark the new loop as copy of LOOP.  */
1044   set_loop_copy (loop, cloop);
1045 
1046   /* Add it to target.  */
1047   flow_loop_tree_node_add (target, cloop, after);
1048 
1049   return cloop;
1050 }
1051 
1052 /* Copies structure of subloops of LOOP into TARGET loop, placing
1053    newly created loops into loop tree at the end of TARGETs sibling
1054    list in the original order.  */
1055 void
1056 duplicate_subloops (struct loop *loop, struct loop *target)
1057 {
1058   struct loop *aloop, *cloop, *tail;
1059 
1060   for (tail = target->inner; tail && tail->next; tail = tail->next)
1061     ;
1062   for (aloop = loop->inner; aloop; aloop = aloop->next)
1063     {
1064       cloop = duplicate_loop (aloop, target, tail);
1065       tail = cloop;
1066       gcc_assert(!tail->next);
1067       duplicate_subloops (aloop, cloop);
1068     }
1069 }
1070 
1071 /* Copies structure of subloops of N loops, stored in array COPIED_LOOPS,
1072    into TARGET loop, placing newly created loops into loop tree adding
1073    them to TARGETs sibling list at the end in order.  */
1074 static void
1075 copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
1076 {
1077   struct loop *aloop, *tail;
1078   int i;
1079 
1080   for (tail = target->inner; tail && tail->next; tail = tail->next)
1081     ;
1082   for (i = 0; i < n; i++)
1083     {
1084       aloop = duplicate_loop (copied_loops[i], target, tail);
1085       tail = aloop;
1086       gcc_assert(!tail->next);
1087       duplicate_subloops (copied_loops[i], aloop);
1088     }
1089 }
1090 
1091 /* Redirects edge E to basic block DEST.  */
1092 static void
1093 loop_redirect_edge (edge e, basic_block dest)
1094 {
1095   if (e->dest == dest)
1096     return;
1097 
1098   redirect_edge_and_branch_force (e, dest);
1099 }
1100 
1101 /* Check whether LOOP's body can be duplicated.  */
1102 bool
1103 can_duplicate_loop_p (const struct loop *loop)
1104 {
1105   int ret;
1106   basic_block *bbs = get_loop_body (loop);
1107 
1108   ret = can_copy_bbs_p (bbs, loop->num_nodes);
1109   free (bbs);
1110 
1111   return ret;
1112 }
1113 
1114 /* Duplicates body of LOOP to given edge E NDUPL times.  Takes care of updating
1115    loop structure and dominators (order of inner subloops is retained).
1116    E's destination must be LOOP header for this to work, i.e. it must be entry
1117    or latch edge of this loop; these are unique, as the loops must have
1118    preheaders for this function to work correctly (in case E is latch, the
1119    function unrolls the loop, if E is entry edge, it peels the loop).  Store
1120    edges created by copying ORIG edge from copies corresponding to set bits in
1121    WONT_EXIT bitmap (bit 0 corresponds to original LOOP body, the other copies
1122    are numbered in order given by control flow through them) into TO_REMOVE
1123    array.  Returns false if duplication is
1124    impossible.  */
1125 
bool
duplicate_loop_to_header_edge (struct loop *loop, edge e,
			       unsigned int ndupl, sbitmap wont_exit,
			       edge orig, vec<edge> *to_remove,
			       int flags)
{
  struct loop *target, *aloop;
  struct loop **orig_loops;
  unsigned n_orig_loops;
  basic_block header = loop->header, latch = loop->latch;
  basic_block *new_bbs, *bbs, *first_active;
  basic_block new_bb, bb, first_active_latch = NULL;
  edge ae, latch_edge;
  edge spec_edges[2], new_spec_edges[2];
  const int SE_LATCH = 0;
  const int SE_ORIG = 1;
  unsigned i, j, n;
  int is_latch = (latch == e->src);
  profile_probability *scale_step = NULL;
  profile_probability scale_main = profile_probability::always ();
  profile_probability scale_act = profile_probability::always ();
  profile_count after_exit_num = profile_count::zero (),
	        after_exit_den = profile_count::zero ();
  bool scale_after_exit = false;
  int add_irreducible_flag;
  basic_block place_after;
  bitmap bbs_to_scale = NULL;
  bitmap_iterator bi;

  gcc_assert (e->dest == loop->header);
  gcc_assert (ndupl > 0);

  if (orig)
    {
      /* Orig must be edge out of the loop.  */
      gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
      gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
    }

  n = loop->num_nodes;
  bbs = get_loop_body_in_dom_order (loop);
  gcc_assert (bbs[0] == loop->header);
  gcc_assert (bbs[n  - 1] == loop->latch);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return false;
    }
  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  /* In case we are doing loop peeling and the loop is in the middle of
     irreducible region, the peeled copies will be inside it too.  */
  add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
  gcc_assert (!is_latch || !add_irreducible_flag);

  /* Find edge from latch.  */
  latch_edge = loop_latch_edge (loop);

  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      /* Calculate coefficients by that we have to scale counts
	 of duplicated loop bodies.  */
      profile_count count_in = header->count;
      profile_count count_le = latch_edge->count ();
      profile_count count_out_orig = orig ? orig->count () : count_in - count_le;
      profile_probability prob_pass_thru = count_le.probability_in (count_in);
      profile_probability prob_pass_wont_exit =
	      (count_le + count_out_orig).probability_in (count_in);

      if (orig && orig->probability.initialized_p ()
	  && !(orig->probability == profile_probability::always ()))
	{
	  /* The blocks that are dominated by a removed exit edge ORIG have
	     frequencies scaled by this.  */
	  if (orig->count ().initialized_p ())
	    {
	      after_exit_num = orig->src->count;
	      after_exit_den = after_exit_num - orig->count ();
	      scale_after_exit = true;
	    }
	  bbs_to_scale = BITMAP_ALLOC (NULL);
	  for (i = 0; i < n; i++)
	    {
	      if (bbs[i] != orig->src
		  && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src))
		bitmap_set_bit (bbs_to_scale, i);
	    }
	}

      /* scale_step[i] is the ratio between the header counts of copy
	 i+1 and copy i; it depends on whether the copy may exit via
	 ORIG (WONT_EXIT bit clear) or not.  */
      scale_step = XNEWVEC (profile_probability, ndupl);

      for (i = 1; i <= ndupl; i++)
	scale_step[i - 1] = bitmap_bit_p (wont_exit, i)
				? prob_pass_wont_exit
				: prob_pass_thru;

      /* Complete peeling is special as the probability of exit in last
	 copy becomes 1.  */
      if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
	{
	  profile_count wanted_count = e->count ();

	  gcc_assert (!is_latch);
	  /* First copy has count of incoming edge.  Each subsequent
	     count should be reduced by prob_pass_wont_exit.  Caller
	     should've managed the flags so all except for original loop
	     have wont_exit set.  */
	  scale_act = wanted_count.probability_in (count_in);
	  /* Now simulate the duplication adjustments and compute header
	     frequency of the last copy.  */
	  for (i = 0; i < ndupl; i++)
	    wanted_count = wanted_count.apply_probability (scale_step [i]);
	  scale_main = wanted_count.probability_in (count_in);
	}
      /* Here we insert loop bodies inside the loop itself (for loop unrolling).
	 First iteration will be original loop followed by duplicated bodies.
	 It is necessary to scale down the original so we get right overall
	 number of iterations.  */
      else if (is_latch)
	{
	  profile_probability prob_pass_main = bitmap_bit_p (wont_exit, 0)
							? prob_pass_wont_exit
							: prob_pass_thru;
	  profile_probability p = prob_pass_main;
	  profile_count scale_main_den = count_in;
	  for (i = 0; i < ndupl; i++)
	    {
	      scale_main_den += count_in.apply_probability (p);
	      p = p * scale_step[i];
	    }
	  /* If original loop is executed COUNT_IN times, the unrolled
	     loop will account SCALE_MAIN_DEN times.  */
	  scale_main = count_in.probability_in (scale_main_den);
	  scale_act = scale_main * prob_pass_main;
	}
      else
	{
	  /* Peeling: the first copy runs as often as the preheader edge.  */
	  profile_count preheader_count = e->count ();
	  for (i = 0; i < ndupl; i++)
	    scale_main = scale_main * scale_step[i];
	  scale_act = preheader_count.probability_in (count_in);
	}
    }

  /* Loop the new bbs will belong to.  */
  target = e->src->loop_father;

  /* Original loops.  */
  n_orig_loops = 0;
  for (aloop = loop->inner; aloop; aloop = aloop->next)
    n_orig_loops++;
  orig_loops = XNEWVEC (struct loop *, n_orig_loops);
  for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
    orig_loops[i] = aloop;

  /* Record TARGET as the copy of LOOP itself.  */
  set_loop_copy (loop, target);

  /* first_active[i] will be the earliest copy (in control-flow order) of
     bbs[i]; when unrolling, the original body itself comes first.  */
  first_active = XNEWVEC (basic_block, n);
  if (is_latch)
    {
      memcpy (first_active, bbs, n * sizeof (basic_block));
      first_active_latch = latch;
    }

  spec_edges[SE_ORIG] = orig;
  spec_edges[SE_LATCH] = latch_edge;

  place_after = e->src;
  for (j = 0; j < ndupl; j++)
    {
      /* Copy loops.  */
      copy_loops_to (orig_loops, n_orig_loops, target);

      /* Copy bbs.  */
      copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop,
		place_after, true);
      place_after = new_spec_edges[SE_LATCH]->src;

      if (flags & DLTHE_RECORD_COPY_NUMBER)
	for (i = 0; i < n; i++)
	  {
	    gcc_assert (!new_bbs[i]->aux);
	    new_bbs[i]->aux = (void *)(size_t)(j + 1);
	  }

      /* Note whether the blocks and edges belong to an irreducible loop.  */
      if (add_irreducible_flag)
	{
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags |= BB_DUPLICATED;
	  for (i = 0; i < n; i++)
	    {
	      edge_iterator ei;
	      new_bb = new_bbs[i];
	      if (new_bb->loop_father == target)
		new_bb->flags |= BB_IRREDUCIBLE_LOOP;

	      FOR_EACH_EDGE (ae, ei, new_bb->succs)
		if ((ae->dest->flags & BB_DUPLICATED)
		    && (ae->src->loop_father == target
			|| ae->dest->loop_father == target))
		  ae->flags |= EDGE_IRREDUCIBLE_LOOP;
	    }
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags &= ~BB_DUPLICATED;
	}

      /* Redirect the special edges.  For unrolling the new copy is spliced
	 in at the latch; for peeling it is spliced in at edge E.  */
      if (is_latch)
	{
	  redirect_edge_and_branch_force (latch_edge, new_bbs[0]);
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch);
	  latch = loop->latch = new_bbs[n - 1];
	  e = latch_edge = new_spec_edges[SE_LATCH];
	}
      else
	{
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  redirect_edge_and_branch_force (e, new_bbs[0]);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src);
	  e = new_spec_edges[SE_LATCH];
	}

      /* Record exit edge in this copy.  */
      if (orig && bitmap_bit_p (wont_exit, j + 1))
	{
	  if (to_remove)
	    to_remove->safe_push (new_spec_edges[SE_ORIG]);
	  force_edge_cold (new_spec_edges[SE_ORIG], true);

	  /* Scale the frequencies of the blocks dominated by the exit.  */
	  if (bbs_to_scale && scale_after_exit)
	    {
	      EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
		scale_bbs_frequencies_profile_count (new_bbs + i, 1, after_exit_num,
						     after_exit_den);
	    }
	}

      /* Record the first copy in the control flow order if it is not
	 the original loop (i.e. in case of peeling).  */
      if (!first_active_latch)
	{
	  memcpy (first_active, new_bbs, n * sizeof (basic_block));
	  first_active_latch = new_bbs[n - 1];
	}

      /* Set counts and frequencies.  */
      if (flags & DLTHE_FLAG_UPDATE_FREQ)
	{
	  scale_bbs_frequencies (new_bbs, n, scale_act);
	  scale_act = scale_act * scale_step[j];
	}
    }
  free (new_bbs);
  free (orig_loops);

  /* Record the exit edge in the original loop body, and update the frequencies.  */
  if (orig && bitmap_bit_p (wont_exit, 0))
    {
      if (to_remove)
	to_remove->safe_push (orig);
      force_edge_cold (orig, true);

      /* Scale the frequencies of the blocks dominated by the exit.  */
      if (bbs_to_scale && scale_after_exit)
	{
	  EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
	    scale_bbs_frequencies_profile_count (bbs + i, 1, after_exit_num,
						 after_exit_den);
	}
    }

  /* Update the original loop.  */
  if (!is_latch)
    set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      scale_bbs_frequencies (bbs, n, scale_main);
      free (scale_step);
    }

  /* Update dominators of outer blocks if affected.  */
  for (i = 0; i < n; i++)
    {
      basic_block dominated, dom_bb;
      vec<basic_block> dom_bbs;
      unsigned j;

      bb = bbs[i];
      bb->aux = 0;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, bb);
      FOR_EACH_VEC_ELT (dom_bbs, j, dominated)
	{
	  if (flow_bb_inside_loop_p (loop, dominated))
	    continue;
	  dom_bb = nearest_common_dominator (
			CDI_DOMINATORS, first_active[i], first_active_latch);
	  set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
	}
      dom_bbs.release ();
    }
  free (first_active);

  free (bbs);
  BITMAP_FREE (bbs_to_scale);

  return true;
}
1441 
1442 /* A callback for make_forwarder block, to redirect all edges except for
1443    MFB_KJ_EDGE to the entry part.  E is the edge for that we should decide
1444    whether to redirect it.  */
1445 
1446 edge mfb_kj_edge;
1447 bool
1448 mfb_keep_just (edge e)
1449 {
1450   return e != mfb_kj_edge;
1451 }
1452 
1453 /* True when a candidate preheader BLOCK has predecessors from LOOP.  */
1454 
1455 static bool
1456 has_preds_from_loop (basic_block block, struct loop *loop)
1457 {
1458   edge e;
1459   edge_iterator ei;
1460 
1461   FOR_EACH_EDGE (e, ei, block->preds)
1462     if (e->src->loop_father == loop)
1463       return true;
1464   return false;
1465 }
1466 
1467 /* Creates a pre-header for a LOOP.  Returns newly created block.  Unless
1468    CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have single
1469    entry; otherwise we also force preheader block to have only one successor.
1470    When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block
1471    to be a fallthru predecessor to the loop header and to have only
1472    predecessors from outside of the loop.
1473    The function also updates dominators.  */
1474 
basic_block
create_preheader (struct loop *loop, int flags)
{
  edge e;
  basic_block dummy;
  int nentry = 0;
  bool irred = false;
  bool latch_edge_was_fallthru;
  edge one_succ_pred = NULL, single_entry = NULL;
  edge_iterator ei;

  /* Count the entry edges (all header predecessors except the latch),
     remembering the last one seen and, if any, one whose source has a
     single successor.  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e->src == loop->latch)
	continue;
      irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
      nentry++;
      single_entry = e;
      if (single_succ_p (e->src))
	one_succ_pred = e;
    }
  gcc_assert (nentry);
  if (nentry == 1)
    {
      bool need_forwarder_block = false;

      /* We do not allow entry block to be the loop preheader, since we
	     cannot emit code there.  */
      if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        need_forwarder_block = true;
      else
        {
          /* If we want simple preheaders, also force the preheader to have
             just a single successor.  */
          if ((flags & CP_SIMPLE_PREHEADERS)
              && !single_succ_p (single_entry->src))
            need_forwarder_block = true;
          /* If we want fallthru preheaders, also create forwarder block when
             preheader ends with a jump or has predecessors from loop.  */
          else if ((flags & CP_FALLTHRU_PREHEADERS)
                   && (JUMP_P (BB_END (single_entry->src))
                       || has_preds_from_loop (single_entry->src, loop)))
            need_forwarder_block = true;
        }
      if (! need_forwarder_block)
	return NULL;
    }

  mfb_kj_edge = loop_latch_edge (loop);
  latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0;
  /* With a single suitable entry edge, splitting it suffices; otherwise
     split the header into a forwarder block (keeping only the latch edge
     on the new header via mfb_keep_just).  */
  if (nentry == 1
      && ((flags & CP_FALLTHRU_PREHEADERS) == 0
  	  || (single_entry->flags & EDGE_CROSSING) == 0))
    dummy = split_edge (single_entry);
  else
    {
      edge fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULL);
      dummy = fallthru->src;
      loop->header = fallthru->dest;
    }

  /* Try to be clever in placing the newly created preheader.  The idea is to
     avoid breaking any "fallthruness" relationship between blocks.

     The preheader was created just before the header and all incoming edges
     to the header were redirected to the preheader, except the latch edge.
     So the only problematic case is when this latch edge was a fallthru
     edge: it is not anymore after the preheader creation so we have broken
     the fallthruness.  We're therefore going to look for a better place.  */
  if (latch_edge_was_fallthru)
    {
      if (one_succ_pred)
	e = one_succ_pred;
      else
	e = EDGE_PRED (dummy, 0);

      move_block_after (dummy, e->src);
    }

  /* If any entry edge was inside an irreducible region, so is the new
     preheader and its edge into the loop.  */
  if (irred)
    {
      dummy->flags |= BB_IRREDUCIBLE_LOOP;
      single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (dump_file)
    fprintf (dump_file, "Created preheader block for loop %i\n",
	     loop->num);

  if (flags & CP_FALLTHRU_PREHEADERS)
    gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)
                && !JUMP_P (BB_END (dummy)));

  return dummy;
}
1570 
1571 /* Create preheaders for each loop; for meaning of FLAGS see create_preheader.  */
1572 
1573 void
1574 create_preheaders (int flags)
1575 {
1576   struct loop *loop;
1577 
1578   if (!current_loops)
1579     return;
1580 
1581   FOR_EACH_LOOP (loop, 0)
1582     create_preheader (loop, flags);
1583   loops_state_set (LOOPS_HAVE_PREHEADERS);
1584 }
1585 
1586 /* Forces all loop latches to have only single successor.  */
1587 
1588 void
1589 force_single_succ_latches (void)
1590 {
1591   struct loop *loop;
1592   edge e;
1593 
1594   FOR_EACH_LOOP (loop, 0)
1595     {
1596       if (loop->latch != loop->header && single_succ_p (loop->latch))
1597 	continue;
1598 
1599       e = find_edge (loop->latch, loop->header);
1600       gcc_checking_assert (e != NULL);
1601 
1602       split_edge (e);
1603     }
1604   loops_state_set (LOOPS_HAVE_SIMPLE_LATCHES);
1605 }
1606 
1607 /* This function is called from loop_version.  It splits the entry edge
1608    of the loop we want to version, adds the versioning condition, and
1609    adjust the edges to the two versions of the loop appropriately.
1610    e is an incoming edge. Returns the basic block containing the
1611    condition.
1612 
1613    --- edge e ---- > [second_head]
1614 
1615    Split it and insert new conditional expression and adjust edges.
1616 
1617     --- edge e ---> [cond expr] ---> [first_head]
1618 			|
1619 			+---------> [second_head]
1620 
1621   THEN_PROB is the probability of then branch of the condition.
1622   ELSE_PROB is the probability of else branch. Note that they may be both
1623   REG_BR_PROB_BASE when condition is IFN_LOOP_VECTORIZED or
1624   IFN_LOOP_DIST_ALIAS.  */
1625 
static basic_block
lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
			   edge e, void *cond_expr,
			   profile_probability then_prob,
			   profile_probability else_prob)
{
  basic_block new_head = NULL;
  edge e1;

  gcc_assert (e->dest == second_head);

  /* Split edge 'e'. This will create a new basic block, where we can
     insert conditional expr.  */
  new_head = split_edge (e);

  lv_add_condition_to_bb (first_head, second_head, new_head,
			  cond_expr);

  /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there.  */
  e = single_succ_edge (new_head);
  e1 = make_edge (new_head, first_head,
		  current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0);
  /* The then branch (E1) leads to FIRST_HEAD, the else branch (E, the
     old fallthrough out of the split block) to SECOND_HEAD.  */
  e1->probability = then_prob;
  e->probability = else_prob;

  /* Both heads are now reached only through the condition block.  */
  set_immediate_dominator (CDI_DOMINATORS, first_head, new_head);
  set_immediate_dominator (CDI_DOMINATORS, second_head, new_head);

  /* Adjust loop header phi nodes.  */
  lv_adjust_loop_header_phi (first_head, second_head, new_head, e1);

  return new_head;
}
1659 
1660 /* Main entry point for Loop Versioning transformation.
1661 
1662    This transformation given a condition and a loop, creates
1663    -if (condition) { loop_copy1 } else { loop_copy2 },
1664    where loop_copy1 is the loop transformed in one way, and loop_copy2
1665    is the loop transformed in another way (or unchanged). COND_EXPR
1666    may be a run time test for things that were not resolved by static
1667    analysis (overlapping ranges (anti-aliasing), alignment, etc.).
1668 
1669    If non-NULL, CONDITION_BB is set to the basic block containing the
1670    condition.
1671 
1672    THEN_PROB is the probability of the then edge of the if.  THEN_SCALE
1673    is the ratio by that the frequencies in the original loop should
1674    be scaled.  ELSE_SCALE is the ratio by that the frequencies in the
1675    new loop should be scaled.
1676 
1677    If PLACE_AFTER is true, we place the new loop after LOOP in the
1678    instruction stream, otherwise it is placed before LOOP.  */
1679 
struct loop *
loop_version (struct loop *loop,
	      void *cond_expr, basic_block *condition_bb,
	      profile_probability then_prob, profile_probability else_prob,
	      profile_probability then_scale, profile_probability else_scale,
	      bool place_after)
{
  basic_block first_head, second_head;
  edge entry, latch_edge, true_edge, false_edge;
  int irred_flag;
  struct loop *nloop;
  basic_block cond_bb;

  /* Record entry and latch edges for the loop.  Temporarily clear the
     irreducibility flag on the entry edge; it is restored on all relevant
     edges before returning.  */
  entry = loop_preheader_edge (loop);
  irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
  entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;

  /* Note down head of loop as first_head.  */
  first_head = entry->dest;

  /* Duplicate loop.  */
  if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, 1,
					       NULL, NULL, NULL, 0))
    {
      entry->flags |= irred_flag;
      return NULL;
    }

  /* After duplication entry edge now points to new loop head block.
     Note down new head as second_head.  */
  second_head = entry->dest;

  /* Split loop entry edge and insert new block with cond expr.  */
  cond_bb =  lv_adjust_loop_entry_edge (first_head, second_head,
					entry, cond_expr, then_prob, else_prob);
  if (condition_bb)
    *condition_bb = cond_bb;

  if (!cond_bb)
    {
      entry->flags |= irred_flag;
      return NULL;
    }

  latch_edge = single_succ_edge (get_bb_copy (loop->latch));

  /* Turn the copy into a proper loop.  The edges of COND_BB were already
     set up by lv_adjust_loop_entry_edge, hence "do not redirect all
     edges" below.  */
  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  nloop = loopify (latch_edge,
		   single_pred_edge (get_bb_copy (loop->header)),
		   cond_bb, true_edge, false_edge,
		   false /* Do not redirect all edges.  */,
		   then_scale, else_scale);

  copy_loop_info (loop, nloop);

  /* loopify redirected latch_edge. Update its PENDING_STMTS.  */
  lv_flush_pending_stmts (latch_edge);

  /* loopify redirected condition_bb's succ edge. Update its PENDING_STMTS.  */
  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  lv_flush_pending_stmts (false_edge);
  /* Adjust irreducible flag.  */
  if (irred_flag)
    {
      cond_bb->flags |= BB_IRREDUCIBLE_LOOP;
      loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  /* Move the copied loop's blocks behind LOOP's latch in the instruction
     stream when requested.  */
  if (place_after)
    {
      basic_block *bbs = get_loop_body_in_dom_order (nloop), after;
      unsigned i;

      after = loop->latch;

      for (i = 0; i < nloop->num_nodes; i++)
	{
	  move_block_after (bbs[i], after);
	  after = bbs[i];
	}
      free (bbs);
    }

  /* At this point condition_bb is loop preheader with two successors,
     first_head and second_head.   Make sure that loop preheader has only
     one successor.  */
  split_edge (loop_preheader_edge (loop));
  split_edge (loop_preheader_edge (nloop));

  return nloop;
}
1774