xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/cfgloop.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /* Natural loop discovery code for GNU compiler.
2    Copyright (C) 2000-2017 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "gimple-ssa.h"
29 #include "diagnostic-core.h"
30 #include "cfganal.h"
31 #include "cfgloop.h"
32 #include "gimple-iterator.h"
33 #include "dumpfile.h"
34 
35 static void flow_loops_cfg_dump (FILE *);
36 
37 /* Dump loop related CFG information.  */
38 
39 static void
40 flow_loops_cfg_dump (FILE *file)
41 {
42   basic_block bb;
43 
44   if (!file)
45     return;
46 
47   FOR_EACH_BB_FN (bb, cfun)
48     {
49       edge succ;
50       edge_iterator ei;
51 
52       fprintf (file, ";; %d succs { ", bb->index);
53       FOR_EACH_EDGE (succ, ei, bb->succs)
54 	fprintf (file, "%d ", succ->dest->index);
55       fprintf (file, "}\n");
56     }
57 }
58 
59 /* Return nonzero if the nodes of LOOP are a subset of OUTER.  */
60 
61 bool
62 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
63 {
64   unsigned odepth = loop_depth (outer);
65 
66   return (loop_depth (loop) > odepth
67 	  && (*loop->superloops)[odepth] == outer);
68 }
69 
70 /* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
71    loops within LOOP.  */
72 
73 struct loop *
74 superloop_at_depth (struct loop *loop, unsigned depth)
75 {
76   unsigned ldepth = loop_depth (loop);
77 
78   gcc_assert (depth <= ldepth);
79 
80   if (depth == ldepth)
81     return loop;
82 
83   return (*loop->superloops)[depth];
84 }
85 
86 /* Returns the list of the latch edges of LOOP.  */
87 
88 static vec<edge>
89 get_loop_latch_edges (const struct loop *loop)
90 {
91   edge_iterator ei;
92   edge e;
93   vec<edge> ret = vNULL;
94 
95   FOR_EACH_EDGE (e, ei, loop->header->preds)
96     {
97       if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
98 	ret.safe_push (e);
99     }
100 
101   return ret;
102 }
103 
104 /* Dump the loop information specified by LOOP to the stream FILE
105    using auxiliary dump callback function LOOP_DUMP_AUX if non null.  */
106 
void
flow_loop_dump (const struct loop *loop, FILE *file,
		void (*loop_dump_aux) (const struct loop *, FILE *, int),
		int verbose)
{
  basic_block *bbs;
  unsigned i;
  vec<edge> latches;
  edge e;

  /* Nothing to dump for a missing or header-less loop.  */
  if (! loop || ! loop->header)
    return;

  fprintf (file, ";;\n;; Loop %d\n", loop->num);

  fprintf (file, ";;  header %d, ", loop->header->index);
  if (loop->latch)
    fprintf (file, "latch %d\n", loop->latch->index);
  else
    {
      /* No single latch recorded; list the source of each latch edge.  */
      fprintf (file, "multiple latches:");
      latches = get_loop_latch_edges (loop);
      FOR_EACH_VEC_ELT (latches, i, e)
	fprintf (file, " %d", e->src->index);
      latches.release ();
      fprintf (file, "\n");
    }

  /* "outer -1" marks the root of the loop tree (no enclosing loop).  */
  fprintf (file, ";;  depth %d, outer %ld\n",
	   loop_depth (loop), (long) (loop_outer (loop)
				      ? loop_outer (loop)->num : -1));

  if (loop->latch)
    {
      bool read_profile_p;
      gcov_type nit = expected_loop_iterations_unbounded (loop, &read_profile_p);
      /* Only report iteration counts that come from a measured profile
	 and are not superseded by a recorded estimate.  */
      if (read_profile_p && !loop->any_estimate)
	fprintf (file, ";;  profile-based iteration count: %" PRIu64 "\n",
		 (uint64_t) nit);
    }

  fprintf (file, ";;  nodes:");
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    fprintf (file, " %d", bbs[i]->index);
  free (bbs);
  fprintf (file, "\n");

  /* Let the caller append pass-specific information.  */
  if (loop_dump_aux)
    loop_dump_aux (loop, file, verbose);
}
158 
159 /* Dump the loop information about loops to the stream FILE,
160    using auxiliary dump callback function LOOP_DUMP_AUX if non null.  */
161 
162 void
163 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
164 {
165   struct loop *loop;
166 
167   if (!current_loops || ! file)
168     return;
169 
170   fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
171 
172   FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
173     {
174       flow_loop_dump (loop, file, loop_dump_aux, verbose);
175     }
176 
177   if (verbose)
178     flow_loops_cfg_dump (file);
179 }
180 
181 /* Free data allocated for LOOP.  */
182 
void
flow_loop_free (struct loop *loop)
{
  struct loop_exit *exit, *next;

  /* Release the vector of enclosing loops.  */
  vec_free (loop->superloops);

  /* Break the list of the loop exit records.  They will be freed when the
     corresponding edge is rescanned or removed, and this avoids
     accessing the (already released) head of the list stored in the
     loop structure.  */
  for (exit = loop->exits->next; exit != loop->exits; exit = next)
    {
      next = exit->next;
      /* Turn each record into a self-referencing singleton so later
	 unlinking is harmless.  */
      exit->next = exit;
      exit->prev = exit;
    }

  /* Both the dummy exit-list head and the loop itself were allocated
     with ggc_*alloc; release them to the GC.  */
  ggc_free (loop->exits);
  ggc_free (loop);
}
204 
205 /* Free all the memory allocated for LOOPS.  */
206 
207 void
208 flow_loops_free (struct loops *loops)
209 {
210   if (loops->larray)
211     {
212       unsigned i;
213       loop_p loop;
214 
215       /* Free the loop descriptors.  */
216       FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
217 	{
218 	  if (!loop)
219 	    continue;
220 
221 	  flow_loop_free (loop);
222 	}
223 
224       vec_free (loops->larray);
225     }
226 }
227 
228 /* Find the nodes contained within the LOOP with header HEADER.
229    Return the number of nodes within the loop.  */
230 
int
flow_loop_nodes_find (basic_block header, struct loop *loop)
{
  vec<basic_block> stack = vNULL;
  int num_nodes = 1;
  edge latch;
  edge_iterator latch_ei;

  /* HEADER itself is always part of the loop (counted above).  */
  header->loop_father = loop;

  /* Walk backwards from every latch edge; each block that reaches a
     latch without leaving HEADER's dominance region belongs to LOOP.
     The loop_father field doubles as the visited marker.  */
  FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
    {
      /* Skip predecessors already assigned to LOOP and edges that are
	 not back edges (source not dominated by HEADER).  */
      if (latch->src->loop_father == loop
	  || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
	continue;

      num_nodes++;
      stack.safe_push (latch->src);
      latch->src->loop_father = loop;

      /* Depth-first search over predecessors.  */
      while (!stack.is_empty ())
	{
	  basic_block node;
	  edge e;
	  edge_iterator ei;

	  node = stack.pop ();

	  FOR_EACH_EDGE (e, ei, node->preds)
	    {
	      basic_block ancestor = e->src;

	      if (ancestor->loop_father != loop)
		{
		  ancestor->loop_father = loop;
		  num_nodes++;
		  stack.safe_push (ancestor);
		}
	    }
	}
    }
  stack.release ();

  return num_nodes;
}
276 
277 /* Records the vector of superloops of the loop LOOP, whose immediate
278    superloop is FATHER.  */
279 
static void
establish_preds (struct loop *loop, struct loop *father)
{
  loop_p ploop;
  unsigned depth = loop_depth (father) + 1;
  unsigned i;

  /* Rebuild LOOP's superloop vector as FATHER's superloops followed by
     FATHER itself; the vector is pre-sized to DEPTH, so quick_push
     (no reallocation) is safe.  */
  loop->superloops = 0;
  vec_alloc (loop->superloops, depth);
  FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
    loop->superloops->quick_push (ploop);
  loop->superloops->quick_push (father);

  /* The depth of all subloops changed as well; fix them up recursively.  */
  for (ploop = loop->inner; ploop; ploop = ploop->next)
    establish_preds (ploop, loop);
}
296 
297 /* Add LOOP to the loop hierarchy tree where FATHER is father of the
298    added loop.  If LOOP has some children, take care of that their
299    pred field will be initialized correctly.  */
300 
void
flow_loop_tree_node_add (struct loop *father, struct loop *loop)
{
  /* Link LOOP at the head of FATHER's list of children.  */
  loop->next = father->inner;
  father->inner = loop;

  /* Recompute the superloop vectors of LOOP and of its whole subtree.  */
  establish_preds (loop, father);
}
309 
310 /* Remove LOOP from the loop hierarchy tree.  */
311 
312 void
313 flow_loop_tree_node_remove (struct loop *loop)
314 {
315   struct loop *prev, *father;
316 
317   father = loop_outer (loop);
318 
319   /* Remove loop from the list of sons.  */
320   if (father->inner == loop)
321     father->inner = loop->next;
322   else
323     {
324       for (prev = father->inner; prev->next != loop; prev = prev->next)
325 	continue;
326       prev->next = loop->next;
327     }
328 
329   loop->superloops = NULL;
330 }
331 
332 /* Allocates and returns new loop structure.  */
333 
struct loop *
alloc_loop (void)
{
  struct loop *loop = ggc_cleared_alloc<struct loop> ();

  /* The exit list is a cyclic doubly-linked list with a dummy head
     element, so an empty list points at itself in both directions.  */
  loop->exits = ggc_cleared_alloc<loop_exit> ();
  loop->exits->next = loop->exits->prev = loop->exits;
  loop->can_be_parallel = false;
  loop->constraints = 0;
  /* Explicitly zero the iteration-bound fields (their types may need
     proper initialization beyond the cleared allocation).  */
  loop->nb_iterations_upper_bound = 0;
  loop->nb_iterations_likely_upper_bound = 0;
  loop->nb_iterations_estimate = 0;
  return loop;
}
348 
349 /* Initializes loops structure LOOPS, reserving place for NUM_LOOPS loops
350    (including the root of the loop tree).  */
351 
void
init_loops_structure (struct function *fn,
		      struct loops *loops, unsigned num_loops)
{
  struct loop *root;

  memset (loops, 0, sizeof *loops);
  vec_alloc (loops->larray, num_loops);

  /* Dummy loop containing whole function.  */
  root = alloc_loop ();
  root->num_nodes = n_basic_blocks_for_fn (fn);
  /* The fake root loop is delimited by the entry and exit blocks, which
     are made to belong to it.  */
  root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
  root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
  ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
  EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;

  /* The root always occupies slot 0 of the loop array (its num is 0
     from the cleared allocation).  */
  loops->larray->quick_push (root);
  loops->tree_root = root;
}
372 
373 /* Returns whether HEADER is a loop header.  */
374 
375 bool
376 bb_loop_header_p (basic_block header)
377 {
378   edge_iterator ei;
379   edge e;
380 
381   /* If we have an abnormal predecessor, do not consider the
382      loop (not worth the problems).  */
383   if (bb_has_abnormal_pred (header))
384     return false;
385 
386   /* Look for back edges where a predecessor is dominated
387      by this block.  A natural loop has a single entry
388      node (header) that dominates all the nodes in the
389      loop.  It also has single back edge to the header
390      from a latch node.  */
391   FOR_EACH_EDGE (e, ei, header->preds)
392     {
393       basic_block latch = e->src;
394       if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
395 	  && dominated_by_p (CDI_DOMINATORS, latch, header))
396 	return true;
397     }
398 
399   return false;
400 }
401 
402 /* Find all the natural loops in the function and save in LOOPS structure and
403    recalculate loop_father information in basic block structures.
404    If LOOPS is non-NULL then the loop structures for already recorded loops
405    will be re-used and their number will not change.  We assume that no
406    stale loops exist in LOOPS.
407    When LOOPS is NULL it is allocated and re-built from scratch.
408    Return the built LOOPS structure.  */
409 
struct loops *
flow_loops_find (struct loops *loops)
{
  bool from_scratch = (loops == NULL);
  int *rc_order;
  int b;
  unsigned i;

  /* Ensure that the dominators are computed.  */
  calculate_dominance_info (CDI_DOMINATORS);

  if (!loops)
    {
      loops = ggc_cleared_alloc<struct loops> ();
      init_loops_structure (cfun, loops, 1);
    }

  /* Ensure that loop exits were released.  */
  gcc_assert (loops->exits == NULL);

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return loops;

  /* The root loop node contains all basic-blocks.  */
  loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);

  /* Compute depth first search order of the CFG so that outer
     natural loops will be found before inner natural loops.  */
  rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  pre_and_rev_post_order_compute (NULL, rc_order, false);

  /* Gather all loop headers in reverse completion order and allocate
     loop structures for loops that are not already present.  */
  auto_vec<loop_p> larray (loops->larray->length ());
  for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
    {
      basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
      if (bb_loop_header_p (header))
	{
	  struct loop *loop;

	  /* The current active loop tree has valid loop-fathers for
	     header blocks.  */
	  if (!from_scratch
	      && header->loop_father->header == header)
	    {
	      loop = header->loop_father;
	      /* If we found an existing loop remove it from the
		 loop tree.  It is going to be inserted again
		 below.  */
	      flow_loop_tree_node_remove (loop);
	    }
	  else
	    {
	      /* Otherwise allocate a new loop structure for the loop.  */
	      loop = alloc_loop ();
	      /* ???  We could re-use unused loop slots here.  */
	      loop->num = loops->larray->length ();
	      vec_safe_push (loops->larray, loop);
	      loop->header = header;

	      if (!from_scratch
		  && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "flow_loops_find: discovered new "
			 "loop %d with header %d\n",
			 loop->num, header->index);
	    }
	  /* Reset latch, we recompute it below.  */
	  loop->latch = NULL;
	  larray.safe_push (loop);
	}

      /* Make blocks part of the loop root node at start.  */
      header->loop_father = loops->tree_root;
    }

  /* The reverse-completion order is no longer needed.  */
  free (rc_order);

  /* Now iterate over the loops found, insert them into the loop tree
     and assign basic-block ownership.  Outer loops were pushed first,
     so the tree is rebuilt top-down.  */
  for (i = 0; i < larray.length (); ++i)
    {
      struct loop *loop = larray[i];
      basic_block header = loop->header;
      edge_iterator ei;
      edge e;

      flow_loop_tree_node_add (header->loop_father, loop);
      loop->num_nodes = flow_loop_nodes_find (loop->header, loop);

      /* Look for the latch for this header block, if it has just a
	 single one.  */
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  basic_block latch = e->src;

	  if (flow_bb_inside_loop_p (loop, latch))
	    {
	      if (loop->latch != NULL)
		{
		  /* More than one latch edge.  */
		  loop->latch = NULL;
		  break;
		}
	      loop->latch = latch;
	    }
	}
    }

  return loops;
}
523 
524 /* Ratio of frequencies of edges so that one of more latch edges is
525    considered to belong to inner loop with same header.  */
526 #define HEAVY_EDGE_RATIO 8
527 
528 /* Minimum number of samples for that we apply
529    find_subloop_latch_edge_by_profile heuristics.  */
530 #define HEAVY_EDGE_MIN_SAMPLES 10
531 
532 /* If the profile info is available, finds an edge in LATCHES that much more
533    frequent than the remaining edges.  Returns such an edge, or NULL if we do
534    not find one.
535 
536    We do not use guessed profile here, only the measured one.  The guessed
537    profile is usually too flat and unreliable for this (and it is mostly based
538    on the loop structure of the program, so it does not make much sense to
539    derive the loop structure from it).  */
540 
static edge
find_subloop_latch_edge_by_profile (vec<edge> latches)
{
  unsigned i;
  edge e, me = NULL;
  gcov_type mcount = 0, tcount = 0;

  /* Find the most frequently executed latch edge and the total count.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    {
      if (e->count > mcount)
	{
	  me = e;
	  mcount = e->count;
	}
      tcount += e->count;
    }

  /* Require enough samples, and that the maximal edge carries at least
     (HEAVY_EDGE_RATIO - 1) / HEAVY_EDGE_RATIO of the total count.  */
  if (tcount < HEAVY_EDGE_MIN_SAMPLES
      || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
    return NULL;

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using profile information.\n",
	     me->src->index, me->dest->index);
  return me;
}
568 
569 /* Among LATCHES, guesses a latch edge of LOOP corresponding to subloop, based
570    on the structure of induction variables.  Returns this edge, or NULL if we
571    do not find any.
572 
573    We are quite conservative, and look just for an obvious simple innermost
574    loop (which is the case where we would lose the most performance by not
575    disambiguating the loop).  More precisely, we look for the following
576    situation: The source of the chosen latch edge dominates sources of all
577    the other latch edges.  Additionally, the header does not contain a phi node
578    such that the argument from the chosen edge is equal to the argument from
579    another edge.  */
580 
static edge
find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
{
  edge e, latch = latches[0];
  unsigned i;
  gphi *phi;
  gphi_iterator psi;
  tree lop;
  basic_block bb;

  /* Find the candidate for the latch edge: the one whose source is
     dominated by the sources of all others.  */
  for (i = 1; latches.iterate (i, &e); i++)
    if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
      latch = e;

  /* Verify that it dominates all the latch edges.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
      return NULL;

  /* Check for a phi node that would deny that this is a latch edge of
     a subloop.  */
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = psi.phi ();
      lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);

      /* Ignore the values that are not changed inside the subloop.  */
      if (TREE_CODE (lop) != SSA_NAME
	  || SSA_NAME_DEF_STMT (lop) == phi)
	continue;
      bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
      if (!bb || !flow_bb_inside_loop_p (loop, bb))
	continue;

      /* If another latch edge brings in the same value, LATCH does not
	 behave like the latch of a separate subloop.  */
      FOR_EACH_VEC_ELT (latches, i, e)
	if (e != latch
	    && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
	  return NULL;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using iv structure.\n",
	     latch->src->index, latch->dest->index);
  return latch;
}
628 
629 /* If we can determine that one of the several latch edges of LOOP behaves
630    as a latch edge of a separate subloop, returns this edge.  Otherwise
631    returns NULL.  */
632 
633 static edge
634 find_subloop_latch_edge (struct loop *loop)
635 {
636   vec<edge> latches = get_loop_latch_edges (loop);
637   edge latch = NULL;
638 
639   if (latches.length () > 1)
640     {
641       latch = find_subloop_latch_edge_by_profile (latches);
642 
643       if (!latch
644 	  /* We consider ivs to guess the latch edge only in SSA.  Perhaps we
645 	     should use cfghook for this, but it is hard to imagine it would
646 	     be useful elsewhere.  */
647 	  && current_ir_type () == IR_GIMPLE)
648 	latch = find_subloop_latch_edge_by_ivs (loop, latches);
649     }
650 
651   latches.release ();
652   return latch;
653 }
654 
655 /* Callback for make_forwarder_block.  Returns true if the edge E is marked
656    in the set MFB_REIS_SET.  */
657 
/* Edge set consulted by mfb_redirect_edges_in_set; filled in by the
   callers of make_forwarder_block below.  */
static hash_set<edge> *mfb_reis_set;
static bool
mfb_redirect_edges_in_set (edge e)
{
  return mfb_reis_set->contains (e);
}
664 
665 /* Creates a subloop of LOOP with latch edge LATCH.  */
666 
static void
form_subloop (struct loop *loop, edge latch)
{
  edge_iterator ei;
  edge e, new_entry;
  struct loop *new_loop;

  /* Redirect every header predecessor except LATCH to a new forwarder
     block; LATCH keeps targeting the old header, which becomes the
     header of the subloop.  */
  mfb_reis_set = new hash_set<edge>;
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e != latch)
	mfb_reis_set->add (e);
    }
  new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
				    NULL);
  delete mfb_reis_set;

  /* The forwarder block is the new header of LOOP.  */
  loop->header = new_entry->src;

  /* Find the blocks and subloops that belong to the new loop, and add it to
     the appropriate place in the loop tree.  */
  new_loop = alloc_loop ();
  new_loop->header = new_entry->dest;
  new_loop->latch = latch->src;
  add_loop (new_loop, loop);
}
693 
694 /* Make all the latch edges of LOOP to go to a single forwarder block --
695    a new latch of LOOP.  */
696 
static void
merge_latch_edges (struct loop *loop)
{
  vec<edge> latches = get_loop_latch_edges (loop);
  edge latch, e;
  unsigned i;

  gcc_assert (latches.length () > 0);

  if (latches.length () == 1)
    /* A single latch edge; just record its source.  */
    loop->latch = latches[0]->src;
  else
    {
      if (dump_file)
	fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);

      /* Redirect all latch edges to a fresh forwarder block, which
	 becomes the unique latch of LOOP.  */
      mfb_reis_set = new hash_set<edge>;
      FOR_EACH_VEC_ELT (latches, i, e)
	mfb_reis_set->add (e);
      latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
				    NULL);
      delete mfb_reis_set;

      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  latches.release ();
}
726 
727 /* LOOP may have several latch edges.  Transform it into (possibly several)
728    loops with single latch edge.  */
729 
static void
disambiguate_multiple_latches (struct loop *loop)
{
  edge e;

  /* We eliminate the multiple latches by splitting the header to the forwarder
     block F and the rest R, and redirecting the edges.  There are two cases:

     1) If there is a latch edge E that corresponds to a subloop (we guess
        that based on profile -- if it is taken much more often than the
	remaining edges; and on trees, using the information about induction
	variables of the loops), we redirect E to R, all the remaining edges to
	F, then rescan the loops and try again for the outer loop.
     2) If there is no such edge, we redirect all latch edges to F, and the
        entry edges to R, thus making F the single latch of the loop.  */

  if (dump_file)
    fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
	     loop->num);

  /* During latch merging, we may need to redirect the entry edges to a new
     block.  This would cause problems if the entry edge was the one from the
     entry block.  To avoid having to handle this case specially, split
     such entry edge.  */
  e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
  if (e)
    split_edge (e);

  /* Peel off subloops (case 1) for as long as a subloop latch edge can
     be identified.  */
  while (1)
    {
      e = find_subloop_latch_edge (loop);
      if (!e)
	break;

      form_subloop (loop, e);
    }

  /* Case 2: merge whatever latch edges remain.  */
  merge_latch_edges (loop);
}
769 
770 /* Split loops with multiple latch edges.  */
771 
772 void
773 disambiguate_loops_with_multiple_latches (void)
774 {
775   struct loop *loop;
776 
777   FOR_EACH_LOOP (loop, 0)
778     {
779       if (!loop->latch)
780 	disambiguate_multiple_latches (loop);
781     }
782 }
783 
784 /* Return nonzero if basic block BB belongs to LOOP.  */
785 bool
786 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
787 {
788   struct loop *source_loop;
789 
790   if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
791       || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
792     return 0;
793 
794   source_loop = bb->loop_father;
795   return loop == source_loop || flow_loop_nested_p (loop, source_loop);
796 }
797 
798 /* Enumeration predicate for get_loop_body_with_size.  */
799 static bool
800 glb_enum_p (const_basic_block bb, const void *glb_loop)
801 {
802   const struct loop *const loop = (const struct loop *) glb_loop;
803   return (bb != loop->header
804 	  && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
805 }
806 
807 /* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
808    order against direction of edges from latch.  Specially, if
809    header != latch, latch is the 1-st block.  LOOP cannot be the fake
810    loop tree root, and its size must be at most MAX_SIZE.  The blocks
811    in the LOOP body are stored to BODY, and the size of the LOOP is
812    returned.  */
813 
unsigned
get_loop_body_with_size (const struct loop *loop, basic_block *body,
			 unsigned max_size)
{
  /* Enumerate from the header (stored first); glb_enum_p admits the
     blocks dominated by the header, i.e. the loop body.  */
  return dfs_enumerate_from (loop->header, 1, glb_enum_p,
			     body, max_size, loop);
}
821 
822 /* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
823    order against direction of edges from latch.  Specially, if
824    header != latch, latch is the 1-st block.  */
825 
basic_block *
get_loop_body (const struct loop *loop)
{
  basic_block *body, bb;
  unsigned tv = 0;

  gcc_assert (loop->num_nodes);

  body = XNEWVEC (basic_block, loop->num_nodes);

  if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
	 special-case the fake loop that contains the whole function.  */
      gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
      body[tv++] = loop->header;
      body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
      FOR_EACH_BB_FN (bb, cfun)
	body[tv++] = bb;
    }
  else
    /* Ordinary loop: enumerate the body by DFS from the header.  */
    tv = get_loop_body_with_size (loop, body, loop->num_nodes);

  /* The enumeration must have found exactly the recorded body size.  */
  gcc_assert (tv == loop->num_nodes);
  return body;
}
852 
853 /* Fills dominance descendants inside LOOP of the basic block BB into
854    array TOVISIT from index *TV.  */
855 
static void
fill_sons_in_loop (const struct loop *loop, basic_block bb,
		   basic_block *tovisit, int *tv)
{
  basic_block son, postpone = NULL;

  tovisit[(*tv)++] = bb;
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    {
      if (!flow_bb_inside_loop_p (loop, son))
	continue;

      /* A son that dominates the latch must come last, so that the
	 blocks it dominates follow everything else (see the comment on
	 get_loop_body_in_dom_order).  At most one son can dominate the
	 latch, so a single POSTPONE slot suffices.  */
      if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
	{
	  postpone = son;
	  continue;
	}
      fill_sons_in_loop (loop, son, tovisit, tv);
    }

  if (postpone)
    fill_sons_in_loop (loop, postpone, tovisit, tv);
}
881 
882 /* Gets body of a LOOP (that must be different from the outermost loop)
883    sorted by dominance relation.  Additionally, if a basic block s dominates
884    the latch, then only blocks dominated by s are be after it.  */
885 
886 basic_block *
887 get_loop_body_in_dom_order (const struct loop *loop)
888 {
889   basic_block *tovisit;
890   int tv;
891 
892   gcc_assert (loop->num_nodes);
893 
894   tovisit = XNEWVEC (basic_block, loop->num_nodes);
895 
896   gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
897 
898   tv = 0;
899   fill_sons_in_loop (loop, loop->header, tovisit, &tv);
900 
901   gcc_assert (tv == (int) loop->num_nodes);
902 
903   return tovisit;
904 }
905 
906 /* Gets body of a LOOP sorted via provided BB_COMPARATOR.  */
907 
908 basic_block *
909 get_loop_body_in_custom_order (const struct loop *loop,
910 			       int (*bb_comparator) (const void *, const void *))
911 {
912   basic_block *bbs = get_loop_body (loop);
913 
914   qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
915 
916   return bbs;
917 }
918 
919 /* Get body of a LOOP in breadth first sort order.  */
920 
basic_block *
get_loop_body_in_bfs_order (const struct loop *loop)
{
  basic_block *blocks;
  basic_block bb;
  bitmap visited;
  unsigned int i = 1;	/* Next free slot in BLOCKS.  */
  unsigned int vc = 0;	/* Next block whose successors to expand.  */

  gcc_assert (loop->num_nodes);
  gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));

  /* BLOCKS doubles as the BFS queue; VISITED guards against pushing a
     block twice.  */
  blocks = XNEWVEC (basic_block, loop->num_nodes);
  visited = BITMAP_ALLOC (NULL);
  blocks[0] = loop->header;
  bitmap_set_bit (visited, loop->header->index);
  while (i < loop->num_nodes)
    {
      edge e;
      edge_iterator ei;
      gcc_assert (i > vc);
      bb = blocks[vc++];

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (flow_bb_inside_loop_p (loop, e->dest))
	    {
	      /* This bb is now visited.  */
	      if (bitmap_set_bit (visited, e->dest->index))
		blocks[i++] = e->dest;
	    }
	}
    }

  BITMAP_FREE (visited);
  return blocks;
}
958 
959 /* Hash function for struct loop_exit.  */
960 
hashval_t
loop_exit_hasher::hash (loop_exit *exit)
{
  /* Exit records are keyed by the edge they describe.  */
  return htab_hash_pointer (exit->e);
}
966 
967 /* Equality function for struct loop_exit.  Compares with edge.  */
968 
bool
loop_exit_hasher::equal (loop_exit *exit, edge e)
{
  /* An exit record matches the lookup key when it describes edge E.  */
  return exit->e == e;
}
974 
975 /* Frees the list of loop exit descriptions EX.  */
976 
void
loop_exit_hasher::remove (loop_exit *exit)
{
  loop_exit *next;
  /* Walk the chain of records for this edge (linked via next_e, one
     record per exited loop), unlink each from its per-loop exit list,
     and free it.  */
  for (; exit; exit = next)
    {
      next = exit->next_e;

      exit->next->prev = exit->prev;
      exit->prev->next = exit->next;

      ggc_free (exit);
    }
}
991 
992 /* Returns the list of records for E as an exit of a loop.  */
993 
994 static struct loop_exit *
995 get_exit_descriptions (edge e)
996 {
997   return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
998 }
999 
1000 /* Updates the lists of loop exits in that E appears.
1001    If REMOVED is true, E is being removed, and we
1002    just remove it from the lists of exits.
1003    If NEW_EDGE is true and E is not a loop exit, we
1004    do not try to remove it from loop exit lists.  */
1005 
void
rescan_loop_exit (edge e, bool new_edge, bool removed)
{
  struct loop_exit *exits = NULL, *exit;
  struct loop *aloop, *cloop;

  /* Nothing to maintain unless exit lists are being recorded.  */
  if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    return;

  /* Unless E is being removed, build a fresh chain of exit records:
     one per loop that E exits, i.e. every loop on the path from
     E->src's loop up to (but excluding) the common loop of src and
     dest.  */
  if (!removed
      && e->src->loop_father != NULL
      && e->dest->loop_father != NULL
      && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
    {
      cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
      for (aloop = e->src->loop_father;
	   aloop != cloop;
	   aloop = loop_outer (aloop))
	{
	  exit = ggc_alloc<loop_exit> ();
	  exit->e = e;

	  /* Splice the record after the dummy head of ALOOP's cyclic
	     exit list.  */
	  exit->next = aloop->exits->next;
	  exit->prev = aloop->exits;
	  exit->next->prev = exit;
	  exit->prev->next = exit;

	  /* Chain the per-loop records of this edge together.  */
	  exit->next_e = exits;
	  exits = exit;
	}
    }

  /* A brand new edge that is not an exit needs no hash entry.  */
  if (!exits && new_edge)
    return;

  loop_exit **slot
    = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
						 exits ? INSERT : NO_INSERT);
  if (!slot)
    return;

  if (exits)
    {
      /* Replace any stale record chain with the fresh one.  */
      if (*slot)
	loop_exit_hasher::remove (*slot);
      *slot = exits;
    }
  else
    /* E is no longer an exit; drop its entry.  */
    current_loops->exits->clear_slot (slot);
}
1056 
1057 /* For each loop, record list of exit edges, and start maintaining these
1058    lists.  */
1059 
1060 void
1061 record_loop_exits (void)
1062 {
1063   basic_block bb;
1064   edge_iterator ei;
1065   edge e;
1066 
1067   if (!current_loops)
1068     return;
1069 
1070   if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1071     return;
1072   loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1073 
1074   gcc_assert (current_loops->exits == NULL);
1075   current_loops->exits
1076     = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1077 
1078   FOR_EACH_BB_FN (bb, cfun)
1079     {
1080       FOR_EACH_EDGE (e, ei, bb->succs)
1081 	{
1082 	  rescan_loop_exit (e, true, false);
1083 	}
1084     }
1085 }
1086 
1087 /* Dumps information about the exit in *SLOT to FILE.
1088    Callback for htab_traverse.  */
1089 
1090 int
1091 dump_recorded_exit (loop_exit **slot, FILE *file)
1092 {
1093   struct loop_exit *exit = *slot;
1094   unsigned n = 0;
1095   edge e = exit->e;
1096 
1097   for (; exit != NULL; exit = exit->next_e)
1098     n++;
1099 
1100   fprintf (file, "Edge %d->%d exits %u loops\n",
1101 	   e->src->index, e->dest->index, n);
1102 
1103   return 1;
1104 }
1105 
1106 /* Dumps the recorded exits of loops to FILE.  */
1107 
1108 extern void dump_recorded_exits (FILE *);
1109 void
1110 dump_recorded_exits (FILE *file)
1111 {
1112   if (!current_loops->exits)
1113     return;
1114   current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
1115 }
1116 
/* Releases lists of loop exits.  */

void
release_recorded_exits (function *fn)
{
  /* Callers may only drop exit lists while they are being maintained.  */
  gcc_assert (loops_state_satisfies_p (fn, LOOPS_HAVE_RECORDED_EXITS));
  /* The table and records are GC-allocated; emptying the table and
     dropping the reference lets the collector reclaim them.  */
  loops_for_fn (fn)->exits->empty ();
  loops_for_fn (fn)->exits = NULL;
  loops_state_clear (fn, LOOPS_HAVE_RECORDED_EXITS);
}
1127 
1128 /* Returns the list of the exit edges of a LOOP.  */
1129 
1130 vec<edge>
1131 get_loop_exit_edges (const struct loop *loop)
1132 {
1133   vec<edge> edges = vNULL;
1134   edge e;
1135   unsigned i;
1136   basic_block *body;
1137   edge_iterator ei;
1138   struct loop_exit *exit;
1139 
1140   gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1141 
1142   /* If we maintain the lists of exits, use them.  Otherwise we must
1143      scan the body of the loop.  */
1144   if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1145     {
1146       for (exit = loop->exits->next; exit->e; exit = exit->next)
1147 	edges.safe_push (exit->e);
1148     }
1149   else
1150     {
1151       body = get_loop_body (loop);
1152       for (i = 0; i < loop->num_nodes; i++)
1153 	FOR_EACH_EDGE (e, ei, body[i]->succs)
1154 	  {
1155 	    if (!flow_bb_inside_loop_p (loop, e->dest))
1156 	      edges.safe_push (e);
1157 	  }
1158       free (body);
1159     }
1160 
1161   return edges;
1162 }
1163 
1164 /* Counts the number of conditional branches inside LOOP.  */
1165 
1166 unsigned
1167 num_loop_branches (const struct loop *loop)
1168 {
1169   unsigned i, n;
1170   basic_block * body;
1171 
1172   gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1173 
1174   body = get_loop_body (loop);
1175   n = 0;
1176   for (i = 0; i < loop->num_nodes; i++)
1177     if (EDGE_COUNT (body[i]->succs) >= 2)
1178       n++;
1179   free (body);
1180 
1181   return n;
1182 }
1183 
1184 /* Adds basic block BB to LOOP.  */
1185 void
1186 add_bb_to_loop (basic_block bb, struct loop *loop)
1187 {
1188   unsigned i;
1189   loop_p ploop;
1190   edge_iterator ei;
1191   edge e;
1192 
1193   gcc_assert (bb->loop_father == NULL);
1194   bb->loop_father = loop;
1195   loop->num_nodes++;
1196   FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1197     ploop->num_nodes++;
1198 
1199   FOR_EACH_EDGE (e, ei, bb->succs)
1200     {
1201       rescan_loop_exit (e, true, false);
1202     }
1203   FOR_EACH_EDGE (e, ei, bb->preds)
1204     {
1205       rescan_loop_exit (e, true, false);
1206     }
1207 }
1208 
1209 /* Remove basic block BB from loops.  */
1210 void
1211 remove_bb_from_loops (basic_block bb)
1212 {
1213   unsigned i;
1214   struct loop *loop = bb->loop_father;
1215   loop_p ploop;
1216   edge_iterator ei;
1217   edge e;
1218 
1219   gcc_assert (loop != NULL);
1220   loop->num_nodes--;
1221   FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1222     ploop->num_nodes--;
1223   bb->loop_father = NULL;
1224 
1225   FOR_EACH_EDGE (e, ei, bb->succs)
1226     {
1227       rescan_loop_exit (e, false, true);
1228     }
1229   FOR_EACH_EDGE (e, ei, bb->preds)
1230     {
1231       rescan_loop_exit (e, false, true);
1232     }
1233 }
1234 
1235 /* Finds nearest common ancestor in loop tree for given loops.  */
1236 struct loop *
1237 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1238 {
1239   unsigned sdepth, ddepth;
1240 
1241   if (!loop_s) return loop_d;
1242   if (!loop_d) return loop_s;
1243 
1244   sdepth = loop_depth (loop_s);
1245   ddepth = loop_depth (loop_d);
1246 
1247   if (sdepth < ddepth)
1248     loop_d = (*loop_d->superloops)[sdepth];
1249   else if (sdepth > ddepth)
1250     loop_s = (*loop_s->superloops)[ddepth];
1251 
1252   while (loop_s != loop_d)
1253     {
1254       loop_s = loop_outer (loop_s);
1255       loop_d = loop_outer (loop_d);
1256     }
1257   return loop_s;
1258 }
1259 
/* Removes LOOP from structures and frees its data.  */

void
delete_loop (struct loop *loop)
{
  /* Remove the loop from structure.  */
  flow_loop_tree_node_remove (loop);

  /* Remove loop from loops array.  The slot is cleared rather than
     compacted so the numbers of the remaining loops stay stable.  */
  (*current_loops->larray)[loop->num] = NULL;

  /* Free loop data.  */
  flow_loop_free (loop);
}
1274 
1275 /* Cancels the LOOP; it must be innermost one.  */
1276 
1277 static void
1278 cancel_loop (struct loop *loop)
1279 {
1280   basic_block *bbs;
1281   unsigned i;
1282   struct loop *outer = loop_outer (loop);
1283 
1284   gcc_assert (!loop->inner);
1285 
1286   /* Move blocks up one level (they should be removed as soon as possible).  */
1287   bbs = get_loop_body (loop);
1288   for (i = 0; i < loop->num_nodes; i++)
1289     bbs[i]->loop_father = outer;
1290 
1291   free (bbs);
1292   delete_loop (loop);
1293 }
1294 
1295 /* Cancels LOOP and all its subloops.  */
1296 void
1297 cancel_loop_tree (struct loop *loop)
1298 {
1299   while (loop->inner)
1300     cancel_loop_tree (loop->inner);
1301   cancel_loop (loop);
1302 }
1303 
/* Checks that information about loops is correct
     -- sizes of loops are all right
     -- results of get_loop_body really belong to the loop
     -- loop header have just single entry edge and single latch edge
     -- loop latches have only single successor that is header of their loop
     -- irreducible loops are correctly marked
     -- the cached loop depth and loop father of each bb is correct
   Every inconsistency is reported via error () and the function aborts
   through the gcc_assert at the end if any was found.  */
DEBUG_FUNCTION void
verify_loop_structure (void)
{
  unsigned *sizes, i, j;
  basic_block bb, *bbs;
  struct loop *loop;
  int err = 0;
  edge e;
  unsigned num = number_of_loops (cfun);
  struct loop_exit *exit, *mexit;
  bool dom_available = dom_info_available_p (CDI_DOMINATORS);

  if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    {
      error ("loop verification on loop tree that needs fixup");
      err = 1;
    }

  /* We need up-to-date dominators, compute or verify them.  */
  if (!dom_available)
    calculate_dominance_info (CDI_DOMINATORS);
  else
    verify_dominators (CDI_DOMINATORS);

  /* Check the loop tree root.  */
  if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
      || (current_loops->tree_root->num_nodes
	  != (unsigned) n_basic_blocks_for_fn (cfun)))
    {
      error ("corrupt loop tree root");
      err = 1;
    }

  /* Check the headers.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (bb_loop_header_p (bb))
      {
	if (bb->loop_father->header == NULL)
	  {
	    error ("loop with header %d marked for removal", bb->index);
	    err = 1;
	  }
	else if (bb->loop_father->header != bb)
	  {
	    error ("loop with header %d not in loop tree", bb->index);
	    err = 1;
	  }
      }
    else if (bb->loop_father->header == bb)
      {
	error ("non-loop with header %d not marked for removal", bb->index);
	err = 1;
      }

  /* Check the recorded loop father and sizes of loops.  Iterating from
     the innermost loops outward lets the VISITED bitmap assign each
     block to its innermost enclosing loop.  */
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      unsigned n;

      if (loop->header == NULL)
	{
	  error ("removed loop %d in loop tree", loop->num);
	  err = 1;
	  continue;
	}

      n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
      if (loop->num_nodes != n)
	{
	  error ("size of loop %d should be %d, not %d",
		 loop->num, n, loop->num_nodes);
	  err = 1;
	}

      for (j = 0; j < n; j++)
	{
	  bb = bbs[j];

	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      error ("bb %d does not belong to loop %d",
		     bb->index, loop->num);
	      err = 1;
	    }

	  /* Ignore this block if it is in an inner loop.  */
	  if (bitmap_bit_p (visited, bb->index))
	    continue;
	  bitmap_set_bit (visited, bb->index);

	  if (bb->loop_father != loop)
	    {
	      error ("bb %d has father loop %d, should be loop %d",
		     bb->index, bb->loop_father->num, loop->num);
	      err = 1;
	    }
	}
    }
  free (bbs);

  /* Check headers and latches.  */
  FOR_EACH_LOOP (loop, 0)
    {
      i = loop->num;
      if (loop->header == NULL)
	continue;
      if (!bb_loop_header_p (loop->header))
	{
	  error ("loop %d%'s header is not a loop header", i);
	  err = 1;
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
	  && EDGE_COUNT (loop->header->preds) != 2)
	{
	  error ("loop %d%'s header does not have exactly 2 entries", i);
	  err = 1;
	}
      if (loop->latch)
	{
	  if (!find_edge (loop->latch, loop->header))
	    {
	      error ("loop %d%'s latch does not have an edge to its header", i);
	      err = 1;
	    }
	  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
	    {
	      error ("loop %d%'s latch is not dominated by its header", i);
	      err = 1;
	    }
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
	{
	  if (!single_succ_p (loop->latch))
	    {
	      error ("loop %d%'s latch does not have exactly 1 successor", i);
	      err = 1;
	    }
	  if (single_succ (loop->latch) != loop->header)
	    {
	      error ("loop %d%'s latch does not have header as successor", i);
	      err = 1;
	    }
	  if (loop->latch->loop_father != loop)
	    {
	      error ("loop %d%'s latch does not belong directly to it", i);
	      err = 1;
	    }
	}
      if (loop->header->loop_father != loop)
	{
	  error ("loop %d%'s header does not belong directly to it", i);
	  err = 1;
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
	  && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
	{
	  error ("loop %d%'s latch is marked as part of irreducible region", i);
	  err = 1;
	}
    }

  /* Check irreducible loops.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    {
      /* Record old info.  Edges are marked with a scratch flag bit
	 (EDGE_ALL_FLAGS + 1) that is cleared again below.  */
      auto_sbitmap irreds (last_basic_block_for_fn (cfun));
      FOR_EACH_BB_FN (bb, cfun)
	{
	  edge_iterator ei;
	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
	    bitmap_set_bit (irreds, bb->index);
	  else
	    bitmap_clear_bit (irreds, bb->index);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	      e->flags |= EDGE_ALL_FLAGS + 1;
	}

      /* Recount it.  */
      mark_irreducible_loops ();

      /* Compare.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  edge_iterator ei;

	  if ((bb->flags & BB_IRREDUCIBLE_LOOP)
	      && !bitmap_bit_p (irreds, bb->index))
	    {
	      error ("basic block %d should be marked irreducible", bb->index);
	      err = 1;
	    }
	  else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
	      && bitmap_bit_p (irreds, bb->index))
	    {
	      error ("basic block %d should not be marked irreducible", bb->index);
	      err = 1;
	    }
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
		  && !(e->flags & (EDGE_ALL_FLAGS + 1)))
		{
		  error ("edge from %d to %d should be marked irreducible",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
		       && (e->flags & (EDGE_ALL_FLAGS + 1)))
		{
		  error ("edge from %d to %d should not be marked irreducible",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      /* Clear the scratch flag again.  */
	      e->flags &= ~(EDGE_ALL_FLAGS + 1);
	    }
	}
    }

  /* Check the recorded loop exits.  */
  FOR_EACH_LOOP (loop, 0)
    {
      if (!loop->exits || loop->exits->e != NULL)
	{
	  error ("corrupted head of the exits list of loop %d",
		 loop->num);
	  err = 1;
	}
      else
	{
	  /* Check that the list forms a cycle, and all elements except
	     for the head are nonnull.  MEXIT advances at half the speed
	     of EXIT (tortoise-and-hare), so a list that loops without
	     passing through the head is detected.  */
	  for (mexit = loop->exits, exit = mexit->next, i = 0;
	       exit->e && exit != mexit;
	       exit = exit->next)
	    {
	      if (i++ & 1)
		mexit = mexit->next;
	    }

	  if (exit != loop->exits)
	    {
	      error ("corrupted exits list of loop %d", loop->num);
	      err = 1;
	    }
	}

      if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
	{
	  if (loop->exits->next != loop->exits)
	    {
	      error ("nonempty exits list of loop %d, but exits are not recorded",
		     loop->num);
	      err = 1;
	    }
	}
    }

  /* Cross-check the recorded exits against a full rescan of the CFG.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    {
      unsigned n_exits = 0, eloops;

      sizes = XCNEWVEC (unsigned, num);
      memset (sizes, 0, sizeof (unsigned) * num);
      FOR_EACH_BB_FN (bb, cfun)
	{
	  edge_iterator ei;
	  if (bb->loop_father == current_loops->tree_root)
	    continue;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
		continue;

	      n_exits++;
	      exit = get_exit_descriptions (e);
	      if (!exit)
		{
		  error ("exit %d->%d not recorded",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      eloops = 0;
	      for (; exit; exit = exit->next_e)
		eloops++;

	      for (loop = bb->loop_father;
		   loop != e->dest->loop_father
		   /* When a loop exit is also an entry edge which
		      can happen when avoiding CFG manipulations
		      then the last loop exited is the outer loop
		      of the loop entered.  */
		   && loop != loop_outer (e->dest->loop_father);
		   loop = loop_outer (loop))
		{
		  eloops--;
		  sizes[loop->num]++;
		}

	      if (eloops != 0)
		{
		  error ("wrong list of exited loops for edge  %d->%d",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	    }
	}

      if (n_exits != current_loops->exits->elements ())
	{
	  error ("too many loop exits recorded");
	  err = 1;
	}

      FOR_EACH_LOOP (loop, 0)
	{
	  eloops = 0;
	  for (exit = loop->exits->next; exit->e; exit = exit->next)
	    eloops++;
	  if (eloops != sizes[loop->num])
	    {
	      error ("%d exits recorded for loop %d (having %d exits)",
		     eloops, loop->num, sizes[loop->num]);
	      err = 1;
	    }
	}

      free (sizes);
    }

  gcc_assert (!err);

  /* Free the dominators if we computed them only for this check.  */
  if (!dom_available)
    free_dominance_info (CDI_DOMINATORS);
}
1651 
/* Returns latch edge of LOOP, i.e. the back edge from the latch block
   to the loop header.  */
edge
loop_latch_edge (const struct loop *loop)
{
  return find_edge (loop->latch, loop->header);
}
1658 
1659 /* Returns preheader edge of LOOP.  */
1660 edge
1661 loop_preheader_edge (const struct loop *loop)
1662 {
1663   edge e;
1664   edge_iterator ei;
1665 
1666   gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1667 
1668   FOR_EACH_EDGE (e, ei, loop->header->preds)
1669     if (e->src != loop->latch)
1670       break;
1671 
1672   return e;
1673 }
1674 
1675 /* Returns true if E is an exit of LOOP.  */
1676 
1677 bool
1678 loop_exit_edge_p (const struct loop *loop, const_edge e)
1679 {
1680   return (flow_bb_inside_loop_p (loop, e->src)
1681 	  && !flow_bb_inside_loop_p (loop, e->dest));
1682 }
1683 
1684 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1685    or more than one exit.  If loops do not have the exits recorded, NULL
1686    is returned always.  */
1687 
1688 edge
1689 single_exit (const struct loop *loop)
1690 {
1691   struct loop_exit *exit = loop->exits->next;
1692 
1693   if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1694     return NULL;
1695 
1696   if (exit->e && exit->next == loop->exits)
1697     return exit->e;
1698   else
1699     return NULL;
1700 }
1701 
1702 /* Returns true when BB has an incoming edge exiting LOOP.  */
1703 
1704 bool
1705 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1706 {
1707   edge e;
1708   edge_iterator ei;
1709 
1710   FOR_EACH_EDGE (e, ei, bb->preds)
1711     if (loop_exit_edge_p (loop, e))
1712       return true;
1713 
1714   return false;
1715 }
1716 
1717 /* Returns true when BB has an outgoing edge exiting LOOP.  */
1718 
1719 bool
1720 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1721 {
1722   edge e;
1723   edge_iterator ei;
1724 
1725   FOR_EACH_EDGE (e, ei, bb->succs)
1726     if (loop_exit_edge_p (loop, e))
1727       return true;
1728 
1729   return false;
1730 }
1731 
/* Return location corresponding to the loop control condition if possible.
   Falls back to the current function's location when no insn with a
   location can be found.  */

location_t
get_loop_location (struct loop *loop)
{
  rtx_insn *insn = NULL;
  struct niter_desc *desc = NULL;
  edge exit;

  /* For a for or while loop, we would like to return the location
     of the for or while statement, if possible.  To do this, look
     for the branch guarding the loop back-edge.  */

  /* If this is a simple loop with an in_edge, then the loop control
     branch is typically at the end of its source.  */
  desc = get_simple_loop_desc (loop);
  if (desc->in_edge)
    {
      /* Scan backwards so the branch at the block end is seen first.  */
      FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
        {
          if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
            return INSN_LOCATION (insn);
        }
    }
  /* If loop has a single exit, then the loop control branch
     must be at the end of its source.  */
  if ((exit = single_exit (loop)))
    {
      FOR_BB_INSNS_REVERSE (exit->src, insn)
        {
          if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
            return INSN_LOCATION (insn);
        }
    }
  /* Next check the latch, to see if it is non-empty.  */
  FOR_BB_INSNS_REVERSE (loop->latch, insn)
    {
      if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
        return INSN_LOCATION (insn);
    }
  /* Finally, if none of the above identifies the loop control branch,
     return the first location in the loop header.  */
  FOR_BB_INSNS (loop->header, insn)
    {
      if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
        return INSN_LOCATION (insn);
    }
  /* If all else fails, simply return the current function location.  */
  return DECL_SOURCE_LOCATION (current_function_decl);
}
1782 
/* Records that every statement in LOOP is executed I_BOUND times.
   REALISTIC is true if I_BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   I_BOUND times.  */

void
record_niter_bound (struct loop *loop, const widest_int &i_bound,
		    bool realistic, bool upper)
{
  /* Update the bounds only when there is no previous estimation, or when the
     current estimation is smaller.  */
  if (upper
      && (!loop->any_upper_bound
	  || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = i_bound;
      /* A hard upper bound also serves as a likely upper bound when no
	 better one is recorded yet.  */
      if (!loop->any_likely_upper_bound)
	{
	  loop->any_likely_upper_bound = true;
	  loop->nb_iterations_likely_upper_bound = i_bound;
	}
    }
  if (realistic
      && (!loop->any_estimate
	  || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
    {
      loop->any_estimate = true;
      loop->nb_iterations_estimate = i_bound;
    }
  /* An unrealistic bound still tightens the likely upper bound.  */
  if (!realistic
      && (!loop->any_likely_upper_bound
          || wi::ltu_p (i_bound, loop->nb_iterations_likely_upper_bound)))
    {
      loop->any_likely_upper_bound = true;
      loop->nb_iterations_likely_upper_bound = i_bound;
    }

  /* If an upper bound is smaller than the realistic estimate of the
     number of iterations, use the upper bound instead.  */
  if (loop->any_upper_bound
      && loop->any_estimate
      && wi::ltu_p (loop->nb_iterations_upper_bound,
		    loop->nb_iterations_estimate))
    loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
  if (loop->any_upper_bound
      && loop->any_likely_upper_bound
      && wi::ltu_p (loop->nb_iterations_upper_bound,
		    loop->nb_iterations_likely_upper_bound))
    loop->nb_iterations_likely_upper_bound = loop->nb_iterations_upper_bound;
}
1834 
1835 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1836    if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
1837    on the number of iterations of LOOP could not be derived, returns -1.  */
1838 
1839 HOST_WIDE_INT
1840 get_estimated_loop_iterations_int (struct loop *loop)
1841 {
1842   widest_int nit;
1843   HOST_WIDE_INT hwi_nit;
1844 
1845   if (!get_estimated_loop_iterations (loop, &nit))
1846     return -1;
1847 
1848   if (!wi::fits_shwi_p (nit))
1849     return -1;
1850   hwi_nit = nit.to_shwi ();
1851 
1852   return hwi_nit < 0 ? -1 : hwi_nit;
1853 }
1854 
1855 /* Returns an upper bound on the number of executions of statements
1856    in the LOOP.  For statements before the loop exit, this exceeds
1857    the number of execution of the latch by one.  */
1858 
1859 HOST_WIDE_INT
1860 max_stmt_executions_int (struct loop *loop)
1861 {
1862   HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1863   HOST_WIDE_INT snit;
1864 
1865   if (nit == -1)
1866     return -1;
1867 
1868   snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1869 
1870   /* If the computation overflows, return -1.  */
1871   return snit < 0 ? -1 : snit;
1872 }
1873 
1874 /* Returns an likely upper bound on the number of executions of statements
1875    in the LOOP.  For statements before the loop exit, this exceeds
1876    the number of execution of the latch by one.  */
1877 
1878 HOST_WIDE_INT
1879 likely_max_stmt_executions_int (struct loop *loop)
1880 {
1881   HOST_WIDE_INT nit = get_likely_max_loop_iterations_int (loop);
1882   HOST_WIDE_INT snit;
1883 
1884   if (nit == -1)
1885     return -1;
1886 
1887   snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1888 
1889   /* If the computation overflows, return -1.  */
1890   return snit < 0 ? -1 : snit;
1891 }
1892 
/* Sets NIT to the estimated number of executions of the latch of the
   LOOP.  If we have no reliable estimate, the function returns false, otherwise
   returns true.  */

bool
get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* Even if the bound is not recorded, possibly we can derive one from
     profile.  */
  if (!loop->any_estimate)
    {
      /* A nonzero header count means profile data is available.  */
      if (loop->header->count)
	{
          *nit = gcov_type_to_wide_int
		   (expected_loop_iterations_unbounded (loop) + 1);
	  return true;
	}
      return false;
    }

  *nit = loop->nb_iterations_estimate;
  return true;
}
1916 
1917 /* Sets NIT to an upper bound for the maximum number of executions of the
1918    latch of the LOOP.  If we have no reliable estimate, the function returns
1919    false, otherwise returns true.  */
1920 
1921 bool
1922 get_max_loop_iterations (const struct loop *loop, widest_int *nit)
1923 {
1924   if (!loop->any_upper_bound)
1925     return false;
1926 
1927   *nit = loop->nb_iterations_upper_bound;
1928   return true;
1929 }
1930 
1931 /* Similar to get_max_loop_iterations, but returns the estimate only
1932    if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
1933    on the number of iterations of LOOP could not be derived, returns -1.  */
1934 
1935 HOST_WIDE_INT
1936 get_max_loop_iterations_int (const struct loop *loop)
1937 {
1938   widest_int nit;
1939   HOST_WIDE_INT hwi_nit;
1940 
1941   if (!get_max_loop_iterations (loop, &nit))
1942     return -1;
1943 
1944   if (!wi::fits_shwi_p (nit))
1945     return -1;
1946   hwi_nit = nit.to_shwi ();
1947 
1948   return hwi_nit < 0 ? -1 : hwi_nit;
1949 }
1950 
1951 /* Sets NIT to an upper bound for the maximum number of executions of the
1952    latch of the LOOP.  If we have no reliable estimate, the function returns
1953    false, otherwise returns true.  */
1954 
1955 bool
1956 get_likely_max_loop_iterations (struct loop *loop, widest_int *nit)
1957 {
1958   if (!loop->any_likely_upper_bound)
1959     return false;
1960 
1961   *nit = loop->nb_iterations_likely_upper_bound;
1962   return true;
1963 }
1964 
1965 /* Similar to get_max_loop_iterations, but returns the estimate only
1966    if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
1967    on the number of iterations of LOOP could not be derived, returns -1.  */
1968 
1969 HOST_WIDE_INT
1970 get_likely_max_loop_iterations_int (struct loop *loop)
1971 {
1972   widest_int nit;
1973   HOST_WIDE_INT hwi_nit;
1974 
1975   if (!get_likely_max_loop_iterations (loop, &nit))
1976     return -1;
1977 
1978   if (!wi::fits_shwi_p (nit))
1979     return -1;
1980   hwi_nit = nit.to_shwi ();
1981 
1982   return hwi_nit < 0 ? -1 : hwi_nit;
1983 }
1984 
1985 /* Returns the loop depth of the loop BB belongs to.  */
1986 
1987 int
1988 bb_loop_depth (const_basic_block bb)
1989 {
1990   return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1991 }
1992 
1993 /* Marks LOOP for removal and sets LOOPS_NEED_FIXUP.  */
1994 
1995 void
1996 mark_loop_for_removal (loop_p loop)
1997 {
1998   if (loop->header == NULL)
1999     return;
2000   loop->former_header = loop->header;
2001   loop->header = NULL;
2002   loop->latch = NULL;
2003   loops_state_set (LOOPS_NEED_FIXUP);
2004 }
2005