/* Perform doloop optimizations
   Copyright (C) 2004-2017 Free Software Foundation, Inc.
   Based on code by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "dojump.h"
#include "expr.h"
#include "cfgloop.h"
#include "cfgrtl.h"
#include "params.h"
#include "dumpfile.h"
#include "loop-unroll.h"
#include "regs.h"
#include "df.h"

/* This module is used to modify loops with a determinable number of
   iterations to use special low-overhead looping instructions.

   It first validates whether the loop is well behaved and has a
   determinable number of iterations (either at compile or run-time).
   It then modifies the loop to use a low-overhead looping pattern as
   follows:

   1. A pseudo register is allocated as the loop iteration counter.

   2. The number of loop iterations is calculated and is stored
      in the loop counter.

   3. At the end of the loop, the jump insn is replaced by the
      doloop_end pattern.  The compare must remain because it might be
      used elsewhere.  If the loop-variable or condition register are
      used elsewhere, they will be eliminated by flow.

   4. An optional doloop_begin pattern is inserted at the top of the
      loop.

   TODO: The optimization should only be performed when either the biv
   used for the exit condition is not used at all except for the exit
   test, or if we do not have to change its value, since otherwise we
   have to add a new induction variable, which usually will not pay off
   (unless the cost of the doloop pattern is somehow much lower than the
   cost of compare & jump, or unless the bct register cannot be used for
   anything else but doloop -- ??? detect these cases).  */

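/* For illustration only (this sketch is not taken from any particular
   target): a counted source loop such as

       for (i = 0; i < n; i++)
         body ();

   is conceptually rewritten so that a dedicated counter register is
   loaded with the iteration count in the preheader and the backward
   branch becomes a single decrement-and-branch (doloop_end) insn:

       counter = n;
       do
         {
           body ();
         }
       while (--counter != 0);

   The original induction variable and exit compare are left in place;
   later passes remove them if they turn out to be otherwise unused.  */
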
/* Return the loop termination condition for DOLOOP_PAT or zero
   if it is not a decrement and branch jump insn.  */

rtx
doloop_condition_get (rtx_insn *doloop_pat)
{
  rtx cmp;
  rtx inc;
  rtx reg;
  rtx inc_src;
  rtx condition;
  rtx pattern;
  rtx cc_reg = NULL_RTX;
  rtx reg_orig = NULL_RTX;

  /* The canonical doloop pattern we expect has one of the following
     forms:

     1)  (parallel [(set (pc) (if_then_else (condition)
                                            (label_ref (label))
                                            (pc)))
                    (set (reg) (plus (reg) (const_int -1)))
                    (additional clobbers and uses)])

     The branch must be the first entry of the parallel (also required
     by jump.c), and the second entry of the parallel must be a set of
     the loop counter register.  Some targets (IA-64) wrap the set of
     the loop counter in an if_then_else too.

     2)  (set (reg) (plus (reg) (const_int -1)))
         (set (pc) (if_then_else (reg != 0)
                                 (label_ref (label))
                                 (pc)))

     Some targets (ARM) do the comparison before the branch, as in the
     following form:

     3)  (parallel [(set (cc) (compare (plus (reg) (const_int -1)) 0))
                    (set (reg) (plus (reg) (const_int -1)))])
         (set (pc) (if_then_else (cc == NE)
                                 (label_ref (label))
                                 (pc)))  */

  pattern = PATTERN (doloop_pat);

  if (GET_CODE (pattern) != PARALLEL)
    {
      rtx cond;
      rtx_insn *prev_insn = prev_nondebug_insn (doloop_pat);
      rtx cmp_arg1, cmp_arg2;
      rtx cmp_orig;

      /* In case the pattern is not PARALLEL we expect two forms
	 of doloop which are cases 2) and 3) above: in case 2) the
	 decrement immediately precedes the branch, while in case 3)
	 the compare and decrement instructions immediately precede
	 the branch.  */

      if (prev_insn == NULL_RTX || !INSN_P (prev_insn))
        return 0;

      cmp = pattern;
      if (GET_CODE (PATTERN (prev_insn)) == PARALLEL)
        {
	  /* The third case: the compare and decrement instructions
	     immediately precede the branch.  */
	  cmp_orig = XVECEXP (PATTERN (prev_insn), 0, 0);
	  if (GET_CODE (cmp_orig) != SET)
	    return 0;
	  if (GET_CODE (SET_SRC (cmp_orig)) != COMPARE)
	    return 0;
	  cmp_arg1 = XEXP (SET_SRC (cmp_orig), 0);
	  cmp_arg2 = XEXP (SET_SRC (cmp_orig), 1);
	  if (cmp_arg2 != const0_rtx
	      || GET_CODE (cmp_arg1) != PLUS)
	    return 0;
	  reg_orig = XEXP (cmp_arg1, 0);
	  if (XEXP (cmp_arg1, 1) != GEN_INT (-1)
	      || !REG_P (reg_orig))
	    return 0;
	  cc_reg = SET_DEST (cmp_orig);

	  inc = XVECEXP (PATTERN (prev_insn), 0, 1);
	}
      else
        inc = PATTERN (prev_insn);
      if (GET_CODE (cmp) == SET && GET_CODE (SET_SRC (cmp)) == IF_THEN_ELSE)
	{
	  /* We expect the condition to be of the form (reg != 0).  */
	  cond = XEXP (SET_SRC (cmp), 0);
	  if (GET_CODE (cond) != NE || XEXP (cond, 1) != const0_rtx)
	    return 0;
	}
    }
  else
    {
      cmp = XVECEXP (pattern, 0, 0);
      inc = XVECEXP (pattern, 0, 1);
    }

  /* Check for (set (reg) (something)).  */
  if (GET_CODE (inc) != SET)
    return 0;
  reg = SET_DEST (inc);
  if (! REG_P (reg))
    return 0;

  /* Check if something = (plus (reg) (const_int -1)).
     On IA-64, this decrement is wrapped in an if_then_else.  */
  inc_src = SET_SRC (inc);
  if (GET_CODE (inc_src) == IF_THEN_ELSE)
    inc_src = XEXP (inc_src, 1);
  if (GET_CODE (inc_src) != PLUS
      || XEXP (inc_src, 0) != reg
      || XEXP (inc_src, 1) != constm1_rtx)
    return 0;

  /* Check for (set (pc) (if_then_else (condition)
                                       (label_ref (label))
                                       (pc))).  */
  if (GET_CODE (cmp) != SET
      || SET_DEST (cmp) != pc_rtx
      || GET_CODE (SET_SRC (cmp)) != IF_THEN_ELSE
      || GET_CODE (XEXP (SET_SRC (cmp), 1)) != LABEL_REF
      || XEXP (SET_SRC (cmp), 2) != pc_rtx)
    return 0;

  /* Extract loop termination condition.  */
  condition = XEXP (SET_SRC (cmp), 0);

  /* We expect a GE or NE comparison with 0 or 1.  */
  if ((GET_CODE (condition) != GE
       && GET_CODE (condition) != NE)
      || (XEXP (condition, 1) != const0_rtx
          && XEXP (condition, 1) != const1_rtx))
    return 0;

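  /* The value being compared must relate to the loop counter: either the
     counter register itself, the condition-code register set from it
     (the third form above), or the counter wrapped in the decrementing
     PLUS.  */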
  if ((XEXP (condition, 0) == reg)
      /* For the third case:  */
      || ((cc_reg != NULL_RTX)
	  && (XEXP (condition, 0) == cc_reg)
	  && (reg_orig == reg))
      || (GET_CODE (XEXP (condition, 0)) == PLUS
	  && XEXP (XEXP (condition, 0), 0) == reg))
   {
     if (GET_CODE (pattern) != PARALLEL)
     /* For the second form we expect:

          (set (reg) (plus (reg) (const_int -1)))
          (set (pc) (if_then_else (reg != 0)
                                  (label_ref (label))
                                  (pc)))

        which is equivalent to the following:

          (parallel [(set (pc) (if_then_else (reg != 1)
                                             (label_ref (label))
                                             (pc)))
                     (set (reg) (plus (reg) (const_int -1)))
                     (additional clobbers and uses)])

        For the third form we expect:

          (parallel [(set (cc) (compare (plus (reg) (const_int -1)) 0))
                     (set (reg) (plus (reg) (const_int -1)))])
          (set (pc) (if_then_else (cc == NE)
                                  (label_ref (label))
                                  (pc)))

        which is equivalent to the following:

          (parallel [(set (cc) (compare (reg) 1))
                     (set (reg) (plus (reg) (const_int -1)))
                     (set (pc) (if_then_else (cc == NE)
                                             (label_ref (label))
                                             (pc)))])

        So for both cases we return the condition of the equivalent
        parallel form instead.  */
        condition = gen_rtx_fmt_ee (NE, VOIDmode, inc_src, const1_rtx);

    return condition;
   }

  /* ??? If a machine uses a funny comparison, we could return a
     canonicalized form here.  */

  return 0;
}

/* Return nonzero if the loop specified by LOOP is suitable for
   the use of special low-overhead looping instructions.  DESC
   describes the number of iterations of the loop.  */

static bool
doloop_valid_p (struct loop *loop, struct niter_desc *desc)
{
  basic_block *body = get_loop_body (loop), bb;
  rtx_insn *insn;
  unsigned i;
  bool result = true;

  /* Check for loops that may not terminate under special conditions.  */
  if (!desc->simple_p
      || desc->assumptions
      || desc->infinite)
    {
      /* There are some cases that would require special attention.
	 For example if the comparison is LEU and the comparison value
	 is UINT_MAX then the loop will not terminate.  Similarly, if the
	 comparison code is GEU and the comparison value is 0, the
	 loop will not terminate.

	 If the absolute increment is not 1, the loop can be infinite
	 even with LTU/GTU, e.g. for (i = 3; i > 0; i -= 2)

	 ??? We could compute these conditions at run-time and have an
	 additional jump around the loop to ensure an infinite loop.
	 However, it is very unlikely that this is the intended
	 behavior of the loop and checking for these rare boundary
	 conditions would pessimize all other code.

	 If the loop is executed only a few times an extra check to
	 restart the loop could use up most of the benefits of using a
	 count register loop.  Note, however, that normally this
	 restart branch would never execute, so it could be predicted
	 well by the CPU.  We should generate the pessimistic code by
	 default, and have an option, e.g. -funsafe-loops, that would
	 enable count-register loops in this case.  */
      if (dump_file)
	fprintf (dump_file, "Doloop: Possible infinite iteration case.\n");
      result = false;
      goto cleanup;
    }

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      for (insn = BB_HEAD (bb);
	   insn != NEXT_INSN (BB_END (bb));
	   insn = NEXT_INSN (insn))
	{
	  /* Different targets have different requirements for low-overhead
	     looping.  Call the back end for each instruction within the loop
	     to let it decide whether the insn prohibits a low-overhead loop.
	     If it does, the back end returns the reason, which is emitted to
	     the dump file.  */
	  const char * invalid = targetm.invalid_within_doloop (insn);
	  if (invalid)
	    {
	      if (dump_file)
		fprintf (dump_file, "Doloop: %s\n", invalid);
	      result = false;
	      goto cleanup;
	    }
	}
    }
  result = true;

cleanup:
  free (body);

  return result;
}

/* Add a test of COND jumping to DEST on edge *E, and set *E to the new
   fallthru edge.  If the condition is always false, do not do anything.
   If it is always true, redirect *E to DEST and return false.  In all
   other cases, return true.  */

static bool
add_test (rtx cond, edge *e, basic_block dest)
{
  rtx_insn *seq, *jump;
  rtx_code_label *label;
  machine_mode mode;
  rtx op0 = XEXP (cond, 0), op1 = XEXP (cond, 1);
  enum rtx_code code = GET_CODE (cond);
  basic_block bb;

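  /* One of the operands may be a constant with VOIDmode, so take the
     comparison mode from whichever operand carries a real mode.  */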
  mode = GET_MODE (XEXP (cond, 0));
  if (mode == VOIDmode)
    mode = GET_MODE (XEXP (cond, 1));

  start_sequence ();
  op0 = force_operand (op0, NULL_RTX);
  op1 = force_operand (op1, NULL_RTX);
  label = block_label (dest);
  do_compare_rtx_and_jump (op0, op1, code, 0, mode, NULL_RTX, NULL, label, -1);

  jump = get_last_insn ();
  if (!jump || !JUMP_P (jump))
    {
      /* The condition is always false and the jump was optimized out.  */
      end_sequence ();
      return true;
    }

  seq = get_insns ();
  unshare_all_rtl_in_chain (seq);
  end_sequence ();

  /* There is always at least the jump insn in the sequence.  */
  gcc_assert (seq != NULL_RTX);

  bb = split_edge_and_insert (*e, seq);
  *e = single_succ_edge (bb);

  if (any_uncondjump_p (jump))
    {
      /* The condition is always true.  */
      delete_insn (jump);
      redirect_edge_and_branch_force (*e, dest);
      return false;
    }

  JUMP_LABEL (jump) = label;

  /* The jump is supposed to handle an unlikely special case.  */
  add_int_reg_note (jump, REG_BR_PROB, 0);

  LABEL_NUSES (label)++;

  make_edge (bb, dest, (*e)->flags & ~EDGE_FALLTHRU);
  return true;
}

/* Modify the loop to use the low-overhead looping insn where LOOP
   describes the loop, DESC describes the number of iterations of the
   loop, and DOLOOP_SEQ is the low-overhead looping insn sequence to
   emit at the end of the loop.  CONDITION is the condition separated
   from the DOLOOP_SEQ.  COUNT is the number of iterations of the
   LOOP.  */

static void
doloop_modify (struct loop *loop, struct niter_desc *desc,
	       rtx_insn *doloop_seq, rtx condition, rtx count)
{
  rtx counter_reg;
  rtx tmp, noloop = NULL_RTX;
  rtx_insn *sequence;
  rtx_insn *jump_insn;
  rtx_code_label *jump_label;
  int nonneg = 0;
  bool increment_count;
  basic_block loop_end = desc->out_edge->src;
  machine_mode mode;
  rtx true_prob_val;
  widest_int iterations;

  jump_insn = BB_END (loop_end);

  if (dump_file)
    {
      fprintf (dump_file, "Doloop: Inserting doloop pattern (");
      if (desc->const_iter)
	fprintf (dump_file, "%" PRId64, desc->niter);
      else
	fputs ("runtime", dump_file);
      fputs (" iterations).\n", dump_file);
    }

  /* Get the probability of the original branch.  If it exists, we will
     need to update the REG_BR_PROB note of the new jump_insn.  */
  true_prob_val = find_reg_note (jump_insn, REG_BR_PROB, NULL_RTX);

  /* Discard the original jump to continue the loop.  The original compare
     result may still be live, so it cannot be discarded explicitly.  */
  delete_insn (jump_insn);

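  /* The condition returned by doloop_condition_get refers either to the
     counter register itself or to (plus (reg) (const_int -1)); in the
     latter case strip the PLUS to recover the register.  */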
  counter_reg = XEXP (condition, 0);
  if (GET_CODE (counter_reg) == PLUS)
    counter_reg = XEXP (counter_reg, 0);
  mode = GET_MODE (counter_reg);

  increment_count = false;
  switch (GET_CODE (condition))
    {
    case NE:
      /* Currently only NE tests against zero and one are supported.  */
      noloop = XEXP (condition, 1);
      if (noloop != const0_rtx)
	{
	  gcc_assert (noloop == const1_rtx);
	  increment_count = true;
	}
      break;

    case GE:
      /* Currently only GE tests against zero are supported.  */
      gcc_assert (XEXP (condition, 1) == const0_rtx);

      noloop = constm1_rtx;

      /* The iteration count does not need incrementing for a GE test.  */
      increment_count = false;

      /* Determine if the iteration counter will be non-negative.
	 Note that the maximum value loaded is iterations_max - 1.  */
      if (get_max_loop_iterations (loop, &iterations)
	  && wi::leu_p (iterations,
			wi::set_bit_in_zero <widest_int>
			(GET_MODE_PRECISION (mode) - 1)))
	nonneg = 1;
      break;

      /* Abort if an invalid doloop pattern has been generated.  */
    default:
      gcc_unreachable ();
    }

  if (increment_count)
    count = simplify_gen_binary (PLUS, mode, count, const1_rtx);

  /* Insert initialization of the count register into the loop header.  */
  start_sequence ();
  /* COUNT has already been copied through copy_rtx.  */
  reset_used_flags (count);
  set_used_flags (condition);
  tmp = force_operand (count, counter_reg);
  convert_move (counter_reg, tmp, 1);
  sequence = get_insns ();
  unshare_all_rtl_in_chain (sequence);
  end_sequence ();
  emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));

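  /* desc->noloop_assumptions lists conditions under which the loop would
     not be entered at all; test them here and, if one holds at run time,
     reset the counter register in the set_zero block created below.  */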
  if (desc->noloop_assumptions)
    {
      rtx ass = copy_rtx (desc->noloop_assumptions);
      basic_block preheader = loop_preheader_edge (loop)->src;
      basic_block set_zero = split_edge (loop_preheader_edge (loop));
      basic_block new_preheader = split_edge (loop_preheader_edge (loop));
      edge te;

      /* Expand the condition testing the assumptions, and if it does not
	 pass, reset the count register to 0.  */
      redirect_edge_and_branch_force (single_succ_edge (preheader), new_preheader);
      set_immediate_dominator (CDI_DOMINATORS, new_preheader, preheader);

      set_zero->count = 0;
      set_zero->frequency = 0;

      te = single_succ_edge (preheader);
      for (; ass; ass = XEXP (ass, 1))
	if (!add_test (XEXP (ass, 0), &te, set_zero))
	  break;

      if (ass)
	{
	  /* We reached a condition that is always true.  This is very hard to
	     reproduce (such a loop does not roll, and thus it would most
	     likely get optimized out by some of the preceding optimizations).
	     In fact, I do not have any testcase for it.  However, it would
	     also be very hard to show that it is impossible, so we must
	     handle this case.  */
	  set_zero->count = preheader->count;
	  set_zero->frequency = preheader->frequency;
	}

      if (EDGE_COUNT (set_zero->preds) == 0)
	{
	  /* All the conditions were simplified to false, remove the
	     unreachable set_zero block.  */
	  delete_basic_block (set_zero);
	}
      else
	{
	  /* Reset the counter to zero in the set_zero block.  */
	  start_sequence ();
	  convert_move (counter_reg, noloop, 0);
	  sequence = get_insns ();
	  end_sequence ();
	  emit_insn_after (sequence, BB_END (set_zero));

	  set_immediate_dominator (CDI_DOMINATORS, set_zero,
				   recompute_dominator (CDI_DOMINATORS,
							set_zero));
	}

      set_immediate_dominator (CDI_DOMINATORS, new_preheader,
			       recompute_dominator (CDI_DOMINATORS,
						    new_preheader));
    }

  /* Some targets (e.g., C4x) need to initialize special looping
     registers.  */
  if (targetm.have_doloop_begin ())
    if (rtx_insn *seq = targetm.gen_doloop_begin (counter_reg, doloop_seq))
      emit_insn_after (seq, BB_END (loop_preheader_edge (loop)->src));

  /* Insert the new low-overhead looping insn.  */
  emit_jump_insn_after (doloop_seq, BB_END (loop_end));
  jump_insn = BB_END (loop_end);
  jump_label = block_label (desc->in_edge->dest);
  JUMP_LABEL (jump_insn) = jump_label;
  LABEL_NUSES (jump_label)++;

  /* Ensure the right fallthru edge is marked, in case we have reversed
     the condition.  */
  desc->in_edge->flags &= ~EDGE_FALLTHRU;
  desc->out_edge->flags |= EDGE_FALLTHRU;

  /* Add a REG_NONNEG note if the actual or estimated maximum number
     of iterations is non-negative.  */
  if (nonneg)
    add_reg_note (jump_insn, REG_NONNEG, NULL_RTX);

  /* Update the REG_BR_PROB note.  */
  if (true_prob_val)
    {
      /* Seems safer to use the branch probability.  */
      add_int_reg_note (jump_insn, REG_BR_PROB, desc->in_edge->probability);
    }
}

/* Called through note_stores.  */

static void
record_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  bitmap mod = (bitmap)data;
  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
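      /* A hard register in a multi-word mode occupies several
	 consecutive register numbers; record every one of them.  */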
      if (HARD_REGISTER_P (x))
	{
	  unsigned int end_regno = end_hard_regno (GET_MODE (x), regno);
	  do
	    bitmap_set_bit (mod, regno);
	  while (++regno < end_regno);
	}
      else
	bitmap_set_bit (mod, regno);
    }
}

/* Process the loop described by LOOP, validating that it is suitable for
   conversion to use a low-overhead looping instruction, and replace the
   jump insn where suitable.  Return true if the loop was successfully
   modified.  */

static bool
doloop_optimize (struct loop *loop)
{
  machine_mode mode;
  rtx doloop_reg;
  rtx count;
  widest_int iterations, iterations_max;
  rtx_code_label *start_label;
  rtx condition;
  unsigned level;
  HOST_WIDE_INT est_niter;
  int max_cost;
  struct niter_desc *desc;
  unsigned word_mode_size;
  unsigned HOST_WIDE_INT word_mode_max;
  int entered_at_top;

  if (dump_file)
    fprintf (dump_file, "Doloop: Processing loop %d.\n", loop->num);

  iv_analysis_loop_init (loop);

  /* Find the simple exit of the LOOP.  */
  desc = get_simple_loop_desc (loop);

  /* Check that the loop is a candidate for a low-overhead looping insn.  */
  if (!doloop_valid_p (loop, desc))
    {
      if (dump_file)
	fprintf (dump_file,
		 "Doloop: The loop is not suitable.\n");
      return false;
    }
  mode = desc->mode;

  est_niter = get_estimated_loop_iterations_int (loop);
  if (est_niter == -1)
    est_niter = get_likely_max_loop_iterations_int (loop);

  if (est_niter >= 0 && est_niter < 3)
    {
      if (dump_file)
	fprintf (dump_file,
		 "Doloop: Too few iterations (%u) to be profitable.\n",
		 (unsigned int)est_niter);
      return false;
    }

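  /* Computing the number of iterations at run time must itself be cheap
     enough to be worthwhile; bound its RTX cost by the
     max-iterations-computation-cost parameter.  */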
  max_cost
    = COSTS_N_INSNS (PARAM_VALUE (PARAM_MAX_ITERATIONS_COMPUTATION_COST));
  if (set_src_cost (desc->niter_expr, mode, optimize_loop_for_speed_p (loop))
      > max_cost)
    {
      if (dump_file)
	fprintf (dump_file,
		 "Doloop: number of iterations too costly to compute.\n");
      return false;
    }

  if (desc->const_iter)
    iterations = widest_int::from (rtx_mode_t (desc->niter_expr, mode),
				   UNSIGNED);
  else
    iterations = 0;
  if (!get_max_loop_iterations (loop, &iterations_max))
    iterations_max = 0;
  level = get_loop_level (loop) + 1;
  entered_at_top = (loop->latch == desc->in_edge->dest
		    && contains_no_active_insn_p (loop->latch));
  if (!targetm.can_use_doloop_p (iterations, iterations_max, level,
				 entered_at_top))
    {
      if (dump_file)
	fprintf (dump_file, "Loop rejected by can_use_doloop_p.\n");
      return false;
    }

  /* Generate looping insn.  If the pattern FAILs then give up trying
     to modify the loop since there is some aspect the back-end does
     not like.  */
  count = copy_rtx (desc->niter_expr);
  start_label = block_label (desc->in_edge->dest);
  doloop_reg = gen_reg_rtx (mode);
  rtx_insn *doloop_seq = targetm.gen_doloop_end (doloop_reg, start_label);

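  /* If the target declined to generate the pattern in MODE, retry below
     in word_mode; first work out the largest value representable in
     word_mode so we can check that the iteration count still fits.  */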
  word_mode_size = GET_MODE_PRECISION (word_mode);
  word_mode_max = (HOST_WIDE_INT_1U << (word_mode_size - 1) << 1) - 1;
  if (! doloop_seq
      && mode != word_mode
      /* Before trying a mode different from the one in which the number
	 of iterations is computed, we must be sure that the number of
	 iterations fits into the new mode.  */
      && (word_mode_size >= GET_MODE_PRECISION (mode)
	  || wi::leu_p (iterations_max, word_mode_max)))
    {
      if (word_mode_size > GET_MODE_PRECISION (mode))
	count = simplify_gen_unary (ZERO_EXTEND, word_mode, count, mode);
      else
	count = lowpart_subreg (word_mode, count, mode);
      PUT_MODE (doloop_reg, word_mode);
      doloop_seq = targetm.gen_doloop_end (doloop_reg, start_label);
    }
  if (! doloop_seq)
    {
      if (dump_file)
	fprintf (dump_file,
		 "Doloop: Target unwilling to use doloop pattern!\n");
      return false;
    }

  /* If multiple instructions were created, the last must be the
     jump instruction.  */
  rtx_insn *doloop_insn = doloop_seq;
  while (NEXT_INSN (doloop_insn) != NULL_RTX)
    doloop_insn = NEXT_INSN (doloop_insn);
  if (!JUMP_P (doloop_insn)
      || !(condition = doloop_condition_get (doloop_insn)))
    {
      if (dump_file)
	fprintf (dump_file, "Doloop: Unrecognizable doloop pattern!\n");
      return false;
    }

  /* Ensure that the new sequence doesn't clobber a register that
     is live at the end of the block.  */
  {
    bitmap modified = BITMAP_ALLOC (NULL);

    for (rtx_insn *i = doloop_seq; i != NULL; i = NEXT_INSN (i))
      note_stores (PATTERN (i), record_reg_sets, modified);

    basic_block loop_end = desc->out_edge->src;
    bool fail = bitmap_intersect_p (df_get_live_out (loop_end), modified);
    BITMAP_FREE (modified);

    if (fail)
      {
	if (dump_file)
	  fprintf (dump_file, "Doloop: doloop pattern clobbers live out\n");
	return false;
      }
  }

  doloop_modify (loop, desc, doloop_seq, condition, count);
  return true;
}

/* This is the main entry point.  Process all loops using doloop_optimize.  */

void
doloop_optimize_loops (void)
{
  struct loop *loop;

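  /* doloop_optimize checks whether the generated sequence clobbers
     registers that are live out of the loop.  The DF_LIVE problem it
     consults is not computed by default at -O1, so add it temporarily
     and drop it again afterwards.  */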
  if (optimize == 1)
    {
      df_live_add_problem ();
      df_live_set_all_dirty ();
    }

  FOR_EACH_LOOP (loop, 0)
    {
      doloop_optimize (loop);
    }

  if (optimize == 1)
    df_remove_problem (df_live);

  iv_analysis_done ();

  checking_verify_loop_structure ();
}