xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/loop-doloop.c (revision fdd524d4ccd2bb0c6f67401e938dabf773eb0372)
1 /* Perform doloop optimizations
2    Copyright (C) 2004-2013 Free Software Foundation, Inc.
3    Based on code by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "flags.h"
27 #include "expr.h"
28 #include "hard-reg-set.h"
29 #include "basic-block.h"
30 #include "diagnostic-core.h"
31 #include "tm_p.h"
32 #include "cfgloop.h"
33 #include "params.h"
34 #include "target.h"
35 #include "dumpfile.h"
36 
37 /* This module is used to modify loops with a determinable number of
38    iterations to use special low-overhead looping instructions.
39 
40    It first validates whether the loop is well behaved and has a
41    determinable number of iterations (either at compile or run-time).
42    It then modifies the loop to use a low-overhead looping pattern as
43    follows:
44 
45    1. A pseudo register is allocated as the loop iteration counter.
46 
47    2. The number of loop iterations is calculated and is stored
48       in the loop counter.
49 
50    3. At the end of the loop, the jump insn is replaced by the
51       doloop_end pattern.  The compare must remain because it might be
52       used elsewhere.  If the loop-variable or condition register are
53       not used elsewhere, they will be eliminated by flow.
54 
55    4. An optional doloop_begin pattern is inserted at the top of the
56       loop.
57 
58    TODO The optimization should only be performed when either the biv used for
59    the exit condition is not used at all except for the exit test, or when we
60    do not have to change its value, since otherwise we have to add a new
61    induction variable, which usually will not pay off (unless the cost of the
62    doloop pattern is much lower than the cost of compare & jump, or unless the
63    bct register cannot be used for anything else but doloop -- ??? detect these
64    cases).  */
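
/* For illustration only (a hedged sketch, not the output of any particular
   target): a simple counted loop such as

       for (i = 0; i < n; i++)
         a[i] = b[i];

   is conceptually rewritten so that a dedicated counter register is loaded
   with the iteration count before the loop and a single decrement-and-branch
   instruction closes it, e.g. on a PowerPC-like target

       mtctr  rN          ; load the iteration count into the count register
     top:
       ...loop body...
       bdnz   top         ; decrement CTR and branch back while it is nonzero

   replacing the loop-closing compare-and-jump with the single doloop
   branch.  */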
65 
66 #ifdef HAVE_doloop_end
67 
68 /* Return the loop termination condition for DOLOOP_PAT, or zero
69    if it is not a decrement-and-branch jump insn.  */
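
/* In this file, doloop_optimize calls this routine on the final jump insn
   produced by gen_doloop_end; a zero return means the generated pattern was
   not recognized and the loop is left unmodified.  */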
70 
71 rtx
72 doloop_condition_get (rtx doloop_pat)
73 {
74   rtx cmp;
75   rtx inc;
76   rtx reg;
77   rtx inc_src;
78   rtx condition;
79   rtx pattern;
80   rtx cc_reg = NULL_RTX;
81   rtx reg_orig = NULL_RTX;
82 
83   /* The canonical doloop pattern we expect has one of the following
84      forms:
85 
86      1)  (parallel [(set (pc) (if_then_else (condition)
87 	  			            (label_ref (label))
88 				            (pc)))
89 	             (set (reg) (plus (reg) (const_int -1)))
90 	             (additional clobbers and uses)])
91 
92      The branch must be the first entry of the parallel (also required
93      by jump.c), and the second entry of the parallel must be a set of
94      the loop counter register.  Some targets (IA-64) wrap the set of
95      the loop counter in an if_then_else too.
96 
97      2)  (set (reg) (plus (reg) (const_int -1)))
98          (set (pc) (if_then_else (reg != 0)
99 	                         (label_ref (label))
100 			         (pc))).
101 
102      Some targets (ARM) do the comparison before the branch, as in the
103      following form:
104 
105      3) (parallel [(set (cc) (compare (plus (reg) (const_int -1)) 0))
106                    (set (reg) (plus (reg) (const_int -1)))])
107         (set (pc) (if_then_else (cc == NE)
108                                 (label_ref (label))
109                                 (pc))) */
110 
111   pattern = PATTERN (doloop_pat);
112 
113   if (GET_CODE (pattern) != PARALLEL)
114     {
115       rtx cond;
116       rtx prev_insn = prev_nondebug_insn (doloop_pat);
117       rtx cmp_arg1, cmp_arg2;
118       rtx cmp_orig;
119 
120       /* In case the pattern is not PARALLEL we expect two forms
121 	 of doloop which are cases 2) and 3) above: in case 2) the
122 	 decrement immediately precedes the branch, while in case 3)
123 	 the compare and decrement instructions immediately precede
124 	 the branch.  */
125 
126       if (prev_insn == NULL_RTX || !INSN_P (prev_insn))
127         return 0;
128 
129       cmp = pattern;
130       if (GET_CODE (PATTERN (prev_insn)) == PARALLEL)
131         {
132 	  /* The third case: the compare and decrement instructions
133 	     immediately precede the branch.  */
134 	  cmp_orig = XVECEXP (PATTERN (prev_insn), 0, 0);
135 	  if (GET_CODE (cmp_orig) != SET)
136 	    return 0;
137 	  if (GET_CODE (SET_SRC (cmp_orig)) != COMPARE)
138 	    return 0;
139 	  cmp_arg1 = XEXP (SET_SRC (cmp_orig), 0);
140           cmp_arg2 = XEXP (SET_SRC (cmp_orig), 1);
141 	  if (cmp_arg2 != const0_rtx
142 	      || GET_CODE (cmp_arg1) != PLUS)
143 	    return 0;
144 	  reg_orig = XEXP (cmp_arg1, 0);
145 	  if (XEXP (cmp_arg1, 1) != GEN_INT (-1)
146 	      || !REG_P (reg_orig))
147 	    return 0;
148 	  cc_reg = SET_DEST (cmp_orig);
149 
150 	  inc = XVECEXP (PATTERN (prev_insn), 0, 1);
151 	}
152       else
153         inc = PATTERN (prev_insn);
154       /* We expect the condition to be of the form (reg != 0)  */
155       cond = XEXP (SET_SRC (cmp), 0);
156       if (GET_CODE (cond) != NE || XEXP (cond, 1) != const0_rtx)
157         return 0;
158     }
159   else
160     {
161       cmp = XVECEXP (pattern, 0, 0);
162       inc = XVECEXP (pattern, 0, 1);
163     }
164 
165   /* Check for (set (reg) (something)).  */
166   if (GET_CODE (inc) != SET)
167     return 0;
168   reg = SET_DEST (inc);
169   if (! REG_P (reg))
170     return 0;
171 
172   /* Check if something = (plus (reg) (const_int -1)).
173      On IA-64, this decrement is wrapped in an if_then_else.  */
174   inc_src = SET_SRC (inc);
175   if (GET_CODE (inc_src) == IF_THEN_ELSE)
176     inc_src = XEXP (inc_src, 1);
177   if (GET_CODE (inc_src) != PLUS
178       || XEXP (inc_src, 0) != reg
179       || XEXP (inc_src, 1) != constm1_rtx)
180     return 0;
181 
182   /* Check for (set (pc) (if_then_else (condition)
183                                        (label_ref (label))
184                                        (pc))).  */
185   if (GET_CODE (cmp) != SET
186       || SET_DEST (cmp) != pc_rtx
187       || GET_CODE (SET_SRC (cmp)) != IF_THEN_ELSE
188       || GET_CODE (XEXP (SET_SRC (cmp), 1)) != LABEL_REF
189       || XEXP (SET_SRC (cmp), 2) != pc_rtx)
190     return 0;
191 
192   /* Extract loop termination condition.  */
193   condition = XEXP (SET_SRC (cmp), 0);
194 
195   /* We expect a GE or NE comparison with 0 or 1.  */
196   if ((GET_CODE (condition) != GE
197        && GET_CODE (condition) != NE)
198       || (XEXP (condition, 1) != const0_rtx
199           && XEXP (condition, 1) != const1_rtx))
200     return 0;
201 
202   if ((XEXP (condition, 0) == reg)
203       /* For the third case:  */
204       || ((cc_reg != NULL_RTX)
205 	  && (XEXP (condition, 0) == cc_reg)
206 	  && (reg_orig == reg))
207       || (GET_CODE (XEXP (condition, 0)) == PLUS
208 	  && XEXP (XEXP (condition, 0), 0) == reg))
209    {
210      if (GET_CODE (pattern) != PARALLEL)
211      /*  The second form,
212 
213          (set (reg) (plus (reg) (const_int -1)))
214          (set (pc) (if_then_else (reg != 0)
215                                  (label_ref (label))
216                                  (pc))),
217 
218          is equivalent to the following:
219 
220          (parallel [(set (pc) (if_then_else (reg != 1)
221                                             (label_ref (label))
222                                             (pc)))
223                      (set (reg) (plus (reg) (const_int -1)))
224                      (additional clobbers and uses)])
225 
226         and the third form,
227 
228         (parallel [(set (cc) (compare (plus (reg) (const_int -1)) 0))
229                    (set (reg) (plus (reg) (const_int -1)))])
230         (set (pc) (if_then_else (cc == NE)
231                                 (label_ref (label))
232                                 (pc))),
233 
234         is equivalent to the following:
235 
236         (parallel [(set (cc) (compare (reg) (const_int 1)))
237                    (set (reg) (plus (reg) (const_int -1)))
238                    (set (pc) (if_then_else (cc == NE)
239                                            (label_ref (label))
240                                            (pc)))])
241 
242         So for these two cases we return the condition in its equivalent
243         canonical form, i.e. an NE test against (const_int 1).
244      */
245         condition = gen_rtx_fmt_ee (NE, VOIDmode, inc_src, const1_rtx);
246 
247     return condition;
248    }
249 
250   /* ??? If a machine uses a funny comparison, we could return a
251      canonicalized form here.  */
252 
253   return 0;
254 }
255 
256 /* Return nonzero if the loop specified by LOOP is suitable for
257    the use of special low-overhead looping instructions.  DESC
258    describes the number of iterations of the loop.  */
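
/* Two kinds of rejection happen below: loops whose iteration count is not
   known to be computable, or that might be infinite under boundary
   conditions, are rejected up front; otherwise every insn in the loop body
   is offered to targetm.invalid_within_doloop, whose default implementation
   rejects, for example, loops containing function calls or computed
   jumps.  */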
259 
260 static bool
261 doloop_valid_p (struct loop *loop, struct niter_desc *desc)
262 {
263   basic_block *body = get_loop_body (loop), bb;
264   rtx insn;
265   unsigned i;
266   bool result = true;
267 
268   /* Check for loops that may not terminate under special conditions.  */
269   if (!desc->simple_p
270       || desc->assumptions
271       || desc->infinite)
272     {
273       /* There are some cases that would require special attention.
274 	 For example if the comparison is LEU and the comparison value
275 	 is UINT_MAX then the loop will not terminate.  Similarly, if the
276 	 comparison code is GEU and the comparison value is 0, the
277 	 loop will not terminate.
278 
279 	 If the absolute increment is not 1, the loop can be infinite
280 	 even with LTU/GTU, e.g. for (i = 3; i > 0; i -= 2)
281 
282 	 ??? We could compute these conditions at run-time and have an
283 	 additional jump around the loop to preserve the possibly infinite behavior.
284 	 However, it is very unlikely that this is the intended
285 	 behavior of the loop and checking for these rare boundary
286 	 conditions would pessimize all other code.
287 
288 	 If the loop is executed only a few times an extra check to
289 	 restart the loop could use up most of the benefits of using a
290 	 count register loop.  Note however, that normally, this
291 	 restart branch would never execute, so it could be predicted
292 	 well by the CPU.  We should generate the pessimistic code by
293 	 default, and have an option, e.g. -funsafe-loops that would
294 	 enable count-register loops in this case.  */
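
      /* A concrete source-level illustration (hedged, for exposition only):
	 a loop such as

	     for (unsigned char i = 0; i <= 255; i++)
	       ...

	 never terminates, because an 8-bit unsigned counter always satisfies
	 i <= 255, so converting it to a counted loop would wrongly make it
	 finite.  */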
295       if (dump_file)
296 	fprintf (dump_file, "Doloop: Possible infinite iteration case.\n");
297       result = false;
298       goto cleanup;
299     }
300 
301   for (i = 0; i < loop->num_nodes; i++)
302     {
303       bb = body[i];
304 
305       for (insn = BB_HEAD (bb);
306 	   insn != NEXT_INSN (BB_END (bb));
307 	   insn = NEXT_INSN (insn))
308 	{
309 	  /* Different targets have different requirements for low-overhead
310 	     looping.  Call the back end for each instruction within the loop
311 	     to let it decide whether the insn prohibits a low-overhead loop.
312 	     If so, it returns the reason, which we emit to the dump file.  */
313 	  const char * invalid = targetm.invalid_within_doloop (insn);
314 	  if (invalid)
315 	    {
316 	      if (dump_file)
317 		fprintf (dump_file, "Doloop: %s\n", invalid);
318 	      result = false;
319 	      goto cleanup;
320 	    }
321 	}
322     }
323   result = true;
324 
325 cleanup:
326   free (body);
327 
328   return result;
329 }
330 
331 /* Add a test of COND jumping to DEST on edge *E, and set *E to the new
332    fallthru edge.  If the condition is always false, do nothing.  If it is
333    always true, redirect *E to DEST and return false.  In all other cases,
334    return true.  */
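
/* In this file add_test is used by doloop_modify, below, to test each of
   DESC's noloop_assumptions on the loop preheader edge; when such a test
   succeeds, control branches to a block that loads the "no iterations"
   value into the count register.  */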
335 
336 static bool
337 add_test (rtx cond, edge *e, basic_block dest)
338 {
339   rtx seq, jump, label;
340   enum machine_mode mode;
341   rtx op0 = XEXP (cond, 0), op1 = XEXP (cond, 1);
342   enum rtx_code code = GET_CODE (cond);
343   basic_block bb;
344 
345   mode = GET_MODE (XEXP (cond, 0));
346   if (mode == VOIDmode)
347     mode = GET_MODE (XEXP (cond, 1));
348 
349   start_sequence ();
350   op0 = force_operand (op0, NULL_RTX);
351   op1 = force_operand (op1, NULL_RTX);
352   label = block_label (dest);
353   do_compare_rtx_and_jump (op0, op1, code, 0, mode, NULL_RTX,
354 			   NULL_RTX, label, -1);
355 
356   jump = get_last_insn ();
357   if (!jump || !JUMP_P (jump))
358     {
359       /* The condition is always false and the jump was optimized out.  */
360       end_sequence ();
361       return true;
362     }
363 
364   seq = get_insns ();
365   end_sequence ();
366 
367   /* There always is at least the jump insn in the sequence.  */
368   gcc_assert (seq != NULL_RTX);
369 
370   bb = split_edge_and_insert (*e, seq);
371   *e = single_succ_edge (bb);
372 
373   if (any_uncondjump_p (jump))
374     {
375       /* The condition is always true.  */
376       delete_insn (jump);
377       redirect_edge_and_branch_force (*e, dest);
378       return false;
379     }
380 
381   JUMP_LABEL (jump) = label;
382 
383   /* The jump is supposed to handle an unlikely special case.  */
384   add_reg_note (jump, REG_BR_PROB, const0_rtx);
385 
386   LABEL_NUSES (label)++;
387 
388   make_edge (bb, dest, (*e)->flags & ~EDGE_FALLTHRU);
389   return true;
390 }
391 
392 /* Modify the loop to use the low-overhead looping insn where LOOP
393    describes the loop, DESC describes the number of iterations of the
394    loop, and DOLOOP_SEQ is the low-overhead looping insn sequence to emit
395    at the end of the loop.  CONDITION is the condition separated from the
396    DOLOOP_SEQ.  COUNT is the number of iterations of the LOOP.  */
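
/* For illustration only (a hedged, target-independent sketch of what this
   function produces): conceptually the rewritten loop is

       counter = COUNT;         -- emitted in the loop preheader
     top:
       ...loop body...
       if (--counter != 0)      -- the doloop_end branch at the loop end
         goto top;

   The original conditional jump at the end of the loop is deleted (the
   compare it used may remain if still live), and when
   DESC->noloop_assumptions is nonempty, extra tests are inserted before the
   loop that load the "no iterations" value into the counter instead.  */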
397 
398 static void
399 doloop_modify (struct loop *loop, struct niter_desc *desc,
400 	       rtx doloop_seq, rtx condition, rtx count)
401 {
402   rtx counter_reg;
403   rtx tmp, noloop = NULL_RTX;
404   rtx sequence;
405   rtx jump_insn;
406   rtx jump_label;
407   int nonneg = 0;
408   bool increment_count;
409   basic_block loop_end = desc->out_edge->src;
410   enum machine_mode mode;
411   rtx true_prob_val;
412   double_int iterations;
413 
414   jump_insn = BB_END (loop_end);
415 
416   if (dump_file)
417     {
418       fprintf (dump_file, "Doloop: Inserting doloop pattern (");
419       if (desc->const_iter)
420 	fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
421       else
422 	fputs ("runtime", dump_file);
423       fputs (" iterations).\n", dump_file);
424     }
425 
426   /* Get the probability of the original branch. If it exists we would
427      need to update REG_BR_PROB of the new jump_insn.  */
428   true_prob_val = find_reg_note (jump_insn, REG_BR_PROB, NULL_RTX);
429 
430   /* Discard original jump to continue loop.  The original compare
431      result may still be live, so it cannot be discarded explicitly.  */
432   delete_insn (jump_insn);
433 
434   counter_reg = XEXP (condition, 0);
435   if (GET_CODE (counter_reg) == PLUS)
436     counter_reg = XEXP (counter_reg, 0);
437   mode = GET_MODE (counter_reg);
438 
439   increment_count = false;
440   switch (GET_CODE (condition))
441     {
442     case NE:
443       /* Currently only NE tests against zero and one are supported.  */
444       noloop = XEXP (condition, 1);
445       if (noloop != const0_rtx)
446 	{
447 	  gcc_assert (noloop == const1_rtx);
448 	  increment_count = true;
449 	}
450       break;
451 
452     case GE:
453       /* Currently only GE tests against zero are supported.  */
454       gcc_assert (XEXP (condition, 1) == const0_rtx);
455 
456       noloop = constm1_rtx;
457 
458       /* The iteration count does not need incrementing for a GE test.  */
459       increment_count = false;
460 
461       /* Determine if the iteration counter will be non-negative.
462 	 Note that the maximum value loaded is iterations_max - 1.  */
463       if (max_loop_iterations (loop, &iterations)
464 	  && (iterations.ule (double_int_one.llshift
465 			       (GET_MODE_PRECISION (mode) - 1,
466 				GET_MODE_PRECISION (mode)))))
467 	nonneg = 1;
468       break;
469 
470       /* Abort if an invalid doloop pattern has been generated.  */
471     default:
472       gcc_unreachable ();
473     }
474 
475   if (increment_count)
476     count = simplify_gen_binary (PLUS, mode, count, const1_rtx);
477 
478   /* Insert initialization of the count register into the loop header.  */
479   start_sequence ();
480   tmp = force_operand (count, counter_reg);
481   convert_move (counter_reg, tmp, 1);
482   sequence = get_insns ();
483   end_sequence ();
484   emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));
485 
486   if (desc->noloop_assumptions)
487     {
488       rtx ass = copy_rtx (desc->noloop_assumptions);
489       basic_block preheader = loop_preheader_edge (loop)->src;
490       basic_block set_zero
491 	      = split_edge (loop_preheader_edge (loop));
492       basic_block new_preheader
493 	      = split_edge (loop_preheader_edge (loop));
494       edge te;
495 
496       /* Expand the conditions testing the noloop assumptions and, if any
497 	 of them holds, reset the count register to 0.  */
498       redirect_edge_and_branch_force (single_succ_edge (preheader), new_preheader);
499       set_immediate_dominator (CDI_DOMINATORS, new_preheader, preheader);
500 
501       set_zero->count = 0;
502       set_zero->frequency = 0;
503 
504       te = single_succ_edge (preheader);
505       for (; ass; ass = XEXP (ass, 1))
506 	if (!add_test (XEXP (ass, 0), &te, set_zero))
507 	  break;
508 
509       if (ass)
510 	{
511 	  /* We reached a condition that is always true.  This is very hard to
512 	     reproduce (such a loop does not roll, and thus it would most
513 	     likely get optimized out by some of the preceding optimizations).
514 	     In fact, I do not have any testcase for it.  However, it would
515 	     also be very hard to show that it is impossible, so we must
516 	     handle this case.  */
517 	  set_zero->count = preheader->count;
518 	  set_zero->frequency = preheader->frequency;
519 	}
520 
521       if (EDGE_COUNT (set_zero->preds) == 0)
522 	{
523 	  /* All the conditions were simplified to false, remove the
524 	     unreachable set_zero block.  */
525 	  delete_basic_block (set_zero);
526 	}
527       else
528 	{
529 	  /* Reset the counter to zero in the set_zero block.  */
530 	  start_sequence ();
531 	  convert_move (counter_reg, noloop, 0);
532 	  sequence = get_insns ();
533 	  end_sequence ();
534 	  emit_insn_after (sequence, BB_END (set_zero));
535 
536 	  set_immediate_dominator (CDI_DOMINATORS, set_zero,
537 				   recompute_dominator (CDI_DOMINATORS,
538 							set_zero));
539 	}
540 
541       set_immediate_dominator (CDI_DOMINATORS, new_preheader,
542 			       recompute_dominator (CDI_DOMINATORS,
543 						    new_preheader));
544     }
545 
546   /* Some targets (e.g., C4x) need to initialize special looping
547      registers.  */
548 #ifdef HAVE_doloop_begin
549   {
550     rtx init;
551     unsigned level = get_loop_level (loop) + 1;
552     double_int iter;
553     rtx iter_rtx;
554 
555     if (!max_loop_iterations (loop, &iter)
556 	|| !iter.fits_shwi ())
557       iter_rtx = const0_rtx;
558     else
559       iter_rtx = GEN_INT (iter.to_shwi());
560     init = gen_doloop_begin (counter_reg,
561 			     desc->const_iter ? desc->niter_expr : const0_rtx,
562 			     iter_rtx,
563 			     GEN_INT (level),
564 			     doloop_seq);
565     if (init)
566       {
567 	start_sequence ();
568 	emit_insn (init);
569 	sequence = get_insns ();
570 	end_sequence ();
571 	emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));
572       }
573   }
574 #endif
575 
576   /* Insert the new low-overhead looping insn.  */
577   emit_jump_insn_after (doloop_seq, BB_END (loop_end));
578   jump_insn = BB_END (loop_end);
579   jump_label = block_label (desc->in_edge->dest);
580   JUMP_LABEL (jump_insn) = jump_label;
581   LABEL_NUSES (jump_label)++;
582 
583   /* Ensure the right fallthru edge is marked, in case we have reversed
584      the condition.  */
585   desc->in_edge->flags &= ~EDGE_FALLTHRU;
586   desc->out_edge->flags |= EDGE_FALLTHRU;
587 
588   /* Add a REG_NONNEG note if the counter register is known to be
589      non-negative whenever the branch back to the loop top is taken.  */
590   if (nonneg)
591     add_reg_note (jump_insn, REG_NONNEG, NULL_RTX);
592 
593   /* Update the REG_BR_PROB note.  */
594   if (true_prob_val)
595     {
596       /* Seems safer to use the branch probability.  */
597       add_reg_note (jump_insn, REG_BR_PROB,
598 		    GEN_INT (desc->in_edge->probability));
599     }
600 }
601 
602 /* Process the loop described by LOOP, validating that it is suitable for
603    conversion to use a low-overhead looping instruction, and replacing the
604    jump insn where suitable.  Return true if the loop was successfully
605    modified.  */
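
/* Roughly, the steps below are: analyze the induction variables and obtain
   the simple-exit description; reject loops that doloop_valid_p dislikes,
   whose expected iteration count is too small to be profitable, or whose
   iteration count is too costly to compute; ask the target for a doloop_end
   pattern, retrying in word_mode if the first attempt fails; extract the
   canonical condition from that pattern with doloop_condition_get; and
   finally rewrite the loop in doloop_modify.  */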
606 
607 static bool
608 doloop_optimize (struct loop *loop)
609 {
610   enum machine_mode mode;
611   rtx doloop_seq, doloop_pat, doloop_reg;
612   rtx iterations, count;
613   rtx iterations_max;
614   rtx start_label;
615   rtx condition;
616   unsigned level, est_niter;
617   int max_cost;
618   struct niter_desc *desc;
619   unsigned word_mode_size;
620   unsigned HOST_WIDE_INT word_mode_max;
621   double_int iter;
622   int entered_at_top;
623 
624   if (dump_file)
625     fprintf (dump_file, "Doloop: Processing loop %d.\n", loop->num);
626 
627   iv_analysis_loop_init (loop);
628 
629   /* Find the simple exit of a LOOP.  */
630   desc = get_simple_loop_desc (loop);
631 
632   /* Check that loop is a candidate for a low-overhead looping insn.  */
633   if (!doloop_valid_p (loop, desc))
634     {
635       if (dump_file)
636 	fprintf (dump_file,
637 		 "Doloop: The loop is not suitable.\n");
638       return false;
639     }
640   mode = desc->mode;
641 
642   est_niter = 3;
643   if (desc->const_iter)
644     est_niter = desc->niter;
645   /* If the estimate on number of iterations is reliable (comes from profile
646      feedback), use it.  Do not use it normally, since the expected number
647      of iterations of an unrolled loop is 2.  */
648   if (loop->header->count)
649     est_niter = expected_loop_iterations (loop);
650 
651   if (est_niter < 3)
652     {
653       if (dump_file)
654 	fprintf (dump_file,
655 		 "Doloop: Too few iterations (%u) to be profitable.\n",
656 		 est_niter);
657       return false;
658     }
659 
660   max_cost
661     = COSTS_N_INSNS (PARAM_VALUE (PARAM_MAX_ITERATIONS_COMPUTATION_COST));
662   if (set_src_cost (desc->niter_expr, optimize_loop_for_speed_p (loop))
663       > max_cost)
664     {
665       if (dump_file)
666 	fprintf (dump_file,
667 		 "Doloop: number of iterations too costly to compute.\n");
668       return false;
669     }
670 
671   count = copy_rtx (desc->niter_expr);
672   iterations = desc->const_iter ? desc->niter_expr : const0_rtx;
673   if (!max_loop_iterations (loop, &iter)
674       || !iter.fits_shwi ())
675     iterations_max = const0_rtx;
676   else
677     iterations_max = GEN_INT (iter.to_shwi());
678   level = get_loop_level (loop) + 1;
679 
680   /* Generate looping insn.  If the pattern FAILs then give up trying
681      to modify the loop since there is some aspect the back-end does
682      not like.  */
683   start_label = block_label (desc->in_edge->dest);
684   doloop_reg = gen_reg_rtx (mode);
685   entered_at_top = (loop->latch == desc->in_edge->dest
686 		    && contains_no_active_insn_p (loop->latch));
687   doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max,
688 			       GEN_INT (level), start_label,
689 			       GEN_INT (entered_at_top));
690 
691   word_mode_size = GET_MODE_PRECISION (word_mode);
692   word_mode_max
693 	  = ((unsigned HOST_WIDE_INT) 1 << (word_mode_size - 1) << 1) - 1;
694   if (! doloop_seq
695       && mode != word_mode
696       /* Before trying a mode different from the one in which the number of
697 	 iterations is computed, we must be sure that the number of iterations
698 	 fits into the new mode.  */
699       && (word_mode_size >= GET_MODE_PRECISION (mode)
700 	  || iter.ule (double_int::from_shwi (word_mode_max))))
701     {
702       if (word_mode_size > GET_MODE_PRECISION (mode))
703 	{
704 	  count = simplify_gen_unary (ZERO_EXTEND, word_mode,
705 				      count, mode);
706 	  iterations = simplify_gen_unary (ZERO_EXTEND, word_mode,
707 					   iterations, mode);
708 	  iterations_max = simplify_gen_unary (ZERO_EXTEND, word_mode,
709 					       iterations_max, mode);
710 	}
711       else
712 	{
713 	  count = lowpart_subreg (word_mode, count, mode);
714 	  iterations = lowpart_subreg (word_mode, iterations, mode);
715 	  iterations_max = lowpart_subreg (word_mode, iterations_max, mode);
716 	}
717       PUT_MODE (doloop_reg, word_mode);
718       doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max,
719 				   GEN_INT (level), start_label,
720 				   GEN_INT (entered_at_top));
721     }
722   if (! doloop_seq)
723     {
724       if (dump_file)
725 	fprintf (dump_file,
726 		 "Doloop: Target unwilling to use doloop pattern!\n");
727       return false;
728     }
729 
730   /* If multiple instructions were created, the last must be the
731      jump instruction.  Also, a raw define_insn may yield a plain
732      pattern.  */
733   doloop_pat = doloop_seq;
734   if (INSN_P (doloop_pat))
735     {
736       while (NEXT_INSN (doloop_pat) != NULL_RTX)
737 	doloop_pat = NEXT_INSN (doloop_pat);
738       if (!JUMP_P (doloop_pat))
739 	doloop_pat = NULL_RTX;
740     }
741 
742   if (! doloop_pat
743       || ! (condition = doloop_condition_get (doloop_pat)))
744     {
745       if (dump_file)
746 	fprintf (dump_file, "Doloop: Unrecognizable doloop pattern!\n");
747       return false;
748     }
749 
750   doloop_modify (loop, desc, doloop_seq, condition, count);
751   return true;
752 }
753 
754 /* This is the main entry point.  Process all loops using doloop_optimize.  */
755 
756 void
757 doloop_optimize_loops (void)
758 {
759   loop_iterator li;
760   struct loop *loop;
761 
762   FOR_EACH_LOOP (li, loop, 0)
763     {
764       doloop_optimize (loop);
765     }
766 
767   iv_analysis_done ();
768 
769 #ifdef ENABLE_CHECKING
770   verify_loop_structure ();
771 #endif
772 }
773 #endif /* HAVE_doloop_end */
774 
775