xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-if-conv.c (revision 7330f729ccf0bd976a06f95fad452fe774fc7fd1)
1 /* If-conversion for vectorizer.
2    Copyright (C) 2004-2017 Free Software Foundation, Inc.
3    Contributed by Devang Patel <dpatel@apple.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 /* This pass implements a tree level if-conversion of loops.  Its
22    initial goal is to help the vectorizer to vectorize loops with
23    conditions.
24 
25    A short description of if-conversion:
26 
27      o Decide if a loop is if-convertible or not.
28      o Walk all loop basic blocks in breadth first order (BFS order).
29        o Remove conditional statements (at the end of basic block)
30          and propagate condition into destination basic blocks'
31 	 predicate list.
32        o Replace modify expression with conditional modify expression
33          using current basic block's condition.
34      o Merge all basic blocks
35        o Replace phi nodes with conditional modify expr
36        o Merge all basic blocks into header
37 
38      Sample transformation:
39 
40      INPUT
41      -----
42 
43      # i_23 = PHI <0(0), i_18(10)>;
44      <L0>:;
45      j_15 = A[i_23];
46      if (j_15 > 41) goto <L1>; else goto <L17>;
47 
48      <L17>:;
49      goto <bb 3> (<L3>);
50 
51      <L1>:;
52 
53      # iftmp.2_4 = PHI <0(8), 42(2)>;
54      <L3>:;
55      A[i_23] = iftmp.2_4;
56      i_18 = i_23 + 1;
57      if (i_18 <= 15) goto <L19>; else goto <L18>;
58 
59      <L19>:;
60      goto <bb 1> (<L0>);
61 
62      <L18>:;
63 
64      OUTPUT
65      ------
66 
67      # i_23 = PHI <0(0), i_18(10)>;
68      <L0>:;
69      j_15 = A[i_23];
70 
71      <L3>:;
72      iftmp.2_4 = j_15 > 41 ? 42 : 0;
73      A[i_23] = iftmp.2_4;
74      i_18 = i_23 + 1;
75      if (i_18 <= 15) goto <L19>; else goto <L18>;
76 
77      <L19>:;
78      goto <bb 1> (<L0>);
79 
80      <L18>:;
81 */
82 
83 #include "config.h"
84 #include "system.h"
85 #include "coretypes.h"
86 #include "backend.h"
87 #include "rtl.h"
88 #include "tree.h"
89 #include "gimple.h"
90 #include "cfghooks.h"
91 #include "tree-pass.h"
92 #include "ssa.h"
93 #include "expmed.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
96 #include "alias.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
106 #include "cfgloop.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
113 #include "dbgcnt.h"
114 #include "tree-hash-traits.h"
115 #include "varasm.h"
116 #include "builtins.h"
117 #include "params.h"
118 #include "cfganal.h"
119 
/* Only if-convert PHIs whose argument count does not exceed the
   --param max-tree-if-conversion-phi-args limit, unless a simd pragma
   forces the loop's conversion.  */
#define MAX_PHI_ARG_NUM \
  ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))

/* Indicate if new load/store that needs to be predicated is introduced
   during if conversion.  */
static bool any_pred_load_store;

/* Indicate if there are any complicated PHIs that need to be handled in
   if-conversion.  Complicated PHI has more than two arguments and can't
   be degenerated to two arguments PHI.  See more information in comment
   before phi_convertible_by_degenerating_args.  */
static bool any_complicated_phi;
134 
/* Hash traits for struct innermost_loop_behavior, keyed on the four
   address components (base, offset, init, step).  It depends on the
   user to free the memory.  */

struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
{
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &,
			    const compare_type &);
};
144 
145 inline hashval_t
146 innermost_loop_behavior_hash::hash (const value_type &e)
147 {
148   hashval_t hash;
149 
150   hash = iterative_hash_expr (e->base_address, 0);
151   hash = iterative_hash_expr (e->offset, hash);
152   hash = iterative_hash_expr (e->init, hash);
153   return iterative_hash_expr (e->step, hash);
154 }
155 
156 inline bool
157 innermost_loop_behavior_hash::equal (const value_type &e1,
158 				     const compare_type &e2)
159 {
160   if ((e1->base_address && !e2->base_address)
161       || (!e1->base_address && e2->base_address)
162       || (!e1->offset && e2->offset)
163       || (e1->offset && !e2->offset)
164       || (!e1->init && e2->init)
165       || (e1->init && !e2->init)
166       || (!e1->step && e2->step)
167       || (e1->step && !e2->step))
168     return false;
169 
170   if (e1->base_address && e2->base_address
171       && !operand_equal_p (e1->base_address, e2->base_address, 0))
172     return false;
173   if (e1->offset && e2->offset
174       && !operand_equal_p (e1->offset, e2->offset, 0))
175     return false;
176   if (e1->init && e2->init
177       && !operand_equal_p (e1->init, e2->init, 0))
178     return false;
179   if (e1->step && e2->step
180       && !operand_equal_p (e1->step, e2->step, 0))
181     return false;
182 
183   return true;
184 }
185 
/* List of basic blocks in if-conversion-suitable order.  */
static basic_block *ifc_bbs;

/* Hash table to store <DR's innermost loop behavior, DR> pairs.  The
   first data reference seen with a given behavior becomes the "master"
   DR for all equivalent references.  */
static hash_map<innermost_loop_behavior_hash,
		data_reference_p> *innermost_DR_map;

/* Hash table to store <base reference, DR> pairs.  */
static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
195 
/* Structure used to predicate basic blocks.  This is attached to the
   ->aux field of the BBs in the loop to be if-converted.  */
struct bb_predicate {

  /* The condition under which this basic block is executed.  */
  tree predicate;

  /* PREDICATE is gimplified, and the sequence of statements is
     recorded here, in order to avoid the duplication of computations
     that occur in previous conditions.  See PR44483.  */
  gimple_seq predicate_gimplified_stmts;
};
208 
209 /* Returns true when the basic block BB has a predicate.  */
210 
211 static inline bool
212 bb_has_predicate (basic_block bb)
213 {
214   return bb->aux != NULL;
215 }
216 
217 /* Returns the gimplified predicate for basic block BB.  */
218 
219 static inline tree
220 bb_predicate (basic_block bb)
221 {
222   return ((struct bb_predicate *) bb->aux)->predicate;
223 }
224 
225 /* Sets the gimplified predicate COND for basic block BB.  */
226 
227 static inline void
228 set_bb_predicate (basic_block bb, tree cond)
229 {
230   gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
231 	       && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
232 	      || is_gimple_condexpr (cond));
233   ((struct bb_predicate *) bb->aux)->predicate = cond;
234 }
235 
236 /* Returns the sequence of statements of the gimplification of the
237    predicate for basic block BB.  */
238 
239 static inline gimple_seq
240 bb_predicate_gimplified_stmts (basic_block bb)
241 {
242   return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
243 }
244 
245 /* Sets the sequence of statements STMTS of the gimplification of the
246    predicate for basic block BB.  */
247 
248 static inline void
249 set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
250 {
251   ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
252 }
253 
254 /* Adds the sequence of statements STMTS to the sequence of statements
255    of the predicate for basic block BB.  */
256 
257 static inline void
258 add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
259 {
260   gimple_seq_add_seq_without_update
261     (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
262 }
263 
264 /* Initializes to TRUE the predicate of basic block BB.  */
265 
266 static inline void
267 init_bb_predicate (basic_block bb)
268 {
269   bb->aux = XNEW (struct bb_predicate);
270   set_bb_predicate_gimplified_stmts (bb, NULL);
271   set_bb_predicate (bb, boolean_true_node);
272 }
273 
274 /* Release the SSA_NAMEs associated with the predicate of basic block BB,
275    but don't actually free it.  */
276 
277 static inline void
278 release_bb_predicate (basic_block bb)
279 {
280   gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
281   if (stmts)
282     {
283       if (flag_checking)
284 	for (gimple_stmt_iterator i = gsi_start (stmts);
285 	     !gsi_end_p (i); gsi_next (&i))
286 	  gcc_assert (! gimple_use_ops (gsi_stmt (i)));
287 
288       set_bb_predicate_gimplified_stmts (bb, NULL);
289     }
290 }
291 
292 /* Free the predicate of basic block BB.  */
293 
294 static inline void
295 free_bb_predicate (basic_block bb)
296 {
297   if (!bb_has_predicate (bb))
298     return;
299 
300   release_bb_predicate (bb);
301   free (bb->aux);
302   bb->aux = NULL;
303 }
304 
305 /* Reinitialize predicate of BB with the true predicate.  */
306 
307 static inline void
308 reset_bb_predicate (basic_block bb)
309 {
310   if (!bb_has_predicate (bb))
311     init_bb_predicate (bb);
312   else
313     {
314       release_bb_predicate (bb);
315       set_bb_predicate (bb, boolean_true_node);
316     }
317 }
318 
319 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
320    the expression EXPR.  Inserts the statement created for this
321    computation before GSI and leaves the iterator GSI at the same
322    statement.  */
323 
324 static tree
325 ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
326 {
327   tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
328   gimple *stmt = gimple_build_assign (new_name, expr);
329   gimple_set_vuse (stmt, gimple_vuse (gsi_stmt (*gsi)));
330   gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
331   return new_name;
332 }
333 
334 /* Return true when COND is a false predicate.  */
335 
336 static inline bool
337 is_false_predicate (tree cond)
338 {
339   return (cond != NULL_TREE
340 	  && (cond == boolean_false_node
341 	      || integer_zerop (cond)));
342 }
343 
344 /* Return true when COND is a true predicate.  */
345 
346 static inline bool
347 is_true_predicate (tree cond)
348 {
349   return (cond == NULL_TREE
350 	  || cond == boolean_true_node
351 	  || integer_onep (cond));
352 }
353 
354 /* Returns true when BB has a predicate that is not trivial: true or
355    NULL_TREE.  */
356 
357 static inline bool
358 is_predicated (basic_block bb)
359 {
360   return !is_true_predicate (bb_predicate (bb));
361 }
362 
363 /* Parses the predicate COND and returns its comparison code and
364    operands OP0 and OP1.  */
365 
366 static enum tree_code
367 parse_predicate (tree cond, tree *op0, tree *op1)
368 {
369   gimple *s;
370 
371   if (TREE_CODE (cond) == SSA_NAME
372       && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
373     {
374       if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
375 	{
376 	  *op0 = gimple_assign_rhs1 (s);
377 	  *op1 = gimple_assign_rhs2 (s);
378 	  return gimple_assign_rhs_code (s);
379 	}
380 
381       else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
382 	{
383 	  tree op = gimple_assign_rhs1 (s);
384 	  tree type = TREE_TYPE (op);
385 	  enum tree_code code = parse_predicate (op, op0, op1);
386 
387 	  return code == ERROR_MARK ? ERROR_MARK
388 	    : invert_tree_comparison (code, HONOR_NANS (type));
389 	}
390 
391       return ERROR_MARK;
392     }
393 
394   if (COMPARISON_CLASS_P (cond))
395     {
396       *op0 = TREE_OPERAND (cond, 0);
397       *op1 = TREE_OPERAND (cond, 1);
398       return TREE_CODE (cond);
399     }
400 
401   return ERROR_MARK;
402 }
403 
404 /* Returns the fold of predicate C1 OR C2 at location LOC.  */
405 
406 static tree
407 fold_or_predicates (location_t loc, tree c1, tree c2)
408 {
409   tree op1a, op1b, op2a, op2b;
410   enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
411   enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
412 
413   if (code1 != ERROR_MARK && code2 != ERROR_MARK)
414     {
415       tree t = maybe_fold_or_comparisons (code1, op1a, op1b,
416 					  code2, op2a, op2b);
417       if (t)
418 	return t;
419     }
420 
421   return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
422 }
423 
/* Returns either a COND_EXPR or the folded expression if the folded
   expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
   a constant or a SSA_NAME.  */

static tree
fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
{
  tree rhs1, lhs1, cond_expr;

  /* If COND is comparison r != 0 and r has boolean type, convert COND
     to SSA_NAME to accept by vect bool pattern.  */
  if (TREE_CODE (cond) == NE_EXPR)
    {
      tree op0 = TREE_OPERAND (cond, 0);
      tree op1 = TREE_OPERAND (cond, 1);
      if (TREE_CODE (op0) == SSA_NAME
	  && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
	  && (integer_zerop (op1)))
	cond = op0;
    }
  /* Try constant folding first; if that fails, fall back to building
     the COND_EXPR unfolded.  */
  cond_expr = fold_ternary (COND_EXPR, type, cond, rhs, lhs);

  if (cond_expr == NULL_TREE)
    return build3 (COND_EXPR, type, cond, rhs, lhs);

  STRIP_USELESS_TYPE_CONVERSION (cond_expr);

  /* A constant or SSA_NAME result can be returned as-is.  */
  if (is_gimple_val (cond_expr))
    return cond_expr;

  /* Rebuild an ABS_EXPR from its stripped operand if the operand is a
     gimple value.  */
  if (TREE_CODE (cond_expr) == ABS_EXPR)
    {
      rhs1 = TREE_OPERAND (cond_expr, 1);
      STRIP_USELESS_TYPE_CONVERSION (rhs1);
      if (is_gimple_val (rhs1))
	return build1 (ABS_EXPR, type, rhs1);
    }

  /* Likewise for MIN_EXPR / MAX_EXPR with both operands gimple
     values.  */
  if (TREE_CODE (cond_expr) == MIN_EXPR
      || TREE_CODE (cond_expr) == MAX_EXPR)
    {
      lhs1 = TREE_OPERAND (cond_expr, 0);
      STRIP_USELESS_TYPE_CONVERSION (lhs1);
      rhs1 = TREE_OPERAND (cond_expr, 1);
      STRIP_USELESS_TYPE_CONVERSION (rhs1);
      if (is_gimple_val (rhs1) && is_gimple_val (lhs1))
	return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
    }
  /* The fold produced something we cannot use directly; keep the
     original COND_EXPR form.  */
  return build3 (COND_EXPR, type, cond, rhs, lhs);
}
474 
/* Add condition NC to the predicate list of basic block BB.  LOOP is
   the loop to be if-converted.  Use predicate of cd-equivalent block
   for join bb if it exists: we call basic blocks bb1 and bb2
   cd-equivalent if they are executed under the same condition.  */

static inline void
add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
{
  tree bc, *tp;
  basic_block dom_bb;

  /* A true condition adds no information; nothing to record.  */
  if (is_true_predicate (nc))
    return;

  /* If dominance tells us this basic block is always executed,
     don't record any predicates for it.  */
  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return;

  dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
  /* We use notion of cd equivalence to get simpler predicate for
     join block, e.g. if join block has 2 predecessors with predicates
     p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
     p1 & p2 | p1 & !p2.  BB is cd-equivalent to its immediate
     dominator when BB post-dominates it.  */
  if (dom_bb != loop->header
      && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
    {
      gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
      bc = bb_predicate (dom_bb);
      if (!is_true_predicate (bc))
	set_bb_predicate (bb, bc);
      else
	gcc_assert (is_true_predicate (bb_predicate (bb)));
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
		 dom_bb->index, bb->index);
      return;
    }

  /* OR the new condition into BB's existing predicate, if any.  */
  if (!is_predicated (bb))
    bc = nc;
  else
    {
      bc = bb_predicate (bb);
      bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
      if (is_true_predicate (bc))
	{
	  /* The disjunction covers all paths: BB is unconditional.  */
	  reset_bb_predicate (bb);
	  return;
	}
    }

  /* Allow a TRUTH_NOT_EXPR around the main predicate.  */
  if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
    tp = &TREE_OPERAND (bc, 0);
  else
    tp = &bc;
  /* Gimplify the predicate if needed, recording the produced
     statements so they can be emitted later.  */
  if (!is_gimple_condexpr (*tp))
    {
      gimple_seq stmts;
      *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
      add_bb_predicate_gimplified_stmts (bb, stmts);
    }
  set_bb_predicate (bb, bc);
}
540 
541 /* Add the condition COND to the previous condition PREV_COND, and add
542    this to the predicate list of the destination of edge E.  LOOP is
543    the loop to be if-converted.  */
544 
545 static void
546 add_to_dst_predicate_list (struct loop *loop, edge e,
547 			   tree prev_cond, tree cond)
548 {
549   if (!flow_bb_inside_loop_p (loop, e->dest))
550     return;
551 
552   if (!is_true_predicate (prev_cond))
553     cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
554 			prev_cond, cond);
555 
556   if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
557     add_to_predicate_list (loop, e->dest, cond);
558 }
559 
560 /* Return true if one of the successor edges of BB exits LOOP.  */
561 
562 static bool
563 bb_with_exit_edge_p (struct loop *loop, basic_block bb)
564 {
565   edge e;
566   edge_iterator ei;
567 
568   FOR_EACH_EDGE (e, ei, bb->succs)
569     if (loop_exit_edge_p (loop, e))
570       return true;
571 
572   return false;
573 }
574 
/* Given PHI which has more than two arguments, this function checks if
   it's if-convertible by degenerating its arguments.  Specifically, if
   below two conditions are satisfied:

     1) Number of PHI arguments with different values equals to 2 and one
	argument has the only occurrence.
     2) The edge corresponding to the unique argument isn't critical edge.

   Such PHI can be handled as PHIs have only two arguments.  For example,
   below PHI:

     res = PHI <A_1(e1), A_1(e2), A_2(e3)>;

   can be transformed into:

     res = (predicate of e3) ? A_2 : A_1;

   Return TRUE if it is the case, FALSE otherwise.  */

static bool
phi_convertible_by_degenerating_args (gphi *phi)
{
  edge e;
  tree arg, t1 = NULL, t2 = NULL;
  /* i1/i2 remember the last argument index for each distinct value,
     n1/n2 count the occurrences of each.  */
  unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
  unsigned int num_args = gimple_phi_num_args (phi);

  gcc_assert (num_args > 2);

  /* Bucket the arguments into at most two distinct values; any third
     distinct value disqualifies the PHI.  */
  for (i = 0; i < num_args; i++)
    {
      arg = gimple_phi_arg_def (phi, i);
      if (t1 == NULL || operand_equal_p (t1, arg, 0))
	{
	  n1++;
	  i1 = i;
	  t1 = arg;
	}
      else if (t2 == NULL || operand_equal_p (t2, arg, 0))
	{
	  n2++;
	  i2 = i;
	  t2 = arg;
	}
      else
	return false;
    }

  /* One of the two values must occur exactly once.  */
  if (n1 != 1 && n2 != 1)
    return false;

  /* Check if the edge corresponding to the unique arg is critical.  */
  e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
  if (EDGE_COUNT (e->src->succs) > 1)
    return false;

  return true;
}
633 
634 /* Return true when PHI is if-convertible.  PHI is part of loop LOOP
635    and it belongs to basic block BB.  Note at this point, it is sure
636    that PHI is if-convertible.  This function updates global variable
637    ANY_COMPLICATED_PHI if PHI is complicated.  */
638 
639 static bool
640 if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
641 {
642   if (dump_file && (dump_flags & TDF_DETAILS))
643     {
644       fprintf (dump_file, "-------------------------\n");
645       print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
646     }
647 
648   if (bb != loop->header
649       && gimple_phi_num_args (phi) > 2
650       && !phi_convertible_by_degenerating_args (phi))
651     any_complicated_phi = true;
652 
653   return true;
654 }
655 
/* Records the status of a data reference.  This struct is attached to
   each DR->aux field.  */

struct ifc_dr {
  /* True when the reference is read or written on every iteration.  */
  bool rw_unconditionally;
  /* True when the reference is written on every iteration.  */
  bool w_unconditionally;
  /* True when the base object is written at least once
     unconditionally.  */
  bool written_at_least_once;

  /* Accumulated (OR-ed) predicates under which the reference is read
     or written, written, and under which its base is written.  */
  tree rw_predicate;
  tree w_predicate;
  tree base_w_predicate;
};

/* Accessors for the ifc_dr hanging off a data reference's aux field.  */
#define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
#define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
#define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
#define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
673 
/* Iterates over DR's and stores refs, DR and base refs, DR pairs in
   HASH tables.  While storing them in HASH table, it checks if the
   reference is unconditionally read or written and stores that as a flag
   information.  For base reference it checks if it is written at least
   once unconditionally and stores it as flag information along with DR.
   In other words for every data reference A in STMT there exist other
   accesses to a data reference with the same base with predicates that
   add up (OR-up) to the true predicate: this ensures that the data
   reference A is touched (read or written) on every iteration of the
   if-converted loop.  */
static void
hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
{

  data_reference_p *master_dr, *base_master_dr;
  tree base_ref = DR_BASE_OBJECT (a);
  innermost_loop_behavior *innermost = &DR_INNERMOST (a);
  /* Predicate of the block containing the access.  */
  tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
  bool exist1, exist2;

  /* The first DR with this innermost behavior becomes the master.  */
  master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
  if (!exist1)
    *master_dr = a;

  if (DR_IS_WRITE (a))
    {
      /* OR this access's predicate into the master's write predicate;
	 a true result means the location is written every iteration.  */
      IFC_DR (*master_dr)->w_predicate
	= fold_or_predicates (UNKNOWN_LOCATION, ca,
			      IFC_DR (*master_dr)->w_predicate);
      if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
	DR_W_UNCONDITIONALLY (*master_dr) = true;
    }
  /* Likewise for the combined read-or-write predicate.  */
  IFC_DR (*master_dr)->rw_predicate
    = fold_or_predicates (UNKNOWN_LOCATION, ca,
			  IFC_DR (*master_dr)->rw_predicate);
  if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
    DR_RW_UNCONDITIONALLY (*master_dr) = true;

  if (DR_IS_WRITE (a))
    {
      /* Track writes per base object too, to prove the base is
	 written at least once unconditionally.  */
      base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
      if (!exist2)
	*base_master_dr = a;
      IFC_DR (*base_master_dr)->base_w_predicate
	= fold_or_predicates (UNKNOWN_LOCATION, ca,
			      IFC_DR (*base_master_dr)->base_w_predicate);
      if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
	DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
    }
}
724 
/* Return TRUE if can prove the index IDX of an array reference REF is
   within array bound.  Return false otherwise.  DTA is the containing
   loop, passed through for_each_index.  */

static bool
idx_within_array_bound (tree ref, tree *idx, void *dta)
{
  bool overflow;
  widest_int niter, valid_niter, delta, wi_step;
  tree ev, init, step;
  tree low, high;
  struct loop *loop = (struct loop*) dta;

  /* Only support within-bound access for array references.  */
  if (TREE_CODE (ref) != ARRAY_REF)
    return false;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (ref))
    return false;

  /* Describe the index as INIT + STEP * iteration via scalar
     evolution analysis.  */
  ev = analyze_scalar_evolution (loop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init || TREE_CODE (init) != INTEGER_CST
      || (step && TREE_CODE (step) != INTEGER_CST))
    return false;

  low = array_ref_low_bound (ref);
  high = array_ref_up_bound (ref);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high || TREE_CODE (high) != INTEGER_CST)
    return false;

  /* Check if the initial idx is within bound.  */
  if (wi::to_widest (init) < wi::to_widest (low)
      || wi::to_widest (init) > wi::to_widest (high))
    return false;

  /* The idx is always within bound.  */
  if (!step || integer_zerop (step))
    return true;

  if (!max_loop_iterations (loop, &niter))
    return false;

  /* Compute the distance from INIT to the bound the index moves
     toward, and the per-iteration step magnitude.  */
  if (wi::to_widest (step) < 0)
    {
      delta = wi::to_widest (init) - wi::to_widest (low);
      wi_step = -wi::to_widest (step);
    }
  else
    {
      delta = wi::to_widest (high) - wi::to_widest (init);
      wi_step = wi::to_widest (step);
    }

  valid_niter = wi::div_floor (delta, wi_step, SIGNED, &overflow);
  /* The iteration space of idx is within array bound.  */
  if (!overflow && niter <= valid_niter)
    return true;

  return false;
}
795 
796 /* Return TRUE if ref is a within bound array reference.  */
797 
798 static bool
799 ref_within_array_bound (gimple *stmt, tree ref)
800 {
801   struct loop *loop = loop_containing_stmt (stmt);
802 
803   gcc_assert (loop != NULL);
804   return for_each_index (&ref, idx_within_array_bound, loop);
805 }
806 
807 
808 /* Given a memory reference expression T, return TRUE if base object
809    it refers to is writable.  The base object of a memory reference
810    is the main object being referenced, which is returned by function
811    get_base_address.  */
812 
813 static bool
814 base_object_writable (tree ref)
815 {
816   tree base_tree = get_base_address (ref);
817 
818   return (base_tree
819 	  && DECL_P (base_tree)
820 	  && decl_binds_to_current_def_p (base_tree)
821 	  && !TREE_READONLY (base_tree));
822 }
823 
/* Return true when the memory references of STMT won't trap in the
   if-converted code.  There are two things that we have to check for:

   - writes to memory occur to writable memory: if-conversion of
   memory writes transforms the conditional memory writes into
   unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
   into "A[i] = cond ? foo : A[i]", and as the write to memory may not
   be executed at all in the original code, it may be a readonly
   memory.  To check that A is not const-qualified, we check that
   there exists at least an unconditional write to A in the current
   function.

   - reads or writes to memory are valid memory accesses for every
   iteration.  To check that the memory accesses are correctly formed
   and that we are allowed to read and write in these locations, we
   check that the memory accesses to be if-converted occur at every
   iteration unconditionally.

   Returns true for the memory reference in STMT, same memory reference
   is read or written unconditionally at least once and the base memory
   reference is written unconditionally once.  This is to check reference
   will not write fault.  Also returns true if the memory reference is
   unconditionally read once then we are conditionally writing to memory
   which is defined as read and write and is bound to the definition
   we are seeing.  */
static bool
ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
{
  data_reference_p *master_dr, *base_master_dr;
  /* gimple_uid was set to the 1-based DR index during DR collection.  */
  data_reference_p a = drs[gimple_uid (stmt) - 1];

  tree base = DR_BASE_OBJECT (a);
  innermost_loop_behavior *innermost = &DR_INNERMOST (a);

  gcc_assert (DR_STMT (a) == stmt);
  gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
              || DR_INIT (a) || DR_STEP (a));

  master_dr = innermost_DR_map->get (innermost);
  gcc_assert (master_dr != NULL);

  base_master_dr = baseref_DR_map->get (base);

  /* If a is unconditionally written to it doesn't trap.  */
  if (DR_W_UNCONDITIONALLY (*master_dr))
    return true;

  /* If a is unconditionally accessed then ...

     Even a is conditional access, we can treat it as an unconditional
     one if it's an array reference and all its index are within array
     bound.  */
  if (DR_RW_UNCONDITIONALLY (*master_dr)
      || ref_within_array_bound (stmt, DR_REF (a)))
    {
      /* an unconditional read won't trap.  */
      if (DR_IS_READ (a))
	return true;

      /* an unconditional write won't trap if the base is written
         to unconditionally.  */
      if (base_master_dr
	  && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
	return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
      /* or the base is known to be not readonly.  */
      else if (base_object_writable (DR_REF (a)))
	return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
    }

  return false;
}
895 
896 /* Return true if STMT could be converted into a masked load or store
897    (conditional load or store based on a mask computed from bb predicate).  */
898 
899 static bool
900 ifcvt_can_use_mask_load_store (gimple *stmt)
901 {
902   tree lhs, ref;
903   machine_mode mode;
904   basic_block bb = gimple_bb (stmt);
905   bool is_load;
906 
907   if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
908       || bb->loop_father->dont_vectorize
909       || !gimple_assign_single_p (stmt)
910       || gimple_has_volatile_ops (stmt))
911     return false;
912 
913   /* Check whether this is a load or store.  */
914   lhs = gimple_assign_lhs (stmt);
915   if (gimple_store_p (stmt))
916     {
917       if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
918 	return false;
919       is_load = false;
920       ref = lhs;
921     }
922   else if (gimple_assign_load_p (stmt))
923     {
924       is_load = true;
925       ref = gimple_assign_rhs1 (stmt);
926     }
927   else
928     return false;
929 
930   if (may_be_nonaddressable_p (ref))
931     return false;
932 
933   /* Mask should be integer mode of the same size as the load/store
934      mode.  */
935   mode = TYPE_MODE (TREE_TYPE (lhs));
936   if (int_mode_for_mode (mode) == BLKmode
937       || VECTOR_MODE_P (mode))
938     return false;
939 
940   if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
941     return true;
942 
943   return false;
944 }
945 
/* Return true when STMT is if-convertible.

   GIMPLE_ASSIGN statement is not if-convertible if,
   - it is not movable,
   - it could trap,
   - LHS is not var decl.  */

static bool
if_convertible_gimple_assign_stmt_p (gimple *stmt,
				     vec<data_reference_p> refs)
{
  tree lhs = gimple_assign_lhs (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "-------------------------\n");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  /* Aggregate assignments cannot be predicated.  */
  if (!is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Some of these constraints might be too conservative.  */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || (TREE_CODE (lhs) == SSA_NAME
          && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
      || gimple_has_side_effects (stmt))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "stmt not suitable for ifcvt\n");
      return false;
    }

  /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
     in between if_convertible_loop_p and combine_blocks
     we can perform loop versioning.  */
  gimple_set_plf (stmt, GF_PLF_2, false);

  /* A statement that could trap is still convertible if its memory
     accesses are proven not to trap, or if it can be turned into a
     masked load/store (flagged via GF_PLF_2 for later phases).  */
  if ((! gimple_vuse (stmt)
       || gimple_could_trap_p_1 (stmt, false, false)
       || ! ifcvt_memrefs_wont_trap (stmt, refs))
      && gimple_could_trap_p (stmt))
    {
      if (ifcvt_can_use_mask_load_store (stmt))
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  any_pred_load_store = true;
	  return true;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "tree could trap...\n");
      return false;
    }

  /* When if-converting stores force versioning, likewise if we
     ended up generating store data races.  */
  if (gimple_vdef (stmt))
    any_pred_load_store = true;

  return true;
}
1008 
/* Return true when STMT is if-convertible.

   A statement is if-convertible if:
   - it is an if-convertible GIMPLE_ASSIGN,
   - it is a GIMPLE_LABEL or a GIMPLE_COND,
   - it is builtins call.

   REFS is passed through to if_convertible_gimple_assign_stmt_p.  */

static bool
if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
    case GIMPLE_DEBUG:
    case GIMPLE_COND:
      /* These have no effects that predication could change.  */
      return true;

    case GIMPLE_ASSIGN:
      return if_convertible_gimple_assign_stmt_p (stmt, refs);

    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	if (fndecl)
	  {
	    /* Only const (side-effect free, terminating) calls may be
	       executed unconditionally after if-conversion.  */
	    int flags = gimple_call_flags (stmt);
	    if ((flags & ECF_CONST)
		&& !(flags & ECF_LOOPING_CONST_OR_PURE)
		/* We can only vectorize some builtins at the moment,
		   so restrict if-conversion to those.  */
		&& DECL_BUILT_IN (fndecl))
	      return true;
	  }
	return false;
      }

    default:
      /* Don't know what to do with 'em so don't do anything.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "don't know what to do\n");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      return false;
    }

  /* Unreachable: every switch case above returns.  */
  return true;
}
1057 
1058 /* Assumes that BB has more than 1 predecessors.
1059    Returns false if at least one successor is not on critical edge
1060    and true otherwise.  */
1061 
1062 static inline bool
1063 all_preds_critical_p (basic_block bb)
1064 {
1065   edge e;
1066   edge_iterator ei;
1067 
1068   FOR_EACH_EDGE (e, ei, bb->preds)
1069     if (EDGE_COUNT (e->src->succs) == 1)
1070       return false;
1071   return true;
1072 }
1073 
1074 /* Returns true if at least one successor in on critical edge.  */
1075 static inline bool
1076 has_pred_critical_p (basic_block bb)
1077 {
1078   edge e;
1079   edge_iterator ei;
1080 
1081   FOR_EACH_EDGE (e, ei, bb->preds)
1082     if (EDGE_COUNT (e->src->succs) > 1)
1083       return true;
1084   return false;
1085 }
1086 
/* Return true when BB is if-convertible.  This routine does not check
   basic block's statements and phis.

   A basic block is not if-convertible if:
   - it is non-empty and it is after the exit block (in BFS order),
   - it is after the exit block but before the latch,
   - its edges are not normal.

   EXIT_BB is the basic block containing the exit of the LOOP.  BB is
   inside LOOP.  EXIT_BB is NULL until the caller's walk has reached
   the exit block.  */

static bool
if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
{
  edge e;
  edge_iterator ei;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "----------[%d]-------------\n", bb->index);

  /* A block ending in a multiway branch cannot be predicated.  */
  if (EDGE_COUNT (bb->succs) > 2)
    return false;

  if (exit_bb)
    {
      /* The exit block was already visited, so BB comes after it:
	 only an empty latch is acceptable in that position.  */
      if (bb != loop->latch)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "basic block after exit bb but before latch\n");
	  return false;
	}
      else if (!empty_block_p (bb))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "non empty basic block after exit bb\n");
	  return false;
	}
      /* NOTE(review): BB == LOOP->latch necessarily holds here because
	 the first branch filtered out every other block.  */
      else if (bb == loop->latch
	       && bb != exit_bb
	       && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
	  {
	    if (dump_file && (dump_flags & TDF_DETAILS))
	      fprintf (dump_file, "latch is not dominated by exit_block\n");
	    return false;
	  }
    }

  /* Be less adventurous and handle only normal edges.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
      {
	if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, "Difficult to handle edges\n");
	return false;
      }

  return true;
}
1145 
1146 /* Return true when all predecessor blocks of BB are visited.  The
1147    VISITED bitmap keeps track of the visited blocks.  */
1148 
1149 static bool
1150 pred_blocks_visited_p (basic_block bb, bitmap *visited)
1151 {
1152   edge e;
1153   edge_iterator ei;
1154   FOR_EACH_EDGE (e, ei, bb->preds)
1155     if (!bitmap_bit_p (*visited, e->src->index))
1156       return false;
1157 
1158   return true;
1159 }
1160 
/* Get body of a LOOP in suitable order for if-conversion.  It is
   caller's responsibility to deallocate basic block list.
   If-conversion suitable order is, breadth first sort (BFS) order
   with an additional constraint: select a block only if all its
   predecessors are already selected.  Returns NULL when LOOP
   contains an irreducible region.  */

static basic_block *
get_loop_body_in_if_conv_order (const struct loop *loop)
{
  basic_block *blocks, *blocks_in_bfs_order;
  basic_block bb;
  bitmap visited;
  unsigned int index = 0;
  unsigned int visited_count = 0;

  gcc_assert (loop->num_nodes);
  gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));

  blocks = XCNEWVEC (basic_block, loop->num_nodes);
  visited = BITMAP_ALLOC (NULL);

  blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);

  index = 0;
  while (index < loop->num_nodes)
    {
      bb = blocks_in_bfs_order [index];

      /* Irreducible regions cannot be if-converted: give up.  */
      if (bb->flags & BB_IRREDUCIBLE_LOOP)
	{
	  free (blocks_in_bfs_order);
	  BITMAP_FREE (visited);
	  free (blocks);
	  return NULL;
	}

      if (!bitmap_bit_p (visited, bb->index))
	{
	  /* Select BB only once all its predecessors have been
	     selected; the header is always selectable.  */
	  if (pred_blocks_visited_p (bb, &visited)
	      || bb == loop->header)
	    {
	      /* This block is now visited.  */
	      bitmap_set_bit (visited, bb->index);
	      blocks[visited_count++] = bb;
	    }
	}

      index++;

      /* Re-sweep the BFS list until every block has been selected.  */
      if (index == loop->num_nodes
	  && visited_count != loop->num_nodes)
	/* Not done yet.  */
	index = 0;
    }
  free (blocks_in_bfs_order);
  BITMAP_FREE (visited);
  return blocks;
}
1219 
/* Computes the predicates for all the basic blocks in LOOP.

   predicate_bbs first allocates the predicates of the basic blocks.
   These fields are then initialized with the tree expressions
   representing the predicates under which a basic block is executed
   in the LOOP.  As the loop->header is executed at each iteration, it
   has the "true" predicate.  Other statements executed under a
   condition are predicated with that condition, for example

   | if (x)
   |   S1;
   | else
   |   S2;

   S1 will be predicated with "x", and
   S2 will be predicated with "!x".  */

static void
predicate_bbs (loop_p loop)
{
  unsigned int i;

  for (i = 0; i < loop->num_nodes; i++)
    init_bb_predicate (ifc_bbs[i]);

  /* Walk the blocks in if-conversion order so each block's predicate
     is complete before it is propagated to its successors.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      tree cond;
      gimple *stmt;

      /* The loop latch and loop exit block are always executed and
	 have no extra conditions to be processed: skip them.  */
      if (bb == loop->latch
	  || bb_with_exit_edge_p (loop, bb))
	{
	  reset_bb_predicate (bb);
	  continue;
	}

      cond = bb_predicate (bb);
      stmt = last_stmt (bb);
      if (stmt && gimple_code (stmt) == GIMPLE_COND)
	{
	  tree c2;
	  edge true_edge, false_edge;
	  location_t loc = gimple_location (stmt);
	  /* Rebuild the comparison of the GIMPLE_COND as a tree.  */
	  tree c = build2_loc (loc, gimple_cond_code (stmt),
				    boolean_type_node,
				    gimple_cond_lhs (stmt),
				    gimple_cond_rhs (stmt));

	  /* Add new condition into destination's predicate list.  */
	  extract_true_false_edges_from_block (gimple_bb (stmt),
					       &true_edge, &false_edge);

	  /* If C is true, then TRUE_EDGE is taken.  */
	  add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
				     unshare_expr (c));

	  /* If C is false, then FALSE_EDGE is taken.  */
	  c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
			   unshare_expr (c));
	  add_to_dst_predicate_list (loop, false_edge,
				     unshare_expr (cond), c2);

	  /* Both successors were handled above.  */
	  cond = NULL_TREE;
	}

      /* If current bb has only one successor, then consider it as an
	 unconditional goto.  */
      if (single_succ_p (bb))
	{
	  basic_block bb_n = single_succ (bb);

	  /* The successor bb inherits the predicate of its
	     predecessor.  If there is no predicate in the predecessor
	     bb, then consider the successor bb as always executed.  */
	  if (cond == NULL_TREE)
	    cond = boolean_true_node;

	  add_to_predicate_list (loop, bb_n, cond);
	}
    }

  /* The loop header is always executed.  */
  reset_bb_predicate (loop->header);
  gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
	      && bb_predicate_gimplified_stmts (loop->latch) == NULL);
}
1311 
/* Build region by adding loop pre-header and post-header blocks.
   The returned vector holds the pre-header, all blocks recorded in
   IFC_BBS, and the single post-header (exit destination) of LOOP;
   the caller must release it.  */

static vec<basic_block>
build_region (struct loop *loop)
{
  vec<basic_block> region = vNULL;
  basic_block exit_bb = NULL;

  gcc_assert (ifc_bbs);
  /* The first element is loop pre-header.  */
  region.safe_push (loop_preheader_edge (loop)->src);

  for (unsigned int i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      region.safe_push (bb);
      /* Find loop postheader.  */
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (loop_exit_edge_p (loop, e))
	  {
	      exit_bb = e->dest;
	      break;
	  }
    }
  /* The last element is loop post-header.  */
  gcc_assert (exit_bb);
  region.safe_push (exit_bb);
  return region;
}
1343 
/* Return true when LOOP is if-convertible.  This is a helper function
   for if_convertible_loop_p.  REFS and DDRS are initialized and freed
   in if_convertible_loop_p.

   Side effects: sets the global IFC_BBS, allocates INNERMOST_DR_MAP
   and BASEREF_DR_MAP, attaches an ifc_dr to each data reference's AUX
   field, and computes block predicates.  The caller cleans these up
   regardless of the return value.  */

static bool
if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
{
  unsigned int i;
  basic_block exit_bb = NULL;
  vec<basic_block> region;

  /* Give up when the loop's data references cannot be analyzed.  */
  if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
    return false;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Allow statements that can be handled during if-conversion.  */
  ifc_bbs = get_loop_body_in_if_conv_order (loop);
  if (!ifc_bbs)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Irreducible loop\n");
      return false;
    }

  /* Check each block's CFG shape, recording the unique exit block.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];

      if (!if_convertible_bb_p (loop, bb, exit_bb))
	return false;

      if (bb_with_exit_edge_p (loop, bb))
	exit_bb = bb;
    }

  /* Reject statement kinds we cannot handle at all and clear the uid
     of the rest; the uid is reused below to map statements to data
     references.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	switch (gimple_code (gsi_stmt (gsi)))
	  {
	  case GIMPLE_LABEL:
	  case GIMPLE_ASSIGN:
	  case GIMPLE_CALL:
	  case GIMPLE_DEBUG:
	  case GIMPLE_COND:
	    gimple_set_uid (gsi_stmt (gsi), 0);
	    break;
	  default:
	    return false;
	  }
    }

  data_reference_p dr;

  innermost_DR_map
	  = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
  baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;

  /* Compute post-dominator tree locally.  */
  region = build_region (loop);
  calculate_dominance_info_for_region (CDI_POST_DOMINATORS, region);

  predicate_bbs (loop);

  /* Free post-dominator tree since it is not used after predication.  */
  free_dominance_info_for_region (cfun, CDI_POST_DOMINATORS, region);
  region.release ();

  /* Initialize per-data-reference if-conversion info and record which
     references are read or written unconditionally.  */
  for (i = 0; refs->iterate (i, &dr); i++)
    {
      tree ref = DR_REF (dr);

      dr->aux = XNEW (struct ifc_dr);
      DR_BASE_W_UNCONDITIONALLY (dr) = false;
      DR_RW_UNCONDITIONALLY (dr) = false;
      DR_W_UNCONDITIONALLY (dr) = false;
      IFC_DR (dr)->rw_predicate = boolean_false_node;
      IFC_DR (dr)->w_predicate = boolean_false_node;
      IFC_DR (dr)->base_w_predicate = boolean_false_node;
      /* Map the statement to its first data reference (1-based).  */
      if (gimple_uid (DR_STMT (dr)) == 0)
	gimple_set_uid (DR_STMT (dr), i + 1);

      /* If DR doesn't have innermost loop behavior or it's a compound
         memory reference, we synthesize its innermost loop behavior
         for hashing.  */
      if (TREE_CODE (ref) == COMPONENT_REF
          || TREE_CODE (ref) == IMAGPART_EXPR
          || TREE_CODE (ref) == REALPART_EXPR
          || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
	       || DR_INIT (dr) || DR_STEP (dr)))
        {
          while (TREE_CODE (ref) == COMPONENT_REF
	         || TREE_CODE (ref) == IMAGPART_EXPR
	         || TREE_CODE (ref) == REALPART_EXPR)
	    ref = TREE_OPERAND (ref, 0);

          DR_BASE_ADDRESS (dr) = ref;
          DR_OFFSET (dr) = NULL;
          DR_INIT (dr) = NULL;
          DR_STEP (dr) = NULL;
          DR_ALIGNED_TO (dr) = NULL;
        }
      hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
    }

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gimple_stmt_iterator itr;

      /* Check the if-convertibility of statements in predicated BBs.
	 A block dominating the latch runs on every iteration and
	 needs no predication, so its statements are skipped here.  */
      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
	  if (!if_convertible_stmt_p (gsi_stmt (itr), *refs))
	    return false;
    }

  /* Checking PHIs needs to be done after stmts, as the fact whether there
     are any masked loads or stores affects the tests.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gphi_iterator itr;

      for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
	if (!if_convertible_phi_p (loop, bb, itr.phi ()))
	  return false;
    }

  if (dump_file)
    fprintf (dump_file, "Applying if-conversion\n");

  return true;
}
1482 
/* Return true when LOOP is if-convertible.
   LOOP is if-convertible if:
   - it is innermost,
   - it has two or more basic blocks,
   - it has only one exit,
   - loop header is not the exit edge,
   - if its basic blocks and phi nodes are if convertible.

   Owns the data-reference vector and the global DR hash maps: they
   are allocated here and released before returning, whatever the
   outcome of the analysis.  */

static bool
if_convertible_loop_p (struct loop *loop)
{
  edge e;
  edge_iterator ei;
  bool res = false;
  vec<data_reference_p> refs;

  /* Handle only innermost loop.  */
  if (!loop || loop->inner)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "not innermost loop\n");
      return false;
    }

  /* If only one block, no need for if-conversion.  */
  if (loop->num_nodes <= 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "less than 2 basic blocks\n");
      return false;
    }

  /* More than one loop exit is too much to handle.  */
  if (!single_exit (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "multiple exits\n");
      return false;
    }

  /* If one of the loop header's edge is an exit edge then do not
     apply if-conversion.  */
  FOR_EACH_EDGE (e, ei, loop->header->succs)
    if (loop_exit_edge_p (loop, e))
      return false;

  refs.create (5);
  res = if_convertible_loop_p_1 (loop, &refs);

  /* Release the ifc_dr structs attached to each data reference by
     if_convertible_loop_p_1.  */
  data_reference_p dr;
  unsigned int i;
  for (i = 0; refs.iterate (i, &dr); i++)
    free (dr->aux);

  free_data_refs (refs);

  delete innermost_DR_map;
  innermost_DR_map = NULL;

  delete baseref_DR_map;
  baseref_DR_map = NULL;

  return res;
}
1547 
/* Returns true if def-stmt for phi argument ARG is simple increment/decrement
   which is in predicated basic block.
   In fact, the following PHI pattern is searching:
      loop-header:
	reduc_1 = PHI <..., reduc_2>
      ...
	if (...)
	  reduc_3 = ...
	reduc_2 = PHI <reduc_1, reduc_3>

   ARG_0 and ARG_1 are correspondent PHI arguments.
   REDUC, OP0 and OP1 contain reduction stmt and its operands.
   EXTENDED is true if PHI has > 2 arguments.  */

static bool
is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
			  tree *op0, tree *op1, bool extended)
{
  tree lhs, r_op1, r_op2;
  gimple *stmt;
  gimple *header_phi = NULL;
  enum tree_code reduction_op;
  basic_block bb = gimple_bb (phi);
  struct loop *loop = bb->loop_father;
  edge latch_e = loop_latch_edge (loop);
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  edge e;
  edge_iterator ei;
  bool result = false;
  if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
    return false;

  /* Identify which argument is the loop-header PHI and which is the
     candidate reduction statement.  For extended PHIs only the
     ARG_1-defined-by-PHI orientation is accepted.  */
  if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
    {
      lhs = arg_1;
      header_phi = SSA_NAME_DEF_STMT (arg_0);
      stmt = SSA_NAME_DEF_STMT (arg_1);
    }
  else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
    {
      lhs = arg_0;
      header_phi = SSA_NAME_DEF_STMT (arg_1);
      stmt = SSA_NAME_DEF_STMT (arg_0);
    }
  else
    return false;
  if (gimple_bb (header_phi) != loop->header)
    return false;

  /* PHI must feed the header PHI back through the latch edge.  */
  if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
    return false;

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_has_volatile_ops (stmt))
    return false;

  if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return false;

  if (!is_predicated (gimple_bb (stmt)))
    return false;

  /* Check that stmt-block is predecessor of phi-block.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    if (e->dest == bb)
      {
	result = true;
	break;
      }
  if (!result)
    return false;

  if (!has_single_use (lhs))
    return false;

  /* Only plus/minus reductions are recognized.  */
  reduction_op = gimple_assign_rhs_code (stmt);
  if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
    return false;
  r_op1 = gimple_assign_rhs1 (stmt);
  r_op2 = gimple_assign_rhs2 (stmt);

  /* Make R_OP1 to hold reduction variable.  */
  if (r_op2 == PHI_RESULT (header_phi)
      && reduction_op == PLUS_EXPR)
    std::swap (r_op1, r_op2);
  else if (r_op1 != PHI_RESULT (header_phi))
    return false;

  /* Check that R_OP1 is used in reduction stmt or in PHI only.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;
      if (use_stmt == stmt)
	continue;
      if (gimple_code (use_stmt) != GIMPLE_PHI)
	return false;
    }

  *op0 = r_op1; *op1 = r_op2;
  *reduc = stmt;
  return true;
}
1653 
/* Converts conditional scalar reduction into unconditional form, e.g.
     bb_4
       if (_5 != 0) goto bb_5 else goto bb_6
     end_bb_4
     bb_5
       res_6 = res_13 + 1;
     end_bb_5
     bb_6
       # res_2 = PHI <res_13(4), res_6(5)>
     end_bb_6

   will be converted into sequence
    _ifc__1 = _5 != 0 ? 1 : 0;
    res_2 = res_13 + _ifc__1;
  Argument SWAP tells that arguments of conditional expression should be
  swapped.
  The original reduction statement REDUC is removed; new statements are
  inserted before GSI.
  Returns rhs of resulting PHI assignment.  */

static tree
convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
			       tree cond, tree op0, tree op1, bool swap)
{
  gimple_stmt_iterator stmt_it;
  gimple *new_assign;
  tree rhs;
  tree rhs1 = gimple_assign_rhs1 (reduc);
  tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
  tree c;
  tree zero = build_zero_cst (TREE_TYPE (rhs1));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Found cond scalar reduction.\n");
      print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
    }

  /* Build cond expression using COND and constant operand
     of reduction rhs.  */
  c = fold_build_cond_expr (TREE_TYPE (rhs1),
			    unshare_expr (cond),
			    swap ? zero : op1,
			    swap ? op1 : zero);

  /* Create assignment stmt and insert it at GSI.  */
  new_assign = gimple_build_assign (tmp, c);
  gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
  /* Build rhs for unconditional increment/decrement.  */
  rhs = fold_build2 (gimple_assign_rhs_code (reduc),
		     TREE_TYPE (rhs1), op0, tmp);

  /* Delete original reduction stmt.  */
  stmt_it = gsi_for_stmt (reduc);
  gsi_remove (&stmt_it, true);
  release_defs (reduc);
  return rhs;
}
1710 
/* Produce the condition under which PHI takes the argument whose
   incoming-edge indexes are listed in OCCUR: the OR of the incoming
   blocks' predicates, gimplified before GSI.  */

static tree
gen_phi_arg_condition (gphi *phi, vec<int> *occur,
		       gimple_stmt_iterator *gsi)
{
  int len;
  int i;
  tree cond = NULL_TREE;
  tree c;
  edge e;

  len = occur->length ();
  gcc_assert (len > 0);
  for (i = 0; i < len; i++)
    {
      e = gimple_phi_arg_edge (phi, (*occur)[i]);
      c = bb_predicate (e->src);
      /* A true predicate absorbs the whole disjunction.  */
      if (is_true_predicate (c))
	{
	  cond = c;
	  break;
	}
      c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
				      is_gimple_condexpr, NULL_TREE,
				      true, GSI_SAME_STMT);
      if (cond != NULL_TREE)
	{
	  /* Must build OR expression.  */
	  cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
	  cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
					     is_gimple_condexpr, NULL_TREE,
					     true, GSI_SAME_STMT);
	}
      else
	cond = c;
    }
  gcc_assert (cond != NULL_TREE);
  return cond;
}
1751 
1752 /* Local valueization callback that follows all-use SSA edges.  */
1753 
1754 static tree
1755 ifcvt_follow_ssa_use_edges (tree val)
1756 {
1757   return val;
1758 }
1759 
/* Replace a scalar PHI node with a COND_EXPR using COND as condition.
   This routine can handle PHI nodes with more than two arguments.

   For example,
     S1: A = PHI <x1(1), x2(5)>
   is converted into,
     S2: A = cond ? x1 : x2;

   The generated code is inserted at GSI that points to the top of
   basic block's statement list.
   If PHI node has more than two arguments a chain of conditional
   expression is produced.  */


static void
predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
{
  gimple *new_stmt = NULL, *reduc;
  tree rhs, res, arg0, arg1, op0, op1, scev;
  tree cond;
  unsigned int index0;
  unsigned int max, args_len;
  edge e;
  basic_block bb;
  unsigned int i;

  res = gimple_phi_result (phi);
  if (virtual_operand_p (res))
    return;

  /* Degenerate PHIs (all arguments equal, or a result scalar
     evolution analysis can fully describe) become plain copies.  */
  if ((rhs = degenerate_phi_result (phi))
      || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
					    res))
	  && !chrec_contains_undetermined (scev)
	  && scev != res
	  && (rhs = gimple_phi_arg_def (phi, 0))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Degenerate phi!\n");
	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
	}
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      update_stmt (new_stmt);
      return;
    }

  bb = gimple_bb (phi);
  if (EDGE_COUNT (bb->preds) == 2)
    {
      /* Predicate ordinary PHI node with 2 arguments.  */
      edge first_edge, second_edge;
      basic_block true_bb;
      first_edge = EDGE_PRED (bb, 0);
      second_edge = EDGE_PRED (bb, 1);
      cond = bb_predicate (first_edge->src);
      /* Orient the edges so FIRST_EDGE carries the non-negated
	 predicate.  */
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	std::swap (first_edge, second_edge);
      if (EDGE_COUNT (first_edge->src->succs) > 1)
	{
	  cond = bb_predicate (second_edge->src);
	  if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	    cond = TREE_OPERAND (cond, 0);
	  else
	    first_edge = second_edge;
	}
      else
	cond = bb_predicate (first_edge->src);
      /* Gimplify the condition to a valid cond-expr conditional operand.  */
      cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
					 is_gimple_condexpr, NULL_TREE,
					 true, GSI_SAME_STMT);
      true_bb = first_edge->src;
      /* ARG0 is the value flowing in when COND holds.  */
      if (EDGE_PRED (bb, 1)->src == true_bb)
	{
	  arg0 = gimple_phi_arg_def (phi, 1);
	  arg1 = gimple_phi_arg_def (phi, 0);
	}
      else
	{
	  arg0 = gimple_phi_arg_def (phi, 0);
	  arg1 = gimple_phi_arg_def (phi, 1);
	}
      if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
				    &op0, &op1, false))
	/* Convert reduction stmt into vectorizable form.  */
	rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
					     true_bb != gimple_bb (reduc));
      else
	/* Build new RHS using selected condition and arguments.  */
	rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
				    arg0, arg1);
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      gimple_stmt_iterator new_gsi = gsi_for_stmt (new_stmt);
      fold_stmt (&new_gsi, ifcvt_follow_ssa_use_edges);
      update_stmt (new_stmt);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "new phi replacement stmt\n");
	  print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
	}
      return;
    }

  /* Create hashmap for PHI node which contain vector of argument indexes
     having the same value.  */
  bool swap = false;
  hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
  unsigned int num_args = gimple_phi_num_args (phi);
  int max_ind = -1;
  /* Vector of different PHI argument values.  */
  auto_vec<tree> args (num_args);

  /* Compute phi_arg_map.  */
  for (i = 0; i < num_args; i++)
    {
      tree arg;

      arg = gimple_phi_arg_def (phi, i);
      if (!phi_arg_map.get (arg))
	args.quick_push (arg);
      phi_arg_map.get_or_insert (arg).safe_push (i);
    }

  /* Determine element with max number of occurrences.  */
  max_ind = -1;
  max = 1;
  args_len = args.length ();
  for (i = 0; i < args_len; i++)
    {
      unsigned int len;
      if ((len = phi_arg_map.get (args[i])->length ()) > max)
	{
	  max_ind = (int) i;
	  max = len;
	}
    }

  /* Put element with max number of occurrences to the end of ARGS:
     it becomes the final "else" of the COND_EXPR chain and so needs
     no condition of its own.  */
  if (max_ind != -1 && max_ind +1 != (int) args_len)
    std::swap (args[args_len - 1], args[max_ind]);

  /* Handle one special case when number of arguments with different values
     is equal 2 and one argument has the only occurrence.  Such PHI can be
     handled as if would have only 2 arguments.  */
  if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
    {
      vec<int> *indexes;
      indexes = phi_arg_map.get (args[0]);
      index0 = (*indexes)[0];
      arg0 = args[0];
      arg1 = args[1];
      e = gimple_phi_arg_edge (phi, index0);
      cond = bb_predicate (e->src);
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	{
	  swap = true;
	  cond = TREE_OPERAND (cond, 0);
	}
      /* Gimplify the condition to a valid cond-expr conditional operand.  */
      cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
					 is_gimple_condexpr, NULL_TREE,
					 true, GSI_SAME_STMT);
      if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
				      &op0, &op1, true)))
	rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
				    swap? arg1 : arg0,
				    swap? arg0 : arg1);
      else
	/* Convert reduction stmt into vectorizable form.  */
	rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
					     swap);
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      update_stmt (new_stmt);
    }
  else
    {
      /* Common case: build a chain of COND_EXPRs, one per distinct
	 argument value except the last.  */
      vec<int> *indexes;
      tree type = TREE_TYPE (gimple_phi_result (phi));
      tree lhs;
      arg1 = args[1];
      for (i = 0; i < args_len; i++)
	{
	  arg0 = args[i];
	  indexes = phi_arg_map.get (args[i]);
	  if (i != args_len - 1)
	    lhs = make_temp_ssa_name (type, NULL, "_ifc_");
	  else
	    lhs = res;
	  cond = gen_phi_arg_condition (phi, indexes, gsi);
	  rhs = fold_build_cond_expr (type, unshare_expr (cond),
				      arg0, arg1);
	  new_stmt = gimple_build_assign (lhs, rhs);
	  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
	  update_stmt (new_stmt);
	  /* Chain: the temporary just built is the "else" value of
	     the next COND_EXPR.  */
	  arg1 = lhs;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "new extended phi replacement stmt\n");
      print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
    }
}
1970 
1971 /* Replaces in LOOP all the scalar phi nodes other than those in the
1972    LOOP->header block with conditional modify expressions.  */
1973 
1974 static void
1975 predicate_all_scalar_phis (struct loop *loop)
1976 {
1977   basic_block bb;
1978   unsigned int orig_loop_num_nodes = loop->num_nodes;
1979   unsigned int i;
1980 
1981   for (i = 1; i < orig_loop_num_nodes; i++)
1982     {
1983       gphi *phi;
1984       gimple_stmt_iterator gsi;
1985       gphi_iterator phi_gsi;
1986       bb = ifc_bbs[i];
1987 
1988       if (bb == loop->header)
1989 	continue;
1990 
1991       phi_gsi = gsi_start_phis (bb);
1992       if (gsi_end_p (phi_gsi))
1993 	continue;
1994 
1995       gsi = gsi_after_labels (bb);
1996       while (!gsi_end_p (phi_gsi))
1997 	{
1998 	  phi = phi_gsi.phi ();
1999 	  if (virtual_operand_p (gimple_phi_result (phi)))
2000 	    gsi_next (&phi_gsi);
2001 	  else
2002 	    {
2003 	      predicate_scalar_phi (phi, &gsi);
2004 	      remove_phi_node (&phi_gsi, false);
2005 	    }
2006 	}
2007     }
2008 }
2009 
2010 /* Insert in each basic block of LOOP the statements produced by the
2011    gimplification of the predicates.  */
2012 
2013 static void
2014 insert_gimplified_predicates (loop_p loop)
2015 {
2016   unsigned int i;
2017 
2018   for (i = 0; i < loop->num_nodes; i++)
2019     {
2020       basic_block bb = ifc_bbs[i];
2021       gimple_seq stmts;
2022       if (!is_predicated (bb))
2023 	gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
2024       if (!is_predicated (bb))
2025 	{
2026 	  /* Do not insert statements for a basic block that is not
2027 	     predicated.  Also make sure that the predicate of the
2028 	     basic block is set to true.  */
2029 	  reset_bb_predicate (bb);
2030 	  continue;
2031 	}
2032 
2033       stmts = bb_predicate_gimplified_stmts (bb);
2034       if (stmts)
2035 	{
2036 	  if (any_pred_load_store)
2037 	    {
2038 	      /* Insert the predicate of the BB just after the label,
2039 		 as the if-conversion of memory writes will use this
2040 		 predicate.  */
2041 	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
2042 	      gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2043 	    }
2044 	  else
2045 	    {
2046 	      /* Insert the predicate of the BB at the end of the BB
2047 		 as this would reduce the register pressure: the only
2048 		 use of this predicate will be in successor BBs.  */
2049 	      gimple_stmt_iterator gsi = gsi_last_bb (bb);
2050 
2051 	      if (gsi_end_p (gsi)
2052 		  || stmt_ends_bb_p (gsi_stmt (gsi)))
2053 		gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2054 	      else
2055 		gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
2056 	    }
2057 
2058 	  /* Once the sequence is code generated, set it to NULL.  */
2059 	  set_bb_predicate_gimplified_stmts (bb, NULL);
2060 	}
2061     }
2062 }
2063 
2064 /* Helper function for predicate_mem_writes. Returns index of existent
2065    mask if it was created for given SIZE and -1 otherwise.  */
2066 
2067 static int
2068 mask_exists (int size, vec<int> vec)
2069 {
2070   unsigned int ix;
2071   int v;
2072   FOR_EACH_VEC_ELT (vec, ix, v)
2073     if (v == size)
2074       return (int) ix;
2075   return -1;
2076 }
2077 
2078 /* Predicate each write to memory in LOOP.
2079 
2080    This function transforms control flow constructs containing memory
2081    writes of the form:
2082 
2083    | for (i = 0; i < N; i++)
2084    |   if (cond)
2085    |     A[i] = expr;
2086 
2087    into the following form that does not contain control flow:
2088 
2089    | for (i = 0; i < N; i++)
2090    |   A[i] = cond ? expr : A[i];
2091 
2092    The original CFG looks like this:
2093 
2094    | bb_0
2095    |   i = 0
2096    | end_bb_0
2097    |
2098    | bb_1
2099    |   if (i < N) goto bb_5 else goto bb_2
2100    | end_bb_1
2101    |
2102    | bb_2
2103    |   cond = some_computation;
2104    |   if (cond) goto bb_3 else goto bb_4
2105    | end_bb_2
2106    |
2107    | bb_3
2108    |   A[i] = expr;
2109    |   goto bb_4
2110    | end_bb_3
2111    |
2112    | bb_4
2113    |   goto bb_1
2114    | end_bb_4
2115 
2116    insert_gimplified_predicates inserts the computation of the COND
2117    expression at the beginning of the destination basic block:
2118 
2119    | bb_0
2120    |   i = 0
2121    | end_bb_0
2122    |
2123    | bb_1
2124    |   if (i < N) goto bb_5 else goto bb_2
2125    | end_bb_1
2126    |
2127    | bb_2
2128    |   cond = some_computation;
2129    |   if (cond) goto bb_3 else goto bb_4
2130    | end_bb_2
2131    |
2132    | bb_3
2133    |   cond = some_computation;
2134    |   A[i] = expr;
2135    |   goto bb_4
2136    | end_bb_3
2137    |
2138    | bb_4
2139    |   goto bb_1
2140    | end_bb_4
2141 
2142    predicate_mem_writes is then predicating the memory write as follows:
2143 
2144    | bb_0
2145    |   i = 0
2146    | end_bb_0
2147    |
2148    | bb_1
2149    |   if (i < N) goto bb_5 else goto bb_2
2150    | end_bb_1
2151    |
2152    | bb_2
2153    |   if (cond) goto bb_3 else goto bb_4
2154    | end_bb_2
2155    |
2156    | bb_3
2157    |   cond = some_computation;
2158    |   A[i] = cond ? expr : A[i];
2159    |   goto bb_4
2160    | end_bb_3
2161    |
2162    | bb_4
2163    |   goto bb_1
2164    | end_bb_4
2165 
2166    and finally combine_blocks removes the basic block boundaries making
2167    the loop vectorizable:
2168 
2169    | bb_0
2170    |   i = 0
2171    |   if (i < N) goto bb_5 else goto bb_1
2172    | end_bb_0
2173    |
2174    | bb_1
2175    |   cond = some_computation;
2176    |   A[i] = cond ? expr : A[i];
2177    |   if (i < N) goto bb_5 else goto bb_4
2178    | end_bb_1
2179    |
2180    | bb_4
2181    |   goto bb_1
2182    | end_bb_4
2183 */
2184 
static void
predicate_mem_writes (loop_p loop)
{
  unsigned int i, orig_loop_num_nodes = loop->num_nodes;
  auto_vec<int, 1> vect_sizes;
  auto_vec<tree, 1> vect_masks;

  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      gimple_stmt_iterator gsi;
      basic_block bb = ifc_bbs[i];
      tree cond = bb_predicate (bb);
      bool swap;
      gimple *stmt;
      int index;

      /* Unconditionally executed blocks need no predication.  */
      if (is_true_predicate (cond))
	continue;

      /* Strip a top-level negation from the predicate; SWAP records
	 that the two arms of generated conditionals must be exchanged
	 to compensate.  */
      swap = false;
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	{
	  swap = true;
	  cond = TREE_OPERAND (cond, 0);
	}

      /* Per-block cache of masks, keyed by access bit size.  */
      vect_sizes.truncate (0);
      vect_masks.truncate (0);

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
	{
	  if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
	    ;
	  else if (is_false_predicate (cond)
		   && gimple_vdef (stmt))
	    {
	      /* A store on a never-executed path is dead: remove it
		 together with its virtual definition.  */
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&gsi, true);
	      release_defs (stmt);
	      continue;
	    }
	  else if (gimple_plf (stmt, GF_PLF_2))
	    {
	      /* GF_PLF_2 marks a load or store to be converted into an
		 IFN_MASK_LOAD/IFN_MASK_STORE internal call (flag set
		 during the earlier analysis phase).  */
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      tree ref, addr, ptr, mask;
	      gimple *new_stmt;
	      gimple_seq stmts = NULL;
	      int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
	      /* An SSA_NAME lhs means this is a load; the memory
		 reference is then on the rhs.  */
	      ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
	      mark_addressable (ref);
	      addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
					       true, NULL_TREE, true,
					       GSI_SAME_STMT);
	      if (!vect_sizes.is_empty ()
		  && (index = mask_exists (bitsize, vect_sizes)) != -1)
		/* Use created mask.  */
		mask = vect_masks[index];
	      else
		{
		  /* Re-materialize the comparison so the mask has its
		     own SSA definition in this block.  */
		  if (COMPARISON_CLASS_P (cond))
		    mask = gimple_build (&stmts, TREE_CODE (cond),
					 boolean_type_node,
					 TREE_OPERAND (cond, 0),
					 TREE_OPERAND (cond, 1));
		  else
		    mask = cond;

		  if (swap)
		    {
		      /* Invert the mask (mask ^ true) to undo the
			 TRUTH_NOT_EXPR stripped above.  */
		      tree true_val
			= constant_boolean_node (true, TREE_TYPE (mask));
		      mask = gimple_build (&stmts, BIT_XOR_EXPR,
					   TREE_TYPE (mask), mask, true_val);
		    }
		  gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);

		  mask = ifc_temp_var (TREE_TYPE (mask), mask, &gsi);
		  /* Save mask and its size for further use.  */
		  vect_sizes.safe_push (bitsize);
		  vect_masks.safe_push (mask);
		}
	      /* Second call argument: alias pointer type carrying the
		 object alignment as its constant value.  */
	      ptr = build_int_cst (reference_alias_ptr_type (ref),
				   get_object_alignment (ref));
	      /* Copy points-to info if possible.  */
	      if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
		copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
			       ref);
	      if (TREE_CODE (lhs) == SSA_NAME)
		{
		  /* Load: lhs = IFN_MASK_LOAD (addr, ptr, mask).  */
		  new_stmt
		    = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
						  ptr, mask);
		  gimple_call_set_lhs (new_stmt, lhs);
		  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		}
	      else
		{
		  /* Store: IFN_MASK_STORE (addr, ptr, mask, rhs);
		     transfer the virtual operands of the replaced
		     statement to keep the VDEF chain intact.  */
		  new_stmt
		    = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
						  mask, rhs);
		  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		  SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
		}

	      gsi_replace (&gsi, new_stmt, true);
	    }
	  else if (gimple_vdef (stmt))
	    {
	      /* Any other predicated store becomes
		 lhs = cond ? rhs : lhs, i.e. it rewrites the location
		 with its previous value when the predicate is false.  */
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      tree type = TREE_TYPE (lhs);

	      lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
	      rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
	      if (swap)
		std::swap (lhs, rhs);
	      cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
						 is_gimple_condexpr, NULL_TREE,
						 true, GSI_SAME_STMT);
	      rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
	      gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
	      update_stmt (stmt);
	    }
	  gsi_next (&gsi);
	}
    }
}
2314 
2315 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2316    other than the exit and latch of the LOOP.  Also resets the
2317    GIMPLE_DEBUG information.  */
2318 
2319 static void
2320 remove_conditions_and_labels (loop_p loop)
2321 {
2322   gimple_stmt_iterator gsi;
2323   unsigned int i;
2324 
2325   for (i = 0; i < loop->num_nodes; i++)
2326     {
2327       basic_block bb = ifc_bbs[i];
2328 
2329       if (bb_with_exit_edge_p (loop, bb)
2330         || bb == loop->latch)
2331       continue;
2332 
2333       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2334 	switch (gimple_code (gsi_stmt (gsi)))
2335 	  {
2336 	  case GIMPLE_COND:
2337 	  case GIMPLE_LABEL:
2338 	    gsi_remove (&gsi, true);
2339 	    break;
2340 
2341 	  case GIMPLE_DEBUG:
2342 	    /* ??? Should there be conditional GIMPLE_DEBUG_BINDs?  */
2343 	    if (gimple_debug_bind_p (gsi_stmt (gsi)))
2344 	      {
2345 		gimple_debug_bind_reset_value (gsi_stmt (gsi));
2346 		update_stmt (gsi_stmt (gsi));
2347 	      }
2348 	    gsi_next (&gsi);
2349 	    break;
2350 
2351 	  default:
2352 	    gsi_next (&gsi);
2353 	  }
2354     }
2355 }
2356 
2357 /* Combine all the basic blocks from LOOP into one or two super basic
2358    blocks.  Replace PHI nodes with conditional modify expressions.  */
2359 
static void
combine_blocks (struct loop *loop)
{
  basic_block bb, exit_bb, merge_target_bb;
  unsigned int orig_loop_num_nodes = loop->num_nodes;
  unsigned int i;
  edge e;
  edge_iterator ei;

  /* Predicate everything first, while the CFG is still intact.  */
  remove_conditions_and_labels (loop);
  insert_gimplified_predicates (loop);
  predicate_all_scalar_phis (loop);

  if (any_pred_load_store)
    predicate_mem_writes (loop);

  /* Merge basic blocks: first remove all the edges in the loop,
     except for those from the exit block.  */
  exit_bb = NULL;
  bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
  for (i = 0; i < orig_loop_num_nodes; i++)
    {
      bb = ifc_bbs[i];
      /* Remember which blocks were conditionally executed; flow
	 sensitive info of their definitions is reset below.  */
      predicated[i] = !is_true_predicate (bb_predicate (bb));
      free_bb_predicate (bb);
      if (bb_with_exit_edge_p (loop, bb))
	{
	  /* A single exit is a precondition of if-conversion.  */
	  gcc_assert (exit_bb == NULL);
	  exit_bb = bb;
	}
    }
  gcc_assert (exit_bb != loop->latch);

  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      bb = ifc_bbs[i];

      /* Use ei_safe_edge so deleting an edge does not invalidate
	 the iteration.  */
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
	{
	  if (e->src == exit_bb)
	    ei_next (&ei);
	  else
	    remove_edge (e);
	}
    }

  if (exit_bb != NULL)
    {
      if (exit_bb != loop->header)
	{
	  /* Connect this node to loop header.  */
	  make_edge (loop->header, exit_bb, EDGE_FALLTHRU);
	  set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
	}

      /* Redirect non-exit edges to loop->latch.  */
      FOR_EACH_EDGE (e, ei, exit_bb->succs)
	{
	  if (!loop_exit_edge_p (loop, e))
	    redirect_edge_and_branch (e, loop->latch);
	}
      set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
    }
  else
    {
      /* If the loop does not have an exit, reconnect header and latch.  */
      make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
      set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
    }

  merge_target_bb = loop->header;

  /* Get at the virtual def valid for uses starting at the first block
     we merge into the header.  Without a virtual PHI the loop has the
     same virtual use on all stmts.  */
  gphi *vphi = get_virtual_phi (loop->header);
  tree last_vdef = NULL_TREE;
  if (vphi)
    {
      last_vdef = gimple_phi_result (vphi);
      for (gimple_stmt_iterator gsi = gsi_start_bb (loop->header);
	   ! gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_vdef (gsi_stmt (gsi)))
	  last_vdef = gimple_vdef (gsi_stmt (gsi));
    }
  /* Move the statements of every remaining block into the header,
     in the blocks' original order, and delete the emptied blocks.  */
  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      gimple_stmt_iterator gsi;
      gimple_stmt_iterator last;

      bb = ifc_bbs[i];

      if (bb == exit_bb || bb == loop->latch)
	continue;

      /* We release virtual PHIs late because we have to propagate them
         out using the current VUSE.  The def might be the one used
	 after the loop.  */
      vphi = get_virtual_phi (bb);
      if (vphi)
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, last_vdef);
	    }
	  gsi = gsi_for_stmt (vphi);
	  remove_phi_node (&gsi, true);
	}

      /* Make stmts member of loop->header and clear range info from all stmts
	 in BB which is now no longer executed conditional on a predicate we
	 could have derived it from.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_bb (stmt, merge_target_bb);
	  /* Update virtual operands.  */
	  if (last_vdef)
	    {
	      use_operand_p use_p = ssa_vuse_operand (stmt);
	      if (use_p
		  && USE_FROM_PTR (use_p) != last_vdef)
		SET_USE (use_p, last_vdef);
	      if (gimple_vdef (stmt))
		last_vdef = gimple_vdef (stmt);
	    }
	  if (predicated[i])
	    {
	      ssa_op_iter i;
	      tree op;
	      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
		reset_flow_sensitive_info (op);
	    }
	}

      /* Update stmt list.  */
      last = gsi_last_bb (merge_target_bb);
      gsi_insert_seq_after_without_update (&last, bb_seq (bb), GSI_NEW_STMT);
      set_bb_seq (bb, NULL);

      delete_basic_block (bb);
    }

  /* If possible, merge loop header to the block with the exit edge.
     This reduces the number of basic blocks to two, to please the
     vectorizer that handles only loops with two nodes.  */
  if (exit_bb
      && exit_bb != loop->header)
    {
      /* We release virtual PHIs late because we have to propagate them
         out using the current VUSE.  The def might be the one used
	 after the loop.  */
      vphi = get_virtual_phi (exit_bb);
      if (vphi)
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, last_vdef);
	    }
	  gimple_stmt_iterator gsi = gsi_for_stmt (vphi);
	  remove_phi_node (&gsi, true);
	}

      if (can_merge_blocks_p (loop->header, exit_bb))
	merge_blocks (loop->header, exit_bb);
    }

  free (ifc_bbs);
  ifc_bbs = NULL;
  free (predicated);
}
2539 
2540 /* Version LOOP before if-converting it; the original loop
2541    will be if-converted, the new copy of the loop will not,
2542    and the LOOP_VECTORIZED internal call will be guarding which
2543    loop to execute.  The vectorizer pass will fold this
2544    internal call into either true or false.
2545 
2546    Note that this function intentionally invalidates profile.  Both edges
2547    out of LOOP_VECTORIZED must have 100% probability so the profile remains
2548    consistent after the condition is folded in the vectorizer.  */
2549 
static struct loop *
version_loop_for_if_conversion (struct loop *loop)
{
  basic_block cond_bb;
  tree cond = make_ssa_name (boolean_type_node);
  struct loop *new_loop;
  gimple *g;
  gimple_stmt_iterator gsi;
  unsigned int save_length;

  /* Build cond = IFN_LOOP_VECTORIZED (loop->num, 0); the second
     argument is patched below once the copy's number is known.  */
  g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
				  build_int_cst (integer_type_node, loop->num),
				  integer_zero_node);
  gimple_call_set_lhs (g, cond);

  /* Save BB->aux around loop_version as that uses the same field.  */
  save_length = loop->inner ? loop->inner->num_nodes : loop->num_nodes;
  void **saved_preds = XALLOCAVEC (void *, save_length);
  for (unsigned i = 0; i < save_length; i++)
    saved_preds[i] = ifc_bbs[i]->aux;

  initialize_original_copy_tables ();
  /* At this point we invalidate profile consistency until
     IFN_LOOP_VECTORIZED is re-merged in the vectorizer.  */
  new_loop = loop_version (loop, cond, &cond_bb,
			   REG_BR_PROB_BASE, REG_BR_PROB_BASE,
			   REG_BR_PROB_BASE, REG_BR_PROB_BASE, true);
  free_original_copy_tables ();

  /* Restore the saved predicate pointers into BB->aux.  */
  for (unsigned i = 0; i < save_length; i++)
    ifc_bbs[i]->aux = saved_preds[i];

  /* loop_version may fail; propagate the failure to the caller.  */
  if (new_loop == NULL)
    return NULL;

  /* The copy stays scalar: only the original loop is if-converted
     and later considered by the vectorizer.  */
  new_loop->dont_vectorize = true;
  new_loop->force_vectorize = false;
  gsi = gsi_last_bb (cond_bb);
  gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  update_ssa (TODO_update_ssa);
  return new_loop;
}
2593 
2594 /* Return true when LOOP satisfies the follow conditions that will
2595    allow it to be recognized by the vectorizer for outer-loop
2596    vectorization:
2597     - The loop is not the root node of the loop tree.
2598     - The loop has exactly one inner loop.
2599     - The loop has a single exit.
2600     - The loop header has a single successor, which is the inner
2601       loop header.
2602     - Each of the inner and outer loop latches have a single
2603       predecessor.
2604     - The loop exit block has a single predecessor, which is the
2605       inner loop's exit block.  */
2606 
2607 static bool
2608 versionable_outer_loop_p (struct loop *loop)
2609 {
2610   if (!loop_outer (loop)
2611       || loop->dont_vectorize
2612       || !loop->inner
2613       || loop->inner->next
2614       || !single_exit (loop)
2615       || !single_succ_p (loop->header)
2616       || single_succ (loop->header) != loop->inner->header
2617       || !single_pred_p (loop->latch)
2618       || !single_pred_p (loop->inner->latch))
2619     return false;
2620 
2621   basic_block outer_exit = single_pred (loop->latch);
2622   basic_block inner_exit = single_pred (loop->inner->latch);
2623 
2624   if (!single_pred_p (outer_exit) || single_pred (outer_exit) != inner_exit)
2625     return false;
2626 
2627   if (dump_file)
2628     fprintf (dump_file, "Found vectorizable outer loop for versioning\n");
2629 
2630   return true;
2631 }
2632 
2633 /* Performs splitting of critical edges.  Skip splitting and return false
2634    if LOOP will not be converted because:
2635 
2636      - LOOP is not well formed.
2637      - LOOP has PHI with more than MAX_PHI_ARG_NUM arguments.
2638 
2639    Last restriction is valid only if AGGRESSIVE_IF_CONV is false.  */
2640 
static bool
ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
{
  basic_block *body;
  basic_block bb;
  unsigned int num = loop->num_nodes;
  unsigned int i;
  gimple *stmt;
  edge e;
  edge_iterator ei;
  auto_vec<edge> critical_edges;

  /* Loop is not well formed.  */
  if (num <= 2 || loop->inner || !single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < num; i++)
    {
      bb = body[i];
      /* Without aggressive if-conversion, reject PHIs with too many
	 arguments: predicating them would be too costly.  */
      if (!aggressive_if_conv
	  && phi_nodes (bb)
	  && EDGE_COUNT (bb->preds) > MAX_PHI_ARG_NUM)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "BB %d has complicated PHI with more than %u args.\n",
		     bb->index, MAX_PHI_ARG_NUM);

	  free (body);
	  return false;
	}
      if (bb == loop->latch || bb_with_exit_edge_p (loop, bb))
	continue;

      stmt = last_stmt (bb);
      /* Skip basic blocks not ending with conditional branch.  */
      if (!stmt || gimple_code (stmt) != GIMPLE_COND)
	continue;

      /* Only collect critical edges that stay within LOOP.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
	  critical_edges.safe_push (e);
    }
  free (body);

  while (critical_edges.length () > 0)
    {
      e = critical_edges.pop ();
      /* Don't split if bb can be predicated along non-critical edge.  */
      if (EDGE_COUNT (e->dest->preds) > 2 || all_preds_critical_p (e->dest))
	split_edge (e);
    }

  return true;
}
2697 
2698 /* Delete redundant statements produced by predication which prevents
2699    loop vectorization.  */
2700 
static void
ifcvt_local_dce (basic_block bb)
{
  gimple *stmt;
  gimple *stmt1;
  gimple *phi;
  gimple_stmt_iterator gsi;
  auto_vec<gimple *> worklist;
  enum gimple_code code;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  /* Liveness is tracked in the GF_PLF_2 per-statement flag; the
     worklist holds statements whose operands still need marking.  */
  worklist.create (64);
  /* Consider all phi as live statements.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      gimple_set_plf (phi, GF_PLF_2, true);
      worklist.safe_push (phi);
    }
  /* Consider load/store statements, CALL and COND as live.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_store_p (stmt)
	  || gimple_assign_load_p (stmt)
	  || is_gimple_debug (stmt))
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      code = gimple_code (stmt);
      if (code == GIMPLE_COND || code == GIMPLE_CALL)
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      /* Tentatively mark everything else dead.  */
      gimple_set_plf (stmt, GF_PLF_2, false);

      /* An assignment whose result is used outside BB must stay live:
	 this DCE only reasons about uses within the block.  */
      if (code == GIMPLE_ASSIGN)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	    {
	      stmt1 = USE_STMT (use_p);
	      if (gimple_bb (stmt1) != bb)
		{
		  gimple_set_plf (stmt, GF_PLF_2, true);
		  worklist.safe_push (stmt);
		  break;
		}
	    }
	}
    }
  /* Propagate liveness through arguments of live stmt.  */
  while (worklist.length () > 0)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      tree use;

      stmt = worklist.pop ();
      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	{
	  use = USE_FROM_PTR (use_p);
	  if (TREE_CODE (use) != SSA_NAME)
	    continue;
	  stmt1 = SSA_NAME_DEF_STMT (use);
	  /* Only defs in BB that are still marked dead need work.  */
	  if (gimple_bb (stmt1) != bb
	      || gimple_plf (stmt1, GF_PLF_2))
	    continue;
	  gimple_set_plf (stmt1, GF_PLF_2, true);
	  worklist.safe_push (stmt1);
	}
    }
  /* Delete dead statements.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_plf (stmt, GF_PLF_2))
	{
	  gsi_next (&gsi);
	  continue;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
2797 
2798 /* If-convert LOOP when it is legal.  For the moment this pass has no
2799    profitability analysis.  Returns non-zero todo flags when something
2800    changed.  */
2801 
unsigned int
tree_if_conversion (struct loop *loop)
{
  unsigned int todo = 0;
  bool aggressive_if_conv;
  struct loop *rloop;

  /* Re-entered with LOOP set to the inner loop of a versioned outer
     loop (see the rloop handling at the bottom).  */
 again:
  rloop = NULL;
  ifc_bbs = NULL;
  any_pred_load_store = false;
  any_complicated_phi = false;

  /* Apply more aggressive if-conversion when loop or its outer loop were
     marked with simd pragma.  When that's the case, we try to if-convert
     loop containing PHIs with more than MAX_PHI_ARG_NUM arguments.  */
  aggressive_if_conv = loop->force_vectorize;
  if (!aggressive_if_conv)
    {
      struct loop *outer_loop = loop_outer (loop);
      if (outer_loop && outer_loop->force_vectorize)
	aggressive_if_conv = true;
    }

  if (!ifcvt_split_critical_edges (loop, aggressive_if_conv))
    goto cleanup;

  if (!if_convertible_loop_p (loop)
      || !dbg_cnt (if_conversion_tree))
    goto cleanup;

  /* Predicated memory accesses and complicated PHIs are only worth
     the cost when the loop may actually be vectorized.  */
  if ((any_pred_load_store || any_complicated_phi)
      && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
	  || loop->dont_vectorize))
    goto cleanup;

  /* Since we have no cost model, always version loops unless the user
     specified -ftree-loop-if-convert or unless versioning is required.
     Either version this loop, or if the pattern is right for outer-loop
     vectorization, version the outer loop.  In the latter case we will
     still if-convert the original inner loop.  */
  if (any_pred_load_store
      || any_complicated_phi
      || flag_tree_loop_if_convert != 1)
    {
      struct loop *vloop
	= (versionable_outer_loop_p (loop_outer (loop))
	   ? loop_outer (loop) : loop);
      struct loop *nloop = version_loop_for_if_conversion (vloop);
      if (nloop == NULL)
	goto cleanup;
      if (vloop != loop)
	{
	  /* If versionable_outer_loop_p decided to version the
	     outer loop, version also the inner loop of the non-vectorized
	     loop copy.  So we transform:
	      loop1
		loop2
	     into:
	      if (LOOP_VECTORIZED (1, 3))
		{
		  loop1
		    loop2
		}
	      else
		loop3 (copy of loop1)
		  if (LOOP_VECTORIZED (4, 5))
		    loop4 (copy of loop2)
		  else
		    loop5 (copy of loop4)  */
	  gcc_assert (nloop->inner && nloop->inner->next == NULL);
	  rloop = nloop->inner;
	}
    }

  /* Now all statements are if-convertible.  Combine all the basic
     blocks into one huge basic block doing the if-conversion
     on-the-fly.  */
  combine_blocks (loop);

  /* Delete dead predicate computations.  */
  ifcvt_local_dce (loop->header);

  todo |= TODO_cleanup_cfg;

  /* Shared exit path: release per-BB predicates whether we succeeded
     or bailed out early.  */
 cleanup:
  if (ifc_bbs)
    {
      unsigned int i;

      for (i = 0; i < loop->num_nodes; i++)
	free_bb_predicate (ifc_bbs[i]);

      free (ifc_bbs);
      ifc_bbs = NULL;
    }
  /* Process the saved inner loop of a versioned outer loop.  */
  if (rloop != NULL)
    {
      loop = rloop;
      goto again;
    }

  return todo;
}
2906 
2907 /* Tree if-conversion pass management.  */
2908 
2909 namespace {
2910 
const pass_data pass_data_if_conversion =
{
  GIMPLE_PASS, /* type */
  "ifcvt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_LOOP_IFCVT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required: needs CFG and SSA.  */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
2923 
/* Pass wrapper around tree_if_conversion; see gate/execute below.  */
class pass_if_conversion : public gimple_opt_pass
{
public:
  pass_if_conversion (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_if_conversion, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_if_conversion
2936 
2937 bool
2938 pass_if_conversion::gate (function *fun)
2939 {
2940   return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
2941 	   && flag_tree_loop_if_convert != 0)
2942 	  || flag_tree_loop_if_convert == 1);
2943 }
2944 
2945 unsigned int
2946 pass_if_conversion::execute (function *fun)
2947 {
2948   struct loop *loop;
2949   unsigned todo = 0;
2950 
2951   if (number_of_loops (fun) <= 1)
2952     return 0;
2953 
2954   FOR_EACH_LOOP (loop, 0)
2955     if (flag_tree_loop_if_convert == 1
2956 	|| ((flag_tree_loop_vectorize || loop->force_vectorize)
2957 	    && !loop->dont_vectorize))
2958       todo |= tree_if_conversion (loop);
2959 
2960   if (flag_checking)
2961     {
2962       basic_block bb;
2963       FOR_EACH_BB_FN (bb, fun)
2964 	gcc_assert (!bb->aux);
2965     }
2966 
2967   return todo;
2968 }
2969 
2970 } // anon namespace
2971 
2972 gimple_opt_pass *
2973 make_pass_if_conversion (gcc::context *ctxt)
2974 {
2975   return new pass_if_conversion (ctxt);
2976 }
2977