/* Combining of if-expressions on trees.
   Copyright (C) 2007-2018 Free Software Foundation, Inc.
   Contributed by Richard Guenther <rguenther@suse.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "tm_p.h"
#include "ssa.h"
#include "tree-pretty-print.h"
/* rtl is needed only because arm back-end requires it for
   BRANCH_COST.  */
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa.h"

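/* LOGICAL_OP_NON_SHORT_CIRCUIT is true when the target considers it
   cheap enough (a BRANCH_COST of at least 2 in this fallback
   definition) to evaluate both operands of a logical operation rather
   than branching.  It is used below to decide whether two generic
   comparisons may be combined into one condition with TRUTH_AND_EXPR
   when maybe_fold_and_comparisons cannot fold them.  */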
#ifndef LOGICAL_OP_NON_SHORT_CIRCUIT
#define LOGICAL_OP_NON_SHORT_CIRCUIT \
  (BRANCH_COST (optimize_function_for_speed_p (cfun), \
                false) >= 2)
#endif

/* This pass combines COND_EXPRs to simplify control flow.  It
   currently recognizes bit tests and comparisons in chains that
   represent the logical and or logical or of two COND_EXPRs.

   It does so by walking basic blocks in an approximate reverse
   post-dominator order and trying to match CFG patterns that
   represent the logical and or logical or of two COND_EXPRs.
   Transformations are done if the COND_EXPR conditions match
   either

     1. two single bit tests X & (1 << Yn) (for logical and)

     2. two bit tests X & Yn (for logical or)

     3. two comparisons X OPn Y (for logical or)

   To simplify this pass, removing basic blocks and dead code
   is left to CFG cleanup and DCE.  */

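/* As an illustrative sketch (not taken verbatim from any testcase, and
   the exact GIMPLE shapes vary), a chain like

     if (a & (1 << b))
       if (a & (1 << c))
         x = 1;

   can be rewritten into the single test

     if ((a & ((1 << b) | (1 << c))) == ((1 << b) | (1 << c)))
       x = 1;

   and two ordinary comparisons guarding the same destination can be
   merged into one GIMPLE_COND when maybe_fold_and_comparisons is able
   to fold them.  */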

/* Recognize an if-then-else CFG pattern starting to match with the
   COND_BB basic-block containing the COND_EXPR.  The recognized
   then and else blocks are stored in *THEN_BB and *ELSE_BB.  If
   *THEN_BB and/or *ELSE_BB are already set, they are required to
   match the then and else basic-blocks to make the pattern match.
   Returns true if the pattern matched, false otherwise.  */

static bool
recognize_if_then_else (basic_block cond_bb,
			basic_block *then_bb, basic_block *else_bb)
{
  edge t, e;

  if (EDGE_COUNT (cond_bb->succs) != 2)
    return false;

  /* Find the then/else edges.  */
  t = EDGE_SUCC (cond_bb, 0);
  e = EDGE_SUCC (cond_bb, 1);
  if (!(t->flags & EDGE_TRUE_VALUE))
    std::swap (t, e);
  if (!(t->flags & EDGE_TRUE_VALUE)
      || !(e->flags & EDGE_FALSE_VALUE))
    return false;

  /* Check if the edge destinations point to the required block.  */
  if (*then_bb
      && t->dest != *then_bb)
    return false;
  if (*else_bb
      && e->dest != *else_bb)
    return false;

  if (!*then_bb)
    *then_bb = t->dest;
  if (!*else_bb)
    *else_bb = e->dest;

  return true;
}

/* Verify if the basic block BB does not have side-effects.  Return
   true in this case, else false.  */

static bool
bb_no_side_effects_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_has_side_effects (stmt)
	  || gimple_uses_undefined_value_p (stmt)
	  || gimple_could_trap_p (stmt)
	  || gimple_vuse (stmt)
	  /* const calls don't match any of the above, yet they could
	     still have some side-effects - they could contain
	     gimple_could_trap_p statements, like floating point
	     exceptions or integer division by zero.  See PR70586.
	     FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
	     should handle this.  */
	  || is_gimple_call (stmt))
	return false;
    }

  return true;
}

/* Return true if BB is an empty forwarder block to TO_BB.  */

static bool
forwarder_block_to (basic_block bb, basic_block to_bb)
{
  return empty_block_p (bb)
	 && single_succ_p (bb)
	 && single_succ (bb) == to_bb;
}

/* Verify if all PHI node arguments in DEST for edges from BB1 or
   BB2 to DEST are the same.  This makes the CFG merge point
   free from side-effects.  Return true in this case, else false.  */

static bool
same_phi_args_p (basic_block bb1, basic_block bb2, basic_block dest)
{
  edge e1 = find_edge (bb1, dest);
  edge e2 = find_edge (bb2, dest);
  gphi_iterator gsi;
  gphi *phi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi.phi ();
      if (!operand_equal_p (PHI_ARG_DEF_FROM_EDGE (phi, e1),
			    PHI_ARG_DEF_FROM_EDGE (phi, e2), 0))
        return false;
    }

  return true;
}

/* Return the best representative SSA name for CANDIDATE which is used
   in a bit test.  */
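/* For example (an illustrative sketch): if CANDIDATE is tmp_2, has a
   single use, and is defined by tmp_2 = (unsigned char) x_1 where x_1
   has a wider type, the original x_1 is returned instead of tmp_2;
   otherwise CANDIDATE itself is returned.  */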

static tree
get_name_for_bit_test (tree candidate)
{
  /* Skip single-use names in favor of using the name from a
     non-widening conversion definition.  */
  if (TREE_CODE (candidate) == SSA_NAME
      && has_single_use (candidate))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (candidate);
      if (is_gimple_assign (def_stmt)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	{
	  if (TYPE_PRECISION (TREE_TYPE (candidate))
	      <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
	    return gimple_assign_rhs1 (def_stmt);
	}
    }

  return candidate;
}

/* Recognize a single bit test pattern in GIMPLE_COND and its defining
   statements.  Store the name being tested in *NAME and the bit
   in *BIT.  The GIMPLE_COND computes *NAME & (1 << *BIT).
   Returns true if the pattern matched, false otherwise.  */
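/* For instance (an illustrative sketch, not from a testcase), for a
   source-level test like

     if ((x >> n) & 1)

   this sets *NAME to x and *BIT to n, and for

     if (x & 0x10)

   it sets *NAME to x and *BIT to 4.  */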

static bool
recognize_single_bit_test (gcond *cond, tree *name, tree *bit, bool inv)
{
  gimple *stmt;

  /* Get at the definition of the result of the bit test.  */
  if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return false;
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt))
    return false;

  /* Look at which bit is tested.  One form to recognize is
     D.1985_5 = state_3(D) >> control1_4(D);
     D.1986_6 = (int) D.1985_5;
     D.1987_7 = op0 & 1;
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && integer_onep (gimple_assign_rhs2 (stmt))
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    {
      tree orig_name = gimple_assign_rhs1 (stmt);

      /* Look through copies and conversions to eventually
	 find the stmt that computes the shift.  */
      stmt = SSA_NAME_DEF_STMT (orig_name);

      while (is_gimple_assign (stmt)
	     && ((CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
		  && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (stmt)))
		      <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))))
		  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
		 || gimple_assign_ssa_name_copy_p (stmt)))
	stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));

      /* If we found such, decompose it.  */
      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == RSHIFT_EXPR)
	{
	  /* op0 & (1 << op1) */
	  *bit = gimple_assign_rhs2 (stmt);
	  *name = gimple_assign_rhs1 (stmt);
	}
      else
	{
	  /* t & 1 */
	  *bit = integer_zero_node;
	  *name = get_name_for_bit_test (orig_name);
	}

      return true;
    }

  /* Another form is
     D.1987_7 = op0 & (1 << CST)
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && integer_pow2p (gimple_assign_rhs2 (stmt)))
    {
      *name = gimple_assign_rhs1 (stmt);
      *bit = build_int_cst (integer_type_node,
			    tree_log2 (gimple_assign_rhs2 (stmt)));
      return true;
    }

  /* Another form is
     D.1986_6 = 1 << control1_4(D)
     D.1987_7 = op0 & D.1986_6
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    {
      gimple *tmp;

      /* Both arguments of the BIT_AND_EXPR can be the single-bit
	 specifying expression.  */
      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs2 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}

      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs1 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}
    }

  return false;
}

/* Recognize a bit test pattern in a GIMPLE_COND and its defining
   statements.  Store the name being tested in *NAME and the bits
   in *BITS.  The GIMPLE_COND computes *NAME & *BITS.
   Returns true if the pattern matched, false otherwise.  */

static bool
recognize_bits_test (gcond *cond, tree *name, tree *bits, bool inv)
{
  gimple *stmt;

  /* Get at the definition of the result of the bit test.  */
  if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return false;
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR)
    return false;

  *name = get_name_for_bit_test (gimple_assign_rhs1 (stmt));
  *bits = gimple_assign_rhs2 (stmt);

  return true;
}


/* Update profile after code in outer_cond_bb was adjusted so
   outer_cond_bb has no condition.  */

static void
update_profile_after_ifcombine (basic_block inner_cond_bb,
			        basic_block outer_cond_bb)
{
  edge outer_to_inner = find_edge (outer_cond_bb, inner_cond_bb);
  edge outer2 = (EDGE_SUCC (outer_cond_bb, 0) == outer_to_inner
		 ? EDGE_SUCC (outer_cond_bb, 1)
		 : EDGE_SUCC (outer_cond_bb, 0));
  edge inner_taken = EDGE_SUCC (inner_cond_bb, 0);
  edge inner_not_taken = EDGE_SUCC (inner_cond_bb, 1);

  if (inner_taken->dest != outer2->dest)
    std::swap (inner_taken, inner_not_taken);
  gcc_assert (inner_taken->dest == outer2->dest);

  /* In the following we assume that inner_cond_bb has a single
     predecessor.  */
  gcc_assert (single_pred_p (inner_cond_bb));

  /* Path outer_cond_bb->(outer2) needs to be merged into path
     outer_cond_bb->(outer_to_inner)->inner_cond_bb->(inner_taken)
     and the probability of inner_not_taken updated.  */
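  /* As a worked example with made-up numbers: if outer_to_inner had
     probability 0.6, outer2 had 0.4 and inner_taken had 0.5, the merged
     inner_taken probability becomes 0.4 + 0.6 * 0.5 = 0.7 and
     inner_not_taken becomes 1 - 0.7 = 0.3.  */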

  inner_cond_bb->count = outer_cond_bb->count;

  inner_taken->probability = outer2->probability + outer_to_inner->probability
			     * inner_taken->probability;
  inner_not_taken->probability = profile_probability::always ()
				 - inner_taken->probability;

  outer_to_inner->probability = profile_probability::always ();
  outer2->probability = profile_probability::never ();
}

/* If-convert an 'and' pattern with a common else block.  The inner
   if is specified by its INNER_COND_BB, the outer by OUTER_COND_BB.
   inner_inv, outer_inv and result_inv indicate whether the conditions
   are inverted.
   Returns true if the edges to the common else basic-block were merged.  */

static bool
ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
		   basic_block outer_cond_bb, bool outer_inv, bool result_inv)
{
  gimple_stmt_iterator gsi;
  gimple *inner_stmt, *outer_stmt;
  gcond *inner_cond, *outer_cond;
  tree name1, name2, bit1, bit2, bits1, bits2;

  inner_stmt = last_stmt (inner_cond_bb);
  if (!inner_stmt
      || gimple_code (inner_stmt) != GIMPLE_COND)
    return false;
  inner_cond = as_a <gcond *> (inner_stmt);

  outer_stmt = last_stmt (outer_cond_bb);
  if (!outer_stmt
      || gimple_code (outer_stmt) != GIMPLE_COND)
    return false;
  outer_cond = as_a <gcond *> (outer_stmt);

  /* See if we test a single bit of the same name in both tests.  In
     that case remove the outer test, merging both else edges,
     and change the inner one to test for
     name & ((1 << bit1) | (1 << bit2)) == ((1 << bit1) | (1 << bit2)).  */
  if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv)
      && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv)
      && name1 == name2)
    {
      tree t, t2;

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		       build_int_cst (TREE_TYPE (name1), 1), bit1);
      t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		        build_int_cst (TREE_TYPE (name1), 1), bit2);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR,
		       boolean_type_node, t2, t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing double bit test to ");
	  print_generic_expr (dump_file, name1);
	  fprintf (dump_file, " & T == T\nwith temporary T = (1 << ");
	  print_generic_expr (dump_file, bit1);
	  fprintf (dump_file, ") | (1 << ");
	  print_generic_expr (dump_file, bit2);
	  fprintf (dump_file, ")\n");
	}

      return true;
    }

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
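  /* For instance (an illustrative sketch), an || chain such as

       if ((a & 0x0f) || (a & 0xf0))
         x = 1;

     becomes the single test

       if (a & 0xff)
         x = 1;  */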
  else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv)
      && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Find the common name which is bit-tested.  */
      if (name1 == name2)
	;
      else if (bits1 == bits2)
	{
	  std::swap (name2, bits2);
	  std::swap (name1, bits1);
	}
      else if (name1 == bits2)
	std::swap (name2, bits2);
      else if (bits1 == name2)
	std::swap (name1, bits1);
      else
	return false;

      /* As we strip non-widening conversions in finding a common
         name that is tested, make sure to end up with an integral
	 type for building the bit operations.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
	  >= TYPE_PRECISION (TREE_TYPE (bits2)))
	{
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  name1 = fold_convert (TREE_TYPE (bits1), name1);
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  bits2 = fold_convert (TREE_TYPE (bits1), bits2);
	}
      else
	{
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  name1 = fold_convert (TREE_TYPE (bits2), name1);
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  bits1 = fold_convert (TREE_TYPE (bits2), bits1);
	}

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);
      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing bits or bits test to ");
	  print_generic_expr (dump_file, name1);
	  fprintf (dump_file, " & T != 0\nwith temporary T = ");
	  print_generic_expr (dump_file, bits1);
	  fprintf (dump_file, " | ");
	  print_generic_expr (dump_file, bits2);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  /* See if we have two comparisons that we can merge into one.  */
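  /* For example (an illustrative sketch), if x > 1 guards an inner test
     of x > 2 for the same destination, maybe_fold_and_comparisons can
     fold the pair into the single condition x > 2; if it cannot fold
     them and LOGICAL_OP_NON_SHORT_CIRCUIT allows it, the two conditions
     are instead combined with TRUTH_AND_EXPR.  */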
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
	   && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison)
    {
      tree t;
      enum tree_code inner_cond_code = gimple_cond_code (inner_cond);
      enum tree_code outer_cond_code = gimple_cond_code (outer_cond);

      /* Invert comparisons if necessary (and possible).  */
      if (inner_inv)
	inner_cond_code = invert_tree_comparison (inner_cond_code,
	  HONOR_NANS (gimple_cond_lhs (inner_cond)));
      if (inner_cond_code == ERROR_MARK)
	return false;
      if (outer_inv)
	outer_cond_code = invert_tree_comparison (outer_cond_code,
	  HONOR_NANS (gimple_cond_lhs (outer_cond)));
      if (outer_cond_code == ERROR_MARK)
	return false;
      /* Don't return false so fast, try maybe_fold_or_comparisons?  */

      if (!(t = maybe_fold_and_comparisons (inner_cond_code,
					    gimple_cond_lhs (inner_cond),
					    gimple_cond_rhs (inner_cond),
					    outer_cond_code,
					    gimple_cond_lhs (outer_cond),
					    gimple_cond_rhs (outer_cond))))
	{
	  tree t1, t2;
	  gimple_stmt_iterator gsi;
	  if (!LOGICAL_OP_NON_SHORT_CIRCUIT || flag_sanitize_coverage)
	    return false;
	  /* Only do this optimization if the inner bb contains only the conditional. */
	  if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
	    return false;
	  t1 = fold_build2_loc (gimple_location (inner_cond),
				inner_cond_code,
				boolean_type_node,
				gimple_cond_lhs (inner_cond),
				gimple_cond_rhs (inner_cond));
	  t2 = fold_build2_loc (gimple_location (outer_cond),
				outer_cond_code,
				boolean_type_node,
				gimple_cond_lhs (outer_cond),
				gimple_cond_rhs (outer_cond));
	  t = fold_build2_loc (gimple_location (inner_cond),
			       TRUTH_AND_EXPR, boolean_type_node, t1, t2);
	  if (result_inv)
	    {
	      t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
	      result_inv = false;
	    }
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr, NULL, true,
					  GSI_SAME_STMT);
	}
      if (result_inv)
	t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);
      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing two comparisons to ");
	  print_generic_expr (dump_file, t);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  return false;
}

/* Helper function for tree_ssa_ifcombine_bb.  Recognize a CFG pattern and
   dispatch to the appropriate if-conversion helper for a particular
   set of INNER_COND_BB, OUTER_COND_BB, THEN_BB and ELSE_BB.
   PHI_PRED_BB should be one of INNER_COND_BB, THEN_BB or ELSE_BB.  */

static bool
tree_ssa_ifcombine_bb_1 (basic_block inner_cond_bb, basic_block outer_cond_bb,
			 basic_block then_bb, basic_block else_bb,
			 basic_block phi_pred_bb)
{
  /* The && form is characterized by a common else_bb with
     the two edges leading to it mergeable.  The latter is
     guaranteed by matching PHI arguments in the else_bb and
     the inner cond_bb having no side-effects.  */
  if (phi_pred_bb != else_bb
      && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &else_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto inner_cond_bb; else goto else_bb;
	   <inner_cond_bb>
	     if (p) goto ...; else goto else_bb;
	     ...
	   <else_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, false,
				false);
    }

  /* And a version where the outer condition is negated.  */
  if (phi_pred_bb != else_bb
      && recognize_if_then_else (outer_cond_bb, &else_bb, &inner_cond_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto else_bb; else goto inner_cond_bb;
	   <inner_cond_bb>
	     if (p) goto ...; else goto else_bb;
	     ...
	   <else_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, true,
				false);
    }

  /* The || form is characterized by a common then_bb with the
     two edges leading to it mergeable.  The latter is guaranteed
     by matching PHI arguments in the then_bb and the inner cond_bb
     having no side-effects.  */
  if (phi_pred_bb != then_bb
      && recognize_if_then_else (outer_cond_bb, &then_bb, &inner_cond_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto then_bb; else goto inner_cond_bb;
	   <inner_cond_bb>
	     if (p) goto then_bb; else goto ...;
	   <then_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, true,
				true);
    }

  /* And a version where the outer condition is negated.  */
  if (phi_pred_bb != then_bb
      && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &then_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto inner_cond_bb; else goto then_bb;
	   <inner_cond_bb>
	     if (p) goto then_bb; else goto ...;
	   <then_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, false,
				true);
    }

  return false;
}

/* Recognize a CFG pattern and dispatch to the appropriate
   if-conversion helper.  We start with BB as the innermost
   worker basic-block.  Returns true if a transformation was done.  */

static bool
tree_ssa_ifcombine_bb (basic_block inner_cond_bb)
{
  basic_block then_bb = NULL, else_bb = NULL;

  if (!recognize_if_then_else (inner_cond_bb, &then_bb, &else_bb))
    return false;

  /* Recognize && and || of two conditions with a common
     then/else block whose entry edges we can merge.  That is:
       if (a || b)
	 ;
     and
       if (a && b)
	 ;
     This requires a single predecessor of the inner cond_bb.  */
  if (single_pred_p (inner_cond_bb)
      && bb_no_side_effects_p (inner_cond_bb))
    {
      basic_block outer_cond_bb = single_pred (inner_cond_bb);

      if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb,
				   then_bb, else_bb, inner_cond_bb))
	return true;

      if (forwarder_block_to (else_bb, then_bb))
	{
	  /* Other possibilities for the && form, if else_bb is an
	     empty forwarder block to then_bb.  Compared to the above simpler
	     forms this can be treated as if then_bb and else_bb were swapped,
	     and the corresponding inner_cond_bb not inverted because of that.
	     For same_phi_args_p we look at equality of arguments between
	     edge from outer_cond_bb and the forwarder block.  */
	  if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
				       then_bb, else_bb))
	    return true;
	}
      else if (forwarder_block_to (then_bb, else_bb))
	{
	  /* Other possibilities for the || form, if then_bb is an
	     empty forwarder block to else_bb.  Compared to the above simpler
	     forms this can be treated as if then_bb and else_bb were swapped,
	     and the corresponding inner_cond_bb not inverted because of that.
	     For same_phi_args_p we look at equality of arguments between
	     edge from outer_cond_bb and the forwarder block.  */
	  if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
				       then_bb, then_bb))
	    return true;
	}
    }

  return false;
}

/* Main entry point for the tree if-combine pass.  */

namespace {

const pass_data pass_data_tree_ifcombine =
{
  GIMPLE_PASS, /* type */
  "ifcombine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_IFCOMBINE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_tree_ifcombine : public gimple_opt_pass
{
public:
  pass_tree_ifcombine (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_ifcombine, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_tree_ifcombine

unsigned int
pass_tree_ifcombine::execute (function *fun)
{
  basic_block *bbs;
  bool cfg_changed = false;
  int i;

  bbs = single_pred_before_succ_order ();
  calculate_dominance_info (CDI_DOMINATORS);

  /* Search every basic block for a COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed after the predecessor.
     This ensures that we collapse outer ifs before visiting the
     inner ones, and also that we do not try to visit a removed
     block.  This is the opposite of PHI-OPT, because we cascade the
     combining rather than cascading PHIs.  */
  for (i = n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
    {
      basic_block bb = bbs[i];
      gimple *stmt = last_stmt (bb);

      if (stmt
	  && gimple_code (stmt) == GIMPLE_COND)
	if (tree_ssa_ifcombine_bb (bb))
	  {
	    /* Clear range info from all stmts in BB which is now executed
	       conditionally on an always true/false condition.  */
	    reset_flow_sensitive_info_in_bb (bb);
	    cfg_changed |= true;
	  }
    }

  free (bbs);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_tree_ifcombine (gcc::context *ctxt)
{
  return new pass_tree_ifcombine (ctxt);
}