/* Combining of if-expressions on trees.
   Copyright (C) 2007-2020 Free Software Foundation, Inc.
   Contributed by Richard Guenther <rguenther@suse.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "tm_p.h"
#include "ssa.h"
#include "tree-pretty-print.h"
/* rtl is needed only because arm back-end requires it for
   BRANCH_COST.  */
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa.h"

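/* If the target does not define LOGICAL_OP_NON_SHORT_CIRCUIT, assume
   evaluating both conditions unconditionally is acceptable whenever
   branches are expensive (a BRANCH_COST of at least 2); see the use
   in ifcombine_ifandif below.  */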
#ifndef LOGICAL_OP_NON_SHORT_CIRCUIT
#define LOGICAL_OP_NON_SHORT_CIRCUIT \
  (BRANCH_COST (optimize_function_for_speed_p (cfun), \
		false) >= 2)
#endif

/* This pass combines COND_EXPRs to simplify control flow.  It
   currently recognizes bit tests and comparisons in chains that
   represent a logical AND or a logical OR of two COND_EXPRs.

   It does so by walking basic blocks in an approximate reverse
   post-dominator order and trying to match CFG patterns that
   represent such a logical AND or OR of two COND_EXPRs.
   Transformations are done if the COND_EXPR conditions match
   either

     1. two single bit tests X & (1 << Yn) (for logical AND)

     2. two bit tests X & Yn (for logical OR)

     3. two comparisons X OPn Y (for logical OR)

   To simplify this pass, removing basic blocks and dead code
   is left to CFG cleanup and DCE.  */
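
/* As a rough illustration (hypothetical user code, not part of this
   file), a condition chain such as

     if (x & (1 << a))
       if (x & (1 << b))
	 f ();

   matches form 1 above and is rewritten so that a single test of the
   shape (x & T) == T with T = (1 << a) | (1 << b) guards the call,
   while

     if (p == 0 || q == 0)
       g ();

   whose short-circuit expansion typically produces two comparison
   blocks, matches form 3 and may be merged back into one combined
   condition when that is considered profitable.  */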


/* Recognize an if-then-else CFG pattern, starting the match at the
   COND_BB basic-block containing the COND_EXPR.  The recognized
   then and else blocks are stored to *THEN_BB and *ELSE_BB.  If
   *THEN_BB and/or *ELSE_BB are already set, they are required to
   match the then and else basic-blocks to make the pattern match.
   Returns true if the pattern matched, false otherwise.  */

static bool
recognize_if_then_else (basic_block cond_bb,
			basic_block *then_bb, basic_block *else_bb)
{
  edge t, e;

  if (EDGE_COUNT (cond_bb->succs) != 2)
    return false;

  /* Find the then/else edges.  */
  t = EDGE_SUCC (cond_bb, 0);
  e = EDGE_SUCC (cond_bb, 1);
  if (!(t->flags & EDGE_TRUE_VALUE))
    std::swap (t, e);
  if (!(t->flags & EDGE_TRUE_VALUE)
      || !(e->flags & EDGE_FALSE_VALUE))
    return false;

  /* Check if the edge destinations point to the required block.  */
  if (*then_bb
      && t->dest != *then_bb)
    return false;
  if (*else_bb
      && e->dest != *else_bb)
    return false;

  if (!*then_bb)
    *then_bb = t->dest;
  if (!*else_bb)
    *else_bb = e->dest;

  return true;
}

/* Verify if the basic block BB does not have side-effects.  Return
   true in this case, else false.  */

static bool
bb_no_side_effects_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_has_side_effects (stmt)
	  || gimple_uses_undefined_value_p (stmt)
	  || gimple_could_trap_p (stmt)
	  || gimple_vuse (stmt)
	  /* const calls don't match any of the above, yet they could
	     still have some side-effects - they could contain
	     gimple_could_trap_p statements, like floating point
	     exceptions or integer division by zero.  See PR70586.
	     FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
	     should handle this.  */
	  || is_gimple_call (stmt))
	return false;
    }

  return true;
}

/* Return true if BB is an empty forwarder block to TO_BB.  */

static bool
forwarder_block_to (basic_block bb, basic_block to_bb)
{
  return empty_block_p (bb)
	 && single_succ_p (bb)
	 && single_succ (bb) == to_bb;
}

/* Verify if all PHI node arguments in DEST for edges from BB1 or
   BB2 to DEST are the same.  This makes the CFG merge point
   free from side-effects.  Return true in this case, else false.  */

static bool
same_phi_args_p (basic_block bb1, basic_block bb2, basic_block dest)
{
  edge e1 = find_edge (bb1, dest);
  edge e2 = find_edge (bb2, dest);
  gphi_iterator gsi;
  gphi *phi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi.phi ();
      if (!operand_equal_p (PHI_ARG_DEF_FROM_EDGE (phi, e1),
			    PHI_ARG_DEF_FROM_EDGE (phi, e2), 0))
	return false;
    }

  return true;
}

/* Return the best representative SSA name for CANDIDATE which is used
   in a bit test.  */

static tree
get_name_for_bit_test (tree candidate)
{
  /* Skip single-use names in favor of using the name from a
     non-widening conversion definition.  */
  if (TREE_CODE (candidate) == SSA_NAME
      && has_single_use (candidate))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (candidate);
      if (is_gimple_assign (def_stmt)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	{
	  if (TYPE_PRECISION (TREE_TYPE (candidate))
	      <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
	    return gimple_assign_rhs1 (def_stmt);
	}
    }

  return candidate;
}

/* Recognize a single bit test pattern in GIMPLE_COND and its defining
   statements.  Store the name being tested in *NAME and the bit
   in *BIT.  The GIMPLE_COND computes *NAME & (1 << *BIT).
   Returns true if the pattern matched, false otherwise.  */
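/* For example (an illustration of the contract above, not a list of
   every accepted form), for a condition derived from ((x >> b) & 1) != 0
   this stores x in *NAME and b in *BIT, and for (x & 4) != 0 it stores
   x in *NAME and 2 in *BIT.  With INV true the corresponding == 0
   forms are matched instead.  */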

static bool
recognize_single_bit_test (gcond *cond, tree *name, tree *bit, bool inv)
{
  gimple *stmt;

  /* Get at the definition of the result of the bit test.  */
  if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return false;
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt))
    return false;

  /* Look at which bit is tested.  One form to recognize is
     D.1985_5 = state_3(D) >> control1_4(D);
     D.1986_6 = (int) D.1985_5;
     D.1987_7 = op0 & 1;
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && integer_onep (gimple_assign_rhs2 (stmt))
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    {
      tree orig_name = gimple_assign_rhs1 (stmt);

      /* Look through copies and conversions to eventually
	 find the stmt that computes the shift.  */
      stmt = SSA_NAME_DEF_STMT (orig_name);

      while (is_gimple_assign (stmt)
	     && ((CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
		  && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (stmt)))
		      <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))))
		  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
		 || gimple_assign_ssa_name_copy_p (stmt)))
	stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));

      /* If we found such, decompose it.  */
      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == RSHIFT_EXPR)
	{
	  /* op0 & (1 << op1) */
	  *bit = gimple_assign_rhs2 (stmt);
	  *name = gimple_assign_rhs1 (stmt);
	}
      else
	{
	  /* t & 1 */
	  *bit = integer_zero_node;
	  *name = get_name_for_bit_test (orig_name);
	}

      return true;
    }

  /* Another form is
     D.1987_7 = op0 & (1 << CST)
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && integer_pow2p (gimple_assign_rhs2 (stmt)))
    {
      *name = gimple_assign_rhs1 (stmt);
      *bit = build_int_cst (integer_type_node,
			    tree_log2 (gimple_assign_rhs2 (stmt)));
      return true;
    }

  /* Another form is
     D.1986_6 = 1 << control1_4(D)
     D.1987_7 = op0 & D.1986_6
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    {
      gimple *tmp;

      /* Both arguments of the BIT_AND_EXPR can be the single-bit
	 specifying expression.  */
      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs2 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}

      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs1 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}
    }

  return false;
}

/* Recognize a bit test pattern in a GIMPLE_COND and its defining
   statements.  Store the name being tested in *NAME and the bits
   in *BITS.  The COND_EXPR computes *NAME & *BITS.
   Returns true if the pattern matched, false otherwise.  */

static bool
recognize_bits_test (gcond *cond, tree *name, tree *bits, bool inv)
{
  gimple *stmt;

  /* Get at the definition of the result of the bit test.  */
  if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return false;
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR)
    return false;

  *name = get_name_for_bit_test (gimple_assign_rhs1 (stmt));
  *bits = gimple_assign_rhs2 (stmt);

  return true;
}


/* Update profile after code in outer_cond_bb was adjusted so
   outer_cond_bb has no condition.  */

static void
update_profile_after_ifcombine (basic_block inner_cond_bb,
				basic_block outer_cond_bb)
{
  edge outer_to_inner = find_edge (outer_cond_bb, inner_cond_bb);
  edge outer2 = (EDGE_SUCC (outer_cond_bb, 0) == outer_to_inner
		 ? EDGE_SUCC (outer_cond_bb, 1)
		 : EDGE_SUCC (outer_cond_bb, 0));
  edge inner_taken = EDGE_SUCC (inner_cond_bb, 0);
  edge inner_not_taken = EDGE_SUCC (inner_cond_bb, 1);

  if (inner_taken->dest != outer2->dest)
    std::swap (inner_taken, inner_not_taken);
  gcc_assert (inner_taken->dest == outer2->dest);

  /* In the following we assume that inner_cond_bb has a single predecessor.  */
  gcc_assert (single_pred_p (inner_cond_bb));

  /* Path outer_cond_bb->(outer2) needs to be merged into path
     outer_cond_bb->(outer_to_inner)->inner_cond_bb->(inner_taken)
     and the probability of inner_not_taken updated.  */

  inner_cond_bb->count = outer_cond_bb->count;

  /* Handle the special case where inner_taken's probability is always.  In
     this case we know that the combined outcome will be always as well, but
     combining the probabilities is conservative because it does not know
     that outer2->probability is the inverse of outer_to_inner->probability.  */
  if (inner_taken->probability == profile_probability::always ())
    ;
  else
    inner_taken->probability = outer2->probability + outer_to_inner->probability
			       * inner_taken->probability;
  inner_not_taken->probability = profile_probability::always ()
				 - inner_taken->probability;

  outer_to_inner->probability = profile_probability::always ();
  outer2->probability = profile_probability::never ();
}

/* If-convert an AND pattern with a common else block.  The inner
   if is specified by its INNER_COND_BB, the outer by OUTER_COND_BB.
   INNER_INV, OUTER_INV and RESULT_INV indicate whether the conditions
   are inverted.
   Returns true if the edges to the common else basic-block were merged.  */

static bool
ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
		   basic_block outer_cond_bb, bool outer_inv, bool result_inv)
{
  gimple_stmt_iterator gsi;
  gimple *inner_stmt, *outer_stmt;
  gcond *inner_cond, *outer_cond;
  tree name1, name2, bit1, bit2, bits1, bits2;

  inner_stmt = last_stmt (inner_cond_bb);
  if (!inner_stmt
      || gimple_code (inner_stmt) != GIMPLE_COND)
    return false;
  inner_cond = as_a <gcond *> (inner_stmt);

  outer_stmt = last_stmt (outer_cond_bb);
  if (!outer_stmt
      || gimple_code (outer_stmt) != GIMPLE_COND)
    return false;
  outer_cond = as_a <gcond *> (outer_stmt);

  /* See if we test a single bit of the same name in both tests.  In
     that case remove the outer test, merging both else edges,
     and change the inner one to test for
     name & (bit1 | bit2) == (bit1 | bit2).  */
  if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv)
      && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv)
      && name1 == name2)
    {
      tree t, t2;

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		       build_int_cst (TREE_TYPE (name1), 1), bit1);
      t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
			build_int_cst (TREE_TYPE (name1), 1), bit2);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR,
		       boolean_type_node, t2, t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing double bit test to ");
	  print_generic_expr (dump_file, name1);
	  fprintf (dump_file, " & T == T\nwith temporary T = (1 << ");
	  print_generic_expr (dump_file, bit1);
	  fprintf (dump_file, ") | (1 << ");
	  print_generic_expr (dump_file, bit2);
	  fprintf (dump_file, ")\n");
	}

      return true;
    }

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
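  /* For instance (purely illustrative), a chain equivalent to
     (x & 0xf0) == 0 && (x & 0x0f) == 0 is narrowed here to the single
     test (x & 0xff) == 0; likewise the || form of the corresponding
     != 0 tests becomes (x & 0xff) != 0.  */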
  else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv)
	   && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Find the common name which is bit-tested.  */
      if (name1 == name2)
	;
      else if (bits1 == bits2)
	{
	  std::swap (name2, bits2);
	  std::swap (name1, bits1);
	}
      else if (name1 == bits2)
	std::swap (name2, bits2);
      else if (bits1 == name2)
	std::swap (name1, bits1);
      else
	return false;

      /* As we strip non-widening conversions in finding a common
	 name that is tested, make sure to end up with an integral
	 type for building the bit operations.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
	  >= TYPE_PRECISION (TREE_TYPE (bits2)))
	{
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  name1 = fold_convert (TREE_TYPE (bits1), name1);
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  bits2 = fold_convert (TREE_TYPE (bits1), bits2);
	}
      else
	{
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  name1 = fold_convert (TREE_TYPE (bits2), name1);
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  bits1 = fold_convert (TREE_TYPE (bits2), bits1);
	}

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);
      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing bits or bits test to ");
	  print_generic_expr (dump_file, name1);
	  fprintf (dump_file, " & T != 0\nwith temporary T = ");
	  print_generic_expr (dump_file, bits1);
	  fprintf (dump_file, " | ");
	  print_generic_expr (dump_file, bits2);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  /* See if we have two comparisons that we can merge into one.  */
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
	   && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison)
    {
      tree t;
      enum tree_code inner_cond_code = gimple_cond_code (inner_cond);
      enum tree_code outer_cond_code = gimple_cond_code (outer_cond);

      /* Invert comparisons if necessary (and possible).  */
      if (inner_inv)
	inner_cond_code = invert_tree_comparison (inner_cond_code,
	  HONOR_NANS (gimple_cond_lhs (inner_cond)));
      if (inner_cond_code == ERROR_MARK)
	return false;
      if (outer_inv)
	outer_cond_code = invert_tree_comparison (outer_cond_code,
	  HONOR_NANS (gimple_cond_lhs (outer_cond)));
      if (outer_cond_code == ERROR_MARK)
	return false;
      /* Don't return false so fast, try maybe_fold_or_comparisons?  */

      if (!(t = maybe_fold_and_comparisons (boolean_type_node, inner_cond_code,
					    gimple_cond_lhs (inner_cond),
					    gimple_cond_rhs (inner_cond),
					    outer_cond_code,
					    gimple_cond_lhs (outer_cond),
					    gimple_cond_rhs (outer_cond))))
	{
	  tree t1, t2;
	  gimple_stmt_iterator gsi;
	  bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
	  if (param_logical_op_non_short_circuit != -1)
	    logical_op_non_short_circuit
	      = param_logical_op_non_short_circuit;
	  if (!logical_op_non_short_circuit || flag_sanitize_coverage)
	    return false;
	  /* Only do this optimization if the inner bb contains only the
	     conditional.  */
	  if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
	    return false;
	  t1 = fold_build2_loc (gimple_location (inner_cond),
				inner_cond_code,
				boolean_type_node,
				gimple_cond_lhs (inner_cond),
				gimple_cond_rhs (inner_cond));
	  t2 = fold_build2_loc (gimple_location (outer_cond),
				outer_cond_code,
				boolean_type_node,
				gimple_cond_lhs (outer_cond),
				gimple_cond_rhs (outer_cond));
	  t = fold_build2_loc (gimple_location (inner_cond),
			       TRUTH_AND_EXPR, boolean_type_node, t1, t2);
	  if (result_inv)
	    {
	      t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
	      result_inv = false;
	    }
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr, NULL, true,
					  GSI_SAME_STMT);
	}
      if (result_inv)
	t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      if (!is_gimple_condexpr_for_cond (t))
	{
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr_for_cond,
					  NULL, true, GSI_SAME_STMT);
	}
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);
      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing two comparisons to ");
	  print_generic_expr (dump_file, t);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  return false;
}

/* Helper function for tree_ssa_ifcombine_bb.  Recognize a CFG pattern and
   dispatch to the appropriate if-conversion helper for a particular
   set of INNER_COND_BB, OUTER_COND_BB, THEN_BB and ELSE_BB.
   PHI_PRED_BB should be one of INNER_COND_BB, THEN_BB or ELSE_BB.  */

static bool
tree_ssa_ifcombine_bb_1 (basic_block inner_cond_bb, basic_block outer_cond_bb,
			 basic_block then_bb, basic_block else_bb,
			 basic_block phi_pred_bb)
{
  /* The && form is characterized by a common else_bb with
     the two edges leading to it mergable.  The latter is
     guaranteed by matching PHI arguments in the else_bb and
     the inner cond_bb having no side-effects.  */
  if (phi_pred_bb != else_bb
      && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &else_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto inner_cond_bb; else goto else_bb;
	   <inner_cond_bb>
	     if (p) goto ...; else goto else_bb;
	   ...
	   <else_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, false,
				false);
    }

  /* And a version where the outer condition is negated.  */
  if (phi_pred_bb != else_bb
      && recognize_if_then_else (outer_cond_bb, &else_bb, &inner_cond_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto else_bb; else goto inner_cond_bb;
	   <inner_cond_bb>
	     if (p) goto ...; else goto else_bb;
	   ...
	   <else_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, true,
				false);
    }

  /* The || form is characterized by a common then_bb with the
     two edges leading to it mergable.  The latter is guaranteed
     by matching PHI arguments in the then_bb and the inner cond_bb
     having no side-effects.  */
  if (phi_pred_bb != then_bb
      && recognize_if_then_else (outer_cond_bb, &then_bb, &inner_cond_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto then_bb; else goto inner_cond_bb;
	   <inner_cond_bb>
	     if (p) goto then_bb; else goto ...;
	   <then_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, true,
				true);
    }

  /* And a version where the outer condition is negated.  */
  if (phi_pred_bb != then_bb
      && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &then_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
    {
      /* We have
	   <outer_cond_bb>
	     if (q) goto inner_cond_bb; else goto then_bb;
	   <inner_cond_bb>
	     if (p) goto then_bb; else goto ...;
	   <then_bb>
	     ...
       */
      return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, false,
				true);
    }

  return false;
}

/* Recognize a CFG pattern and dispatch to the appropriate
   if-conversion helper.  We start with BB as the innermost
   worker basic-block.  Returns true if a transformation was done.  */

static bool
tree_ssa_ifcombine_bb (basic_block inner_cond_bb)
{
  basic_block then_bb = NULL, else_bb = NULL;

  if (!recognize_if_then_else (inner_cond_bb, &then_bb, &else_bb))
    return false;

  /* Recognize && and || of two conditions with a common
     then/else block which entry edges we can merge.  That is:
       if (a || b)
	 ;
     and
       if (a && b)
	 ;
     This requires a single predecessor of the inner cond_bb.  */
  if (single_pred_p (inner_cond_bb)
      && bb_no_side_effects_p (inner_cond_bb))
    {
      basic_block outer_cond_bb = single_pred (inner_cond_bb);

      if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb,
				   then_bb, else_bb, inner_cond_bb))
	return true;

      if (forwarder_block_to (else_bb, then_bb))
	{
	  /* Other possibilities for the && form, if else_bb is an
	     empty forwarder block to then_bb.  Compared to the above simpler
	     forms this can be treated as if then_bb and else_bb were swapped,
	     and the corresponding inner_cond_bb not inverted because of that.
	     For same_phi_args_p we look at equality of arguments between
	     the edge from outer_cond_bb and the forwarder block.  */
	  if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
				       then_bb, else_bb))
	    return true;
	}
      else if (forwarder_block_to (then_bb, else_bb))
	{
	  /* Other possibilities for the || form, if then_bb is an
	     empty forwarder block to else_bb.  Compared to the above simpler
	     forms this can be treated as if then_bb and else_bb were swapped,
	     and the corresponding inner_cond_bb not inverted because of that.
	     For same_phi_args_p we look at equality of arguments between
	     the edge from outer_cond_bb and the forwarder block.  */
	  if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
				       then_bb, then_bb))
	    return true;
	}
    }

  return false;
}

/* Main entry for the tree if-conversion pass.  */

namespace {

const pass_data pass_data_tree_ifcombine =
{
  GIMPLE_PASS, /* type */
  "ifcombine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_IFCOMBINE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_tree_ifcombine : public gimple_opt_pass
{
public:
  pass_tree_ifcombine (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_ifcombine, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_tree_ifcombine

unsigned int
pass_tree_ifcombine::execute (function *fun)
{
  basic_block *bbs;
  bool cfg_changed = false;
  int i;

  bbs = single_pred_before_succ_order ();
  calculate_dominance_info (CDI_DOMINATORS);

  /* Search every basic block for COND_EXPRs we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed after its predecessor.
     This ensures that we collapse outer ifs before visiting the
     inner ones, and also that we do not try to visit a removed
     block.  This is the opposite of PHI-OPT, because we cascade the
     combining rather than cascading PHIs.  */
  for (i = n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
    {
      basic_block bb = bbs[i];
      gimple *stmt = last_stmt (bb);

      if (stmt
	  && gimple_code (stmt) == GIMPLE_COND)
	if (tree_ssa_ifcombine_bb (bb))
	  {
	    /* Clear range info from all stmts in BB which is now executed
	       conditionally on an always true/false condition.  */
	    reset_flow_sensitive_info_in_bb (bb);
	    cfg_changed |= true;
	  }
    }

  free (bbs);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_tree_ifcombine (gcc::context *ctxt)
{
  return new pass_tree_ifcombine (ctxt);
}