/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "params.h"

static unsigned int tree_ssa_phiopt_worker (bool, bool);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
                                                gimple *);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple *, tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);

/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
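
/* For illustration (hypothetical C sources, not taken from any testcase):
   the first pattern above arises from

     void f (int *p, int c, int r) { if (!c) *p = r; }

   where the store to *p becomes unconditional, and the second from

     void g (int *p, int c) { if (c) *p = 2; else *p = 3; }

   which is effectively rewritten to "*p = c ? 2 : 3;".  */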

static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}

/* Return the unique PHI in the SEQ of PHIs whose arguments for edges E0
   and E1 differ; PHIs whose two arguments are equal are ignored.  If SEQ
   holds exactly one PHI, return it.  Return NULL if several PHIs have
   differing arguments.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose arguments for the two edges
         differ, then there is more than one such PHI; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}

/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* Skip if bb1 has no successors, or if bb2 is missing or has no
         successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

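      /* Two block shapes are recognized below (an illustrative sketch;
         the names are informal): a "triangle", where one arm of the
         condition is empty, and a "diamond", where both arms join in a
         third block.

              bb                bb
             /  \              /  \
           bb1   |           bb1   bb2
             \   |             \   /
              bb2               bb3   */
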
      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          std::swap (bb1, bb2);
          std::swap (e1, e2);
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 only has one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              phi = as_a <gphi *> (gsi_stmt (gsi));
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                {
                  candorest = false;
                  cfgchanged = true;
                  break;
                }
            }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

          gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
                                                            arg0, arg1,
                                                            cond_stmt);
          if (newphi != NULL)
            {
              phi = newphi;
              /* factor_out_conditional_conversion may create a new PHI in
                 BB2 and eliminate an existing PHI in BB2.  Recompute values
                 that may be affected by that change.  */
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
            }

          /* Do the replacement of conditional if it can be done.  */
          if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should clean it up now.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}

/* Replace the PHI argument coming in over edge E in block BB with variable
   NEW_TREE.  Remove the edge from COND_BLOCK which does not lead to BB
   (COND_BLOCK is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}

/* PR66726: Factor conversions out of COND_EXPR.  If the arguments of the
   PHI stmt are conversions (CONVERT_STMT), factor the conversion out and
   instead apply it to the result of the PHI stmt.  COND_STMT is the
   controlling predicate.  Return the newly-created PHI, if any.  */
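
/* As an illustrative (hypothetical) example:

     if (a_1 < 0) goto bb1; else goto bb2;
   bb1:
     t_2 = (long) b_3;
   bb2:
     # r_4 = PHI <t_2 (bb1), 5 (bb0)>

   is rewritten so the conversion happens once, after the join:

     # t_5 = PHI <b_3 (bb1), 5 (bb0)>
     r_4 = (long) t_5;  */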

static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
                                   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  source_location locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statements have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
          && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
        return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
         is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
          || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
        return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
        new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
          && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
        {
          if (gimple_assign_cast_p (arg0_def_stmt))
            {
              /* For the INTEGER_CST case, we are just moving the
                 conversion from one place to another, which can often
                 hurt as the conversion moves further away from the
                 statement that computes the value.  So, perform this
                 only if new_arg0 is an operand of COND_STMT, or
                 if arg0_def_stmt is the only non-debug stmt in
                 its basic block, because then it is possible this
                 could enable further optimizations (minmax replacement
                 etc.).  See PR71016.  */
              if (new_arg0 != gimple_cond_lhs (cond_stmt)
                  && new_arg0 != gimple_cond_rhs (cond_stmt)
                  && gimple_bb (arg0_def_stmt) == e0->src)
                {
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_prev_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    return NULL;
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_next_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    return NULL;
                }
              new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
            }
          else
            return NULL;
        }
      else
        return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If the types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
               " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s), each of which has a single use by now.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return newphi;
}

/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */

static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gphi *phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* If the PHI arguments are the constants 0 and 1, or 0 and -1, then
     we can convert the PHI into a use of the condition.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    neg = false;
  else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
           || (integer_zerop (arg1) && integer_all_onesp (arg0)))
    neg = true;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge has the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */
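
  /* For example (an illustrative case): the source "r = a < b ? 1 : 0"
     produces exactly this shape and is rewritten to the equivalent of
     "r = a < b;", while "r = a < b ? -1 : 0" additionally negates the
     gimplified condition, yielding "r = -(int) (a < b);".  */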

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }

  /* Insert our new statements at the end of the conditional block before
     the COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      source_location locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}

/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */

static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && known_eq (mem_ref_offset (tem) + offset, 0))
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}

/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */

static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}

/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
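
/* For example (hypothetical GIMPLE): given

     _1 = a_2 == b_3;
     _4 = c_5 == d_6;
     _7 = _1 & _4;
     if (_7 != 0) ...

   ARG0/ARG1 matching a_2/b_3 (or c_5/d_6) in either order is accepted,
   and *CODE is set to EQ_EXPR.  */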

static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle the more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}

/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
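
/* E.g. 0 is neutral for PLUS_EXPR on either side since x + 0 == 0 + x
   == x, but for MINUS_EXPR only on the right: x - 0 == x while
   0 - x != x in general.  Likewise 1 is neutral for MULT_EXPR on both
   sides and for the divisions only on the right.  */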

static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}

/* Returns true if ARG is an absorbing element for operation CODE.  */
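
/* E.g. 0 absorbs MULT_EXPR and BIT_AND_EXPR on either side (x * 0 ==
   0 * x == 0), absorbs the shifts and rotates only on the left (0 << x
   == 0, but x << 0 == x), and absorbs the divisions and modulos on the
   left only when the other operand RVAL is known to be nonzero, since
   0 / 0 would invoke undefined behavior.  */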

static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
              && integer_zerop (arg)
              && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}

/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise
   return 0.  If we remove the middle basic block, return 2.
   BB is the block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
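
/* For example (an illustrative case): in "r = a == b ? a : b" both arms
   yield the value of b whenever they are taken, so the PHI for r can be
   replaced by b and the comparison becomes dead.  */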

static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple *phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  If STMT defines neither PHI argument (or
         the rewriting fails), the middle block is neither empty nor
         merely defining the arguments.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          && !(lhs == arg1
               && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know which PHI argument goes with which outcome
     of the comparison.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or only defines the PHI
         arguments, and this is the single PHI whose arguments differ
         for the edges e0 and e1, then we can remove the middle basic
         block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi));
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb; there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
       if (y_4 != 0)
         goto <bb 3>;
       else
         goto <bb 4>;
      <bb 3>:
       _1 = (int) y_4;
       iftmp.0_6 = x_5(D) r<< _1;
      <bb 4>:
       # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
       if (y_3(D) == 0)
         goto <bb 4>;
       else
         goto <bb 3>;
      <bb 3>:
       y_4 = y_3(D) & 31;
       _1 = (int) y_4;
       _6 = x_5(D) r<< _1;
      <bb 4>:
       # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
        break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
        break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
        return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
          || TREE_CODE (rhs1) != SSA_NAME
          || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
          || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
          || !single_imm_use (lhs, &use_p, &use_stmt)
          || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
        return 0;
      switch (gimple_assign_rhs_code (g))
        {
        CASE_CONVERT:
          break;
        case PLUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
          if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
            return 0;
          break;
        default:
          return 0;
        }
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns (bb_seq (middle_bb), &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
        return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
        {
          cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
                                      gimple_assign_rhs2 (g));
          if (TREE_OVERFLOW (cond_rhs))
            return 0;
        }
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
           && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
           && neutral_element_p (code_def, cond_rhs, true))
          || (arg1 == rhs2
              && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
              && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
                  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
                      && absorbing_element_p (code_def,
                                              cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
         def-stmt in:
           if (n_5 != 0)
             goto <bb 3>;
           else
             goto <bb 4>;

          <bb 3>:
           # RANGE [0, 4294967294]
           u_6 = n_5 + 4294967295;

          <bb 4>:
           # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
        {
          /* If available, we can use VR of phi result at least.  */
          tree phires = gimple_phi_result (phi);
          struct range_info_def *phires_range_info
            = SSA_NAME_RANGE_INFO (phires);
          if (phires_range_info)
            duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
                                           phires_range_info);
        }
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
        {
          tree plhs = gimple_assign_lhs (prep_stmt[i]);
          reset_flow_sensitive_info (plhs);
          gsi_from = gsi_for_stmt (prep_stmt[i]);
          gsi_move_before (&gsi_from, &gsi);
        }
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}

/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */

static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple *phi,
                    tree arg0, tree arg1)
{
  tree result, type;
  gcond *cond;
  gassign *new_stmt;
  edge true_edge, false_edge;
  enum tree_code cmp, minmax, ass_code;
  tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  cond = as_a <gcond *> (last_stmt (cond_bb));
  cmp = gimple_cond_code (cond);

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  alt_smaller = NULL_TREE;
  alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = gimple_cond_rhs (cond);
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
         Likewise smaller <= CST is equivalent to smaller < CST+1.  */
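      /* E.g. (an illustrative case) for "if (x < 5) r = x; else r = 4;"
         the false arm uses 4, which matches the alternative bound 5 - 1,
         so MIN_EXPR (x, 4) is still a valid rewrite.  */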
      if (TREE_CODE (larger) == INTEGER_CST)
        {
          if (cmp == LT_EXPR)
            {
              bool overflow;
              wide_int alt = wi::sub (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
          else
            {
              bool overflow;
              wide_int alt = wi::add (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
        }
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = gimple_cond_rhs (cond);
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
         Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST)
        {
          if (cmp == GT_EXPR)
            {
              bool overflow;
              wide_int alt = wi::add (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
          else
            {
              bool overflow;
              wide_int alt = wi::sub (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
        }
    }
  else
    return false;

1276*38fd1498Szrj /* We need to know which is the true edge and which is the false
1277*38fd1498Szrj edge so that we know which PHI argument is taken on each path. */
1278*38fd1498Szrj extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1279*38fd1498Szrj
1280*38fd1498Szrj /* Forward the edges over the middle basic block. */
1281*38fd1498Szrj if (true_edge->dest == middle_bb)
1282*38fd1498Szrj true_edge = EDGE_SUCC (true_edge->dest, 0);
1283*38fd1498Szrj if (false_edge->dest == middle_bb)
1284*38fd1498Szrj false_edge = EDGE_SUCC (false_edge->dest, 0);
1285*38fd1498Szrj
1286*38fd1498Szrj if (true_edge == e0)
1287*38fd1498Szrj {
1288*38fd1498Szrj gcc_assert (false_edge == e1);
1289*38fd1498Szrj arg_true = arg0;
1290*38fd1498Szrj arg_false = arg1;
1291*38fd1498Szrj }
1292*38fd1498Szrj else
1293*38fd1498Szrj {
1294*38fd1498Szrj gcc_assert (false_edge == e0);
1295*38fd1498Szrj gcc_assert (true_edge == e1);
1296*38fd1498Szrj arg_true = arg1;
1297*38fd1498Szrj arg_false = arg0;
1298*38fd1498Szrj }
1299*38fd1498Szrj
1300*38fd1498Szrj if (empty_block_p (middle_bb))
1301*38fd1498Szrj {
1302*38fd1498Szrj if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1303*38fd1498Szrj || (alt_smaller
1304*38fd1498Szrj && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1305*38fd1498Szrj && (operand_equal_for_phi_arg_p (arg_false, larger)
1306*38fd1498Szrj || (alt_larger
1307*38fd1498Szrj && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
1308*38fd1498Szrj {
1309*38fd1498Szrj /* Case
1310*38fd1498Szrj
1311*38fd1498Szrj if (smaller < larger)
1312*38fd1498Szrj rslt = smaller;
1313*38fd1498Szrj else
1314*38fd1498Szrj rslt = larger; */
1315*38fd1498Szrj minmax = MIN_EXPR;
1316*38fd1498Szrj }
1317*38fd1498Szrj else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1318*38fd1498Szrj || (alt_smaller
1319*38fd1498Szrj && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1320*38fd1498Szrj && (operand_equal_for_phi_arg_p (arg_true, larger)
1321*38fd1498Szrj || (alt_larger
1322*38fd1498Szrj && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1323*38fd1498Szrj minmax = MAX_EXPR;
1324*38fd1498Szrj else
1325*38fd1498Szrj return false;
1326*38fd1498Szrj }
1327*38fd1498Szrj else
1328*38fd1498Szrj {
1329*38fd1498Szrj /* Recognize the following case, assuming d <= u:
1330*38fd1498Szrj
1331*38fd1498Szrj if (a <= u)
1332*38fd1498Szrj b = MAX (a, d);
1333*38fd1498Szrj x = PHI <b, u>
1334*38fd1498Szrj
1335*38fd1498Szrj This is equivalent to
1336*38fd1498Szrj
1337*38fd1498Szrj b = MAX (a, d);
1338*38fd1498Szrj x = MIN (b, u); */
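/* E.g. clamping to a hypothetical range [0, 100], i.e. d = 0 and
u = 100: "b = MAX (a, 0); x = a <= 100 ? b : 100;" becomes
"b = MAX (a, 0); x = MIN (b, 100);". */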
1339*38fd1498Szrj
1340*38fd1498Szrj gimple *assign = last_and_only_stmt (middle_bb);
1341*38fd1498Szrj tree lhs, op0, op1, bound;
1342*38fd1498Szrj
1343*38fd1498Szrj if (!assign
1344*38fd1498Szrj || gimple_code (assign) != GIMPLE_ASSIGN)
1345*38fd1498Szrj return false;
1346*38fd1498Szrj
1347*38fd1498Szrj lhs = gimple_assign_lhs (assign);
1348*38fd1498Szrj ass_code = gimple_assign_rhs_code (assign);
1349*38fd1498Szrj if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1350*38fd1498Szrj return false;
1351*38fd1498Szrj op0 = gimple_assign_rhs1 (assign);
1352*38fd1498Szrj op1 = gimple_assign_rhs2 (assign);
1353*38fd1498Szrj
1354*38fd1498Szrj if (true_edge->src == middle_bb)
1355*38fd1498Szrj {
1356*38fd1498Szrj /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1357*38fd1498Szrj if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1358*38fd1498Szrj return false;
1359*38fd1498Szrj
1360*38fd1498Szrj if (operand_equal_for_phi_arg_p (arg_false, larger)
1361*38fd1498Szrj || (alt_larger
1362*38fd1498Szrj && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
1363*38fd1498Szrj {
1364*38fd1498Szrj /* Case
1365*38fd1498Szrj
1366*38fd1498Szrj if (smaller < larger)
1367*38fd1498Szrj {
1368*38fd1498Szrj r' = MAX_EXPR (smaller, bound)
1369*38fd1498Szrj }
1370*38fd1498Szrj r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1371*38fd1498Szrj if (ass_code != MAX_EXPR)
1372*38fd1498Szrj return false;
1373*38fd1498Szrj
1374*38fd1498Szrj minmax = MIN_EXPR;
1375*38fd1498Szrj if (operand_equal_for_phi_arg_p (op0, smaller)
1376*38fd1498Szrj || (alt_smaller
1377*38fd1498Szrj && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1378*38fd1498Szrj bound = op1;
1379*38fd1498Szrj else if (operand_equal_for_phi_arg_p (op1, smaller)
1380*38fd1498Szrj || (alt_smaller
1381*38fd1498Szrj && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1382*38fd1498Szrj bound = op0;
1383*38fd1498Szrj else
1384*38fd1498Szrj return false;
1385*38fd1498Szrj
1386*38fd1498Szrj /* We need BOUND <= LARGER. */
1387*38fd1498Szrj if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1388*38fd1498Szrj bound, larger)))
1389*38fd1498Szrj return false;
1390*38fd1498Szrj }
1391*38fd1498Szrj else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1392*38fd1498Szrj || (alt_smaller
1393*38fd1498Szrj && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1394*38fd1498Szrj {
1395*38fd1498Szrj /* Case
1396*38fd1498Szrj
1397*38fd1498Szrj if (smaller < larger)
1398*38fd1498Szrj {
1399*38fd1498Szrj r' = MIN_EXPR (larger, bound)
1400*38fd1498Szrj }
1401*38fd1498Szrj r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1402*38fd1498Szrj if (ass_code != MIN_EXPR)
1403*38fd1498Szrj return false;
1404*38fd1498Szrj
1405*38fd1498Szrj minmax = MAX_EXPR;
1406*38fd1498Szrj if (operand_equal_for_phi_arg_p (op0, larger)
1407*38fd1498Szrj || (alt_larger
1408*38fd1498Szrj && operand_equal_for_phi_arg_p (op0, alt_larger)))
1409*38fd1498Szrj bound = op1;
1410*38fd1498Szrj else if (operand_equal_for_phi_arg_p (op1, larger)
1411*38fd1498Szrj || (alt_larger
1412*38fd1498Szrj && operand_equal_for_phi_arg_p (op1, alt_larger)))
1413*38fd1498Szrj bound = op0;
1414*38fd1498Szrj else
1415*38fd1498Szrj return false;
1416*38fd1498Szrj
1417*38fd1498Szrj /* We need BOUND >= SMALLER. */
1418*38fd1498Szrj if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1419*38fd1498Szrj bound, smaller)))
1420*38fd1498Szrj return false;
1421*38fd1498Szrj }
1422*38fd1498Szrj else
1423*38fd1498Szrj return false;
1424*38fd1498Szrj }
1425*38fd1498Szrj else
1426*38fd1498Szrj {
1427*38fd1498Szrj /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1428*38fd1498Szrj if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1429*38fd1498Szrj return false;
1430*38fd1498Szrj
1431*38fd1498Szrj if (operand_equal_for_phi_arg_p (arg_true, larger)
1432*38fd1498Szrj || (alt_larger
1433*38fd1498Szrj && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
1434*38fd1498Szrj {
1435*38fd1498Szrj /* Case
1436*38fd1498Szrj
1437*38fd1498Szrj if (smaller > larger)
1438*38fd1498Szrj {
1439*38fd1498Szrj r' = MIN_EXPR (smaller, bound)
1440*38fd1498Szrj }
1441*38fd1498Szrj r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1442*38fd1498Szrj if (ass_code != MIN_EXPR)
1443*38fd1498Szrj return false;
1444*38fd1498Szrj
1445*38fd1498Szrj minmax = MAX_EXPR;
1446*38fd1498Szrj if (operand_equal_for_phi_arg_p (op0, smaller)
1447*38fd1498Szrj || (alt_smaller
1448*38fd1498Szrj && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1449*38fd1498Szrj bound = op1;
1450*38fd1498Szrj else if (operand_equal_for_phi_arg_p (op1, smaller)
1451*38fd1498Szrj || (alt_smaller
1452*38fd1498Szrj && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1453*38fd1498Szrj bound = op0;
1454*38fd1498Szrj else
1455*38fd1498Szrj return false;
1456*38fd1498Szrj
1457*38fd1498Szrj /* We need BOUND >= LARGER. */
1458*38fd1498Szrj if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1459*38fd1498Szrj bound, larger)))
1460*38fd1498Szrj return false;
1461*38fd1498Szrj }
1462*38fd1498Szrj else if (operand_equal_for_phi_arg_p (arg_true, smaller)
1463*38fd1498Szrj || (alt_smaller
1464*38fd1498Szrj && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1465*38fd1498Szrj {
1466*38fd1498Szrj /* Case
1467*38fd1498Szrj
1468*38fd1498Szrj if (smaller > larger)
1469*38fd1498Szrj {
1470*38fd1498Szrj r' = MAX_EXPR (larger, bound)
1471*38fd1498Szrj }
1472*38fd1498Szrj r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1473*38fd1498Szrj if (ass_code != MAX_EXPR)
1474*38fd1498Szrj return false;
1475*38fd1498Szrj
1476*38fd1498Szrj minmax = MIN_EXPR;
1477*38fd1498Szrj if (operand_equal_for_phi_arg_p (op0, larger))
1478*38fd1498Szrj bound = op1;
1479*38fd1498Szrj else if (operand_equal_for_phi_arg_p (op1, larger))
1480*38fd1498Szrj bound = op0;
1481*38fd1498Szrj else
1482*38fd1498Szrj return false;
1483*38fd1498Szrj
1484*38fd1498Szrj /* We need BOUND <= SMALLER. */
1485*38fd1498Szrj if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1486*38fd1498Szrj bound, smaller)))
1487*38fd1498Szrj return false;
1488*38fd1498Szrj }
1489*38fd1498Szrj else
1490*38fd1498Szrj return false;
1491*38fd1498Szrj }
1492*38fd1498Szrj
1493*38fd1498Szrj /* Move the statement from the middle block. */
1494*38fd1498Szrj gsi = gsi_last_bb (cond_bb);
1495*38fd1498Szrj gsi_from = gsi_last_nondebug_bb (middle_bb);
1496*38fd1498Szrj reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
1497*38fd1498Szrj SSA_OP_DEF));
1498*38fd1498Szrj gsi_move_before (&gsi_from, &gsi);
1499*38fd1498Szrj }
1500*38fd1498Szrj
1501*38fd1498Szrj /* Create an SSA var to hold the min/max result. If the PHI has no
1502*38fd1498Szrj arguments other than the two we are replacing, then we can clone the
1503*38fd1498Szrj PHI variable. Otherwise we must create a new one. */
1504*38fd1498Szrj result = PHI_RESULT (phi);
1505*38fd1498Szrj if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
1506*38fd1498Szrj result = duplicate_ssa_name (result, NULL);
1507*38fd1498Szrj else
1508*38fd1498Szrj result = make_ssa_name (TREE_TYPE (result));
1509*38fd1498Szrj
1510*38fd1498Szrj /* Emit the statement to compute min/max. */
1511*38fd1498Szrj new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
1512*38fd1498Szrj gsi = gsi_last_bb (cond_bb);
1513*38fd1498Szrj gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1514*38fd1498Szrj
1515*38fd1498Szrj replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1516*38fd1498Szrj
1517*38fd1498Szrj return true;
1518*38fd1498Szrj }
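/* For the clamp sketch above, after the transformation COND_BB ends
with (SSA names hypothetical):

b_5 = MAX_EXPR <a_1, 0>;
x_6 = MIN_EXPR <b_5, 100>;

and the PHI result is replaced by x_6. */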
1519*38fd1498Szrj
1520*38fd1498Szrj /* The function abs_replacement does the main work of doing the absolute
1521*38fd1498Szrj replacement. Return true if the replacement is done. Otherwise return
1522*38fd1498Szrj false.
1523*38fd1498Szrj BB is the basic block where the replacement is going to be done. ARG0
1524*38fd1498Szrj is argument 0 from the PHI. Likewise for ARG1. */
1525*38fd1498Szrj
1526*38fd1498Szrj static bool
1527*38fd1498Szrj abs_replacement (basic_block cond_bb, basic_block middle_bb,
1528*38fd1498Szrj edge e0 ATTRIBUTE_UNUSED, edge e1,
1529*38fd1498Szrj gimple *phi, tree arg0, tree arg1)
1530*38fd1498Szrj {
1531*38fd1498Szrj tree result;
1532*38fd1498Szrj gassign *new_stmt;
1533*38fd1498Szrj gimple *cond;
1534*38fd1498Szrj gimple_stmt_iterator gsi;
1535*38fd1498Szrj edge true_edge, false_edge;
1536*38fd1498Szrj gimple *assign;
1537*38fd1498Szrj edge e;
1538*38fd1498Szrj tree rhs, lhs;
1539*38fd1498Szrj bool negate;
1540*38fd1498Szrj enum tree_code cond_code;
1541*38fd1498Szrj
1542*38fd1498Szrj /* If the type says honor signed zeros we cannot do this
1543*38fd1498Szrj optimization. */
1544*38fd1498Szrj if (HONOR_SIGNED_ZEROS (arg1))
1545*38fd1498Szrj return false;
1546*38fd1498Szrj
1547*38fd1498Szrj /* MIDDLE_BB must have only one executable statement, which must have the
1548*38fd1498Szrj form arg0 = -arg1 or arg1 = -arg0. */
1549*38fd1498Szrj
1550*38fd1498Szrj assign = last_and_only_stmt (middle_bb);
1551*38fd1498Szrj /* If we did not find the proper negation assignment, then we cannot
1552*38fd1498Szrj optimize. */
1553*38fd1498Szrj if (assign == NULL)
1554*38fd1498Szrj return false;
1555*38fd1498Szrj
1556*38fd1498Szrj /* If we got here, then we have found the only executable statement
1557*38fd1498Szrj in MIDDLE_BB. If it is anything other than arg0 = -arg1 or
1558*38fd1498Szrj arg1 = -arg0, then we cannot optimize. */
1559*38fd1498Szrj if (gimple_code (assign) != GIMPLE_ASSIGN)
1560*38fd1498Szrj return false;
1561*38fd1498Szrj
1562*38fd1498Szrj lhs = gimple_assign_lhs (assign);
1563*38fd1498Szrj
1564*38fd1498Szrj if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1565*38fd1498Szrj return false;
1566*38fd1498Szrj
1567*38fd1498Szrj rhs = gimple_assign_rhs1 (assign);
1568*38fd1498Szrj
1569*38fd1498Szrj /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
1570*38fd1498Szrj if (!(lhs == arg0 && rhs == arg1)
1571*38fd1498Szrj && !(lhs == arg1 && rhs == arg0))
1572*38fd1498Szrj return false;
1573*38fd1498Szrj
1574*38fd1498Szrj cond = last_stmt (cond_bb);
1575*38fd1498Szrj result = PHI_RESULT (phi);
1576*38fd1498Szrj
1577*38fd1498Szrj /* Only relationals comparing arg[01] against zero are interesting. */
1578*38fd1498Szrj cond_code = gimple_cond_code (cond);
1579*38fd1498Szrj if (cond_code != GT_EXPR && cond_code != GE_EXPR
1580*38fd1498Szrj && cond_code != LT_EXPR && cond_code != LE_EXPR)
1581*38fd1498Szrj return false;
1582*38fd1498Szrj
1583*38fd1498Szrj /* Make sure the conditional is arg[01] OP y. */
1584*38fd1498Szrj if (gimple_cond_lhs (cond) != rhs)
1585*38fd1498Szrj return false;
1586*38fd1498Szrj
1587*38fd1498Szrj if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1588*38fd1498Szrj ? real_zerop (gimple_cond_rhs (cond))
1589*38fd1498Szrj : integer_zerop (gimple_cond_rhs (cond)))
1590*38fd1498Szrj ;
1591*38fd1498Szrj else
1592*38fd1498Szrj return false;
1593*38fd1498Szrj
1594*38fd1498Szrj /* We need to know which is the true edge and which is the false
1595*38fd1498Szrj edge so that we know if we have abs or negative abs. */
1596*38fd1498Szrj extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1597*38fd1498Szrj
1598*38fd1498Szrj /* For GT_EXPR/GE_EXPR, if the true edge goes to MIDDLE_BB, then we
1599*38fd1498Szrj will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
1600*38fd1498Szrj the false edge goes to MIDDLE_BB. */
1601*38fd1498Szrj if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1602*38fd1498Szrj e = true_edge;
1603*38fd1498Szrj else
1604*38fd1498Szrj e = false_edge;
1605*38fd1498Szrj
1606*38fd1498Szrj if (e->dest == middle_bb)
1607*38fd1498Szrj negate = true;
1608*38fd1498Szrj else
1609*38fd1498Szrj negate = false;
1610*38fd1498Szrj
1611*38fd1498Szrj /* If the code negates the operand only when it is positive, make sure
1612*38fd1498Szrj not to introduce undefined behavior when negating or computing the absolute.
1613*38fd1498Szrj ??? We could use range info if present to check for arg1 == INT_MIN. */
1614*38fd1498Szrj if (negate
1615*38fd1498Szrj && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
1616*38fd1498Szrj && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
1617*38fd1498Szrj return false;
1618*38fd1498Szrj
1619*38fd1498Szrj result = duplicate_ssa_name (result, NULL);
1620*38fd1498Szrj
1621*38fd1498Szrj if (negate)
1622*38fd1498Szrj lhs = make_ssa_name (TREE_TYPE (result));
1623*38fd1498Szrj else
1624*38fd1498Szrj lhs = result;
1625*38fd1498Szrj
1626*38fd1498Szrj /* Build the modify expression with abs expression. */
1627*38fd1498Szrj new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
1628*38fd1498Szrj
1629*38fd1498Szrj gsi = gsi_last_bb (cond_bb);
1630*38fd1498Szrj gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1631*38fd1498Szrj
1632*38fd1498Szrj if (negate)
1633*38fd1498Szrj {
1634*38fd1498Szrj /* Get the right GSI. We want to insert after the recently
1635*38fd1498Szrj added ABS_EXPR statement (which we know is the first statement
1636*38fd1498Szrj in the block). */
1637*38fd1498Szrj new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
1638*38fd1498Szrj
1639*38fd1498Szrj gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1640*38fd1498Szrj }
1641*38fd1498Szrj
1642*38fd1498Szrj replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1643*38fd1498Szrj
1644*38fd1498Szrj /* Note that we optimized this PHI. */
1645*38fd1498Szrj return true;
1646*38fd1498Szrj }
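/* Illustrative examples (variable names hypothetical):

if (a < 0) a1 = -a; x = PHI <a1, a> --> x = ABS_EXPR <a>;
if (a >= 0) a1 = -a; x = PHI <a1, a> --> x1 = ABS_EXPR <a>; x = -x1;

The second form is the "negative abs" case; both are subject to the
signed-zero and overflow checks above. */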
1647*38fd1498Szrj
1648*38fd1498Szrj /* Auxiliary functions to determine the set of memory accesses which
1649*38fd1498Szrj can't trap because they are preceded by accesses to the same memory
1650*38fd1498Szrj portion. We do that for MEM_REFs, so we only need to track
1651*38fd1498Szrj the SSA_NAME of the pointer indirectly referenced. The algorithm
1652*38fd1498Szrj simply is a walk over all instructions in dominator order. When
1653*38fd1498Szrj we see a MEM_REF we determine if we've already seen the same
1654*38fd1498Szrj ref anywhere up to the root of the dominator tree. If we do the
1655*38fd1498Szrj current access can't trap. If we don't see any dominating access
1656*38fd1498Szrj the current access might trap, but might also make later accesses
1657*38fd1498Szrj non-trapping, so we remember it. We need to be careful with loads
1658*38fd1498Szrj or stores, for instance a load might not trap, while a store would,
1659*38fd1498Szrj so if we see a dominating read access this doesn't mean that a later
1660*38fd1498Szrj write access would not trap. Hence we also need to differentiate the
1661*38fd1498Szrj type of access(es) seen.
1662*38fd1498Szrj
1663*38fd1498Szrj ??? We currently are very conservative and assume that a load might
1664*38fd1498Szrj trap even if a store doesn't (write-only memory). This probably is
1665*38fd1498Szrj overly conservative. */
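/* For instance (hypothetical pointer p), in

*p = 0;
if (cond)
*p = 1;

the conditional store cannot trap, because the dominating store to the
same location would have trapped first. A dominating load from *p
would not prove the store safe, per the distinction above. */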
1666*38fd1498Szrj
1667*38fd1498Szrj /* A hash-table of SSA_NAMEs, and in which basic block a MEM_REF
1668*38fd1498Szrj through it was seen, which would constitute a no-trap region for
1669*38fd1498Szrj same accesses. */
1670*38fd1498Szrj struct name_to_bb
1671*38fd1498Szrj {
1672*38fd1498Szrj unsigned int ssa_name_ver;
1673*38fd1498Szrj unsigned int phase;
1674*38fd1498Szrj bool store;
1675*38fd1498Szrj HOST_WIDE_INT offset, size;
1676*38fd1498Szrj basic_block bb;
1677*38fd1498Szrj };
1678*38fd1498Szrj
1679*38fd1498Szrj /* Hashtable helpers. */
1680*38fd1498Szrj
1681*38fd1498Szrj struct ssa_names_hasher : free_ptr_hash <name_to_bb>
1682*38fd1498Szrj {
1683*38fd1498Szrj static inline hashval_t hash (const name_to_bb *);
1684*38fd1498Szrj static inline bool equal (const name_to_bb *, const name_to_bb *);
1685*38fd1498Szrj };
1686*38fd1498Szrj
1687*38fd1498Szrj /* Used for quick clearing of the hash-table when we see calls.
1688*38fd1498Szrj Hash entries with phase < nt_call_phase are invalid. */
1689*38fd1498Szrj static unsigned int nt_call_phase;
1690*38fd1498Szrj
1691*38fd1498Szrj /* The hash function. */
1692*38fd1498Szrj
1693*38fd1498Szrj inline hashval_t
1694*38fd1498Szrj ssa_names_hasher::hash (const name_to_bb *n)
1695*38fd1498Szrj {
1696*38fd1498Szrj return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1697*38fd1498Szrj ^ (n->offset << 6) ^ (n->size << 3);
1698*38fd1498Szrj }
1699*38fd1498Szrj
1700*38fd1498Szrj /* The equality function of *P1 and *P2. */
1701*38fd1498Szrj
1702*38fd1498Szrj inline bool
1703*38fd1498Szrj ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
1704*38fd1498Szrj {
1705*38fd1498Szrj return n1->ssa_name_ver == n2->ssa_name_ver
1706*38fd1498Szrj && n1->store == n2->store
1707*38fd1498Szrj && n1->offset == n2->offset
1708*38fd1498Szrj && n1->size == n2->size;
1709*38fd1498Szrj }
1710*38fd1498Szrj
1711*38fd1498Szrj class nontrapping_dom_walker : public dom_walker
1712*38fd1498Szrj {
1713*38fd1498Szrj public:
1714*38fd1498Szrj nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1715*38fd1498Szrj : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1716*38fd1498Szrj
1717*38fd1498Szrj virtual edge before_dom_children (basic_block);
1718*38fd1498Szrj virtual void after_dom_children (basic_block);
1719*38fd1498Szrj
1720*38fd1498Szrj private:
1721*38fd1498Szrj
1722*38fd1498Szrj /* We see the expression EXP in basic block BB. If it's an interesting
1723*38fd1498Szrj expression (a MEM_REF through an SSA_NAME) possibly insert the
1724*38fd1498Szrj expression into the set NONTRAP or the hash table of seen expressions.
1725*38fd1498Szrj STORE is true if this expression is on the LHS, otherwise it's on
1726*38fd1498Szrj the RHS. */
1727*38fd1498Szrj void add_or_mark_expr (basic_block, tree, bool);
1728*38fd1498Szrj
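/* The set of memory references found not to trap. */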
1729*38fd1498Szrj hash_set<tree> *m_nontrapping;
1730*38fd1498Szrj
1731*38fd1498Szrj /* The hash table for remembering what we've seen. */
1732*38fd1498Szrj hash_table<ssa_names_hasher> m_seen_ssa_names;
1733*38fd1498Szrj };
1734*38fd1498Szrj
1735*38fd1498Szrj /* Called by walk_dominator_tree, when entering the block BB. */
1736*38fd1498Szrj edge
1737*38fd1498Szrj nontrapping_dom_walker::before_dom_children (basic_block bb)
1738*38fd1498Szrj {
1739*38fd1498Szrj edge e;
1740*38fd1498Szrj edge_iterator ei;
1741*38fd1498Szrj gimple_stmt_iterator gsi;
1742*38fd1498Szrj
1743*38fd1498Szrj /* If we haven't seen all our predecessors, clear the hash-table. */
1744*38fd1498Szrj FOR_EACH_EDGE (e, ei, bb->preds)
1745*38fd1498Szrj if ((((size_t)e->src->aux) & 2) == 0)
1746*38fd1498Szrj {
1747*38fd1498Szrj nt_call_phase++;
1748*38fd1498Szrj break;
1749*38fd1498Szrj }
1750*38fd1498Szrj
1751*38fd1498Szrj /* Mark this BB as being on the path to dominator root and as visited. */
1752*38fd1498Szrj bb->aux = (void*)(1 | 2);
1753*38fd1498Szrj
1754*38fd1498Szrj /* And walk the statements in order. */
1755*38fd1498Szrj for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1756*38fd1498Szrj {
1757*38fd1498Szrj gimple *stmt = gsi_stmt (gsi);
1758*38fd1498Szrj
1759*38fd1498Szrj if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
1760*38fd1498Szrj || (is_gimple_call (stmt)
1761*38fd1498Szrj && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
1762*38fd1498Szrj nt_call_phase++;
1763*38fd1498Szrj else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1764*38fd1498Szrj {
1765*38fd1498Szrj add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1766*38fd1498Szrj add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1767*38fd1498Szrj }
1768*38fd1498Szrj }
1769*38fd1498Szrj return NULL;
1770*38fd1498Szrj }
1771*38fd1498Szrj
1772*38fd1498Szrj /* Called by walk_dominator_tree, when basic block BB is exited. */
1773*38fd1498Szrj void
1774*38fd1498Szrj nontrapping_dom_walker::after_dom_children (basic_block bb)
1775*38fd1498Szrj {
1776*38fd1498Szrj /* This BB isn't on the path to dominator root anymore. */
1777*38fd1498Szrj bb->aux = (void*)2;
1778*38fd1498Szrj }
1779*38fd1498Szrj
1780*38fd1498Szrj /* We see the expression EXP in basic block BB. If it's an interesting
1781*38fd1498Szrj expression (a MEM_REF through an SSA_NAME) possibly insert the
1782*38fd1498Szrj expression into the set NONTRAP or the hash table of seen expressions.
1783*38fd1498Szrj STORE is true if this expression is on the LHS, otherwise it's on
1784*38fd1498Szrj the RHS. */
1785*38fd1498Szrj void
1786*38fd1498Szrj nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1787*38fd1498Szrj {
1788*38fd1498Szrj HOST_WIDE_INT size;
1789*38fd1498Szrj
1790*38fd1498Szrj if (TREE_CODE (exp) == MEM_REF
1791*38fd1498Szrj && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1792*38fd1498Szrj && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1793*38fd1498Szrj && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1794*38fd1498Szrj {
1795*38fd1498Szrj tree name = TREE_OPERAND (exp, 0);
1796*38fd1498Szrj struct name_to_bb map;
1797*38fd1498Szrj name_to_bb **slot;
1798*38fd1498Szrj struct name_to_bb *n2bb;
1799*38fd1498Szrj basic_block found_bb = 0;
1800*38fd1498Szrj
1801*38fd1498Szrj /* Try to find the last seen MEM_REF through the same
1802*38fd1498Szrj SSA_NAME, which can trap. */
1803*38fd1498Szrj map.ssa_name_ver = SSA_NAME_VERSION (name);
1804*38fd1498Szrj map.phase = 0;
1805*38fd1498Szrj map.bb = 0;
1806*38fd1498Szrj map.store = store;
1807*38fd1498Szrj map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1808*38fd1498Szrj map.size = size;
1809*38fd1498Szrj
1810*38fd1498Szrj slot = m_seen_ssa_names.find_slot (&map, INSERT);
1811*38fd1498Szrj n2bb = *slot;
1812*38fd1498Szrj if (n2bb && n2bb->phase >= nt_call_phase)
1813*38fd1498Szrj found_bb = n2bb->bb;
1814*38fd1498Szrj
1815*38fd1498Szrj /* If we've found a trapping MEM_REF, _and_ it dominates EXP
1816*38fd1498Szrj (it's in a basic block on the path from us to the dominator root)
1817*38fd1498Szrj then we can't trap. */
1818*38fd1498Szrj if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1819*38fd1498Szrj {
1820*38fd1498Szrj m_nontrapping->add (exp);
1821*38fd1498Szrj }
1822*38fd1498Szrj else
1823*38fd1498Szrj {
1824*38fd1498Szrj /* EXP might trap, so insert it into the hash table. */
1825*38fd1498Szrj if (n2bb)
1826*38fd1498Szrj {
1827*38fd1498Szrj n2bb->phase = nt_call_phase;
1828*38fd1498Szrj n2bb->bb = bb;
1829*38fd1498Szrj }
1830*38fd1498Szrj else
1831*38fd1498Szrj {
1832*38fd1498Szrj n2bb = XNEW (struct name_to_bb);
1833*38fd1498Szrj n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1834*38fd1498Szrj n2bb->phase = nt_call_phase;
1835*38fd1498Szrj n2bb->bb = bb;
1836*38fd1498Szrj n2bb->store = store;
1837*38fd1498Szrj n2bb->offset = map.offset;
1838*38fd1498Szrj n2bb->size = size;
1839*38fd1498Szrj *slot = n2bb;
1840*38fd1498Szrj }
1841*38fd1498Szrj }
1842*38fd1498Szrj }
1843*38fd1498Szrj }
1844*38fd1498Szrj
1845*38fd1498Szrj /* This is the entry point of gathering non-trapping memory accesses.
1846*38fd1498Szrj It will do a dominator walk over the whole function, and it will
1847*38fd1498Szrj make use of the bb->aux pointers. It returns a set of trees
1848*38fd1498Szrj (the MEM_REFs themselves) which can't trap. */
1849*38fd1498Szrj static hash_set<tree> *
1850*38fd1498Szrj get_non_trapping (void)
1851*38fd1498Szrj {
1852*38fd1498Szrj nt_call_phase = 0;
1853*38fd1498Szrj hash_set<tree> *nontrap = new hash_set<tree>;
1854*38fd1498Szrj /* We're going to do a dominator walk, so ensure that we have
1855*38fd1498Szrj dominance information. */
1856*38fd1498Szrj calculate_dominance_info (CDI_DOMINATORS);
1857*38fd1498Szrj
1858*38fd1498Szrj nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1859*38fd1498Szrj .walk (cfun->cfg->x_entry_block_ptr);
1860*38fd1498Szrj
1861*38fd1498Szrj clear_aux_for_blocks ();
1862*38fd1498Szrj return nontrap;
1863*38fd1498Szrj }
1864*38fd1498Szrj
1865*38fd1498Szrj /* Do the main work of conditional store replacement. We already know
1866*38fd1498Szrj that the recognized pattern looks like this:
1867*38fd1498Szrj
1868*38fd1498Szrj split:
1869*38fd1498Szrj if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1870*38fd1498Szrj MIDDLE_BB:
1871*38fd1498Szrj something
1872*38fd1498Szrj fallthrough (edge E0)
1873*38fd1498Szrj JOIN_BB:
1874*38fd1498Szrj some more
1875*38fd1498Szrj
1876*38fd1498Szrj We check that MIDDLE_BB contains only one store, that that store
1877*38fd1498Szrj doesn't trap (not via NOTRAP, but via checking if an access to the same
1878*38fd1498Szrj memory location dominates us) and that the store has a "simple" RHS. */
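/* Sketch of the rewrite (names hypothetical):

if (cond)
*p = v;

becomes, with a load tmp = *p inserted on the no-store edge E1,

*p = PHI <v (E0), tmp (E1)>;

in JOIN_BB, i.e. an unconditional store. */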
1879*38fd1498Szrj
1880*38fd1498Szrj static bool
1881*38fd1498Szrj cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1882*38fd1498Szrj edge e0, edge e1, hash_set<tree> *nontrap)
1883*38fd1498Szrj {
1884*38fd1498Szrj gimple *assign = last_and_only_stmt (middle_bb);
1885*38fd1498Szrj tree lhs, rhs, name, name2;
1886*38fd1498Szrj gphi *newphi;
1887*38fd1498Szrj gassign *new_stmt;
1888*38fd1498Szrj gimple_stmt_iterator gsi;
1889*38fd1498Szrj source_location locus;
1890*38fd1498Szrj
1891*38fd1498Szrj /* Check if middle_bb contains only one store. */
1892*38fd1498Szrj if (!assign
1893*38fd1498Szrj || !gimple_assign_single_p (assign)
1894*38fd1498Szrj || gimple_has_volatile_ops (assign))
1895*38fd1498Szrj return false;
1896*38fd1498Szrj
1897*38fd1498Szrj locus = gimple_location (assign);
1898*38fd1498Szrj lhs = gimple_assign_lhs (assign);
1899*38fd1498Szrj rhs = gimple_assign_rhs1 (assign);
1900*38fd1498Szrj if (TREE_CODE (lhs) != MEM_REF
1901*38fd1498Szrj || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1902*38fd1498Szrj || !is_gimple_reg_type (TREE_TYPE (lhs)))
1903*38fd1498Szrj return false;
1904*38fd1498Szrj
1905*38fd1498Szrj /* Prove that we can move the store down. We could also check
1906*38fd1498Szrj TREE_THIS_NOTRAP here, but in that case we could also move stores
1907*38fd1498Szrj whose value is not readily available, which we want to avoid. */
1908*38fd1498Szrj if (!nontrap->contains (lhs))
1909*38fd1498Szrj return false;
1910*38fd1498Szrj
1911*38fd1498Szrj /* Now we've checked the constraints, so do the transformation:
1912*38fd1498Szrj 1) Remove the single store. */
1913*38fd1498Szrj gsi = gsi_for_stmt (assign);
1914*38fd1498Szrj unlink_stmt_vdef (assign);
1915*38fd1498Szrj gsi_remove (&gsi, true);
1916*38fd1498Szrj release_defs (assign);
1917*38fd1498Szrj
1918*38fd1498Szrj /* Make both store and load use alias-set zero as we have to
1919*38fd1498Szrj deal with the case of the store being a conditional change
1920*38fd1498Szrj of the dynamic type. */
1921*38fd1498Szrj lhs = unshare_expr (lhs);
1922*38fd1498Szrj tree *basep = &lhs;
1923*38fd1498Szrj while (handled_component_p (*basep))
1924*38fd1498Szrj basep = &TREE_OPERAND (*basep, 0);
1925*38fd1498Szrj if (TREE_CODE (*basep) == MEM_REF
1926*38fd1498Szrj || TREE_CODE (*basep) == TARGET_MEM_REF)
1927*38fd1498Szrj TREE_OPERAND (*basep, 1)
1928*38fd1498Szrj = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
1929*38fd1498Szrj else
1930*38fd1498Szrj *basep = build2 (MEM_REF, TREE_TYPE (*basep),
1931*38fd1498Szrj build_fold_addr_expr (*basep),
1932*38fd1498Szrj build_zero_cst (ptr_type_node));
1933*38fd1498Szrj
1934*38fd1498Szrj /* 2) Insert a load from the memory of the store to the temporary
1935*38fd1498Szrj on the edge which did not contain the store. */
1936*38fd1498Szrj name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1937*38fd1498Szrj new_stmt = gimple_build_assign (name, lhs);
1938*38fd1498Szrj gimple_set_location (new_stmt, locus);
1939*38fd1498Szrj gsi_insert_on_edge (e1, new_stmt);
1940*38fd1498Szrj
1941*38fd1498Szrj /* 3) Create a PHI node at the join block, with one argument
1942*38fd1498Szrj holding the old RHS, and the other holding the temporary
1943*38fd1498Szrj where we stored the old memory contents. */
1944*38fd1498Szrj name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1945*38fd1498Szrj newphi = create_phi_node (name2, join_bb);
1946*38fd1498Szrj add_phi_arg (newphi, rhs, e0, locus);
1947*38fd1498Szrj add_phi_arg (newphi, name, e1, locus);
1948*38fd1498Szrj
1949*38fd1498Szrj lhs = unshare_expr (lhs);
1950*38fd1498Szrj new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1951*38fd1498Szrj
1952*38fd1498Szrj /* 4) Insert that PHI node. */
1953*38fd1498Szrj gsi = gsi_after_labels (join_bb);
1954*38fd1498Szrj if (gsi_end_p (gsi))
1955*38fd1498Szrj {
1956*38fd1498Szrj gsi = gsi_last_bb (join_bb);
1957*38fd1498Szrj gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1958*38fd1498Szrj }
1959*38fd1498Szrj else
1960*38fd1498Szrj gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1961*38fd1498Szrj
1962*38fd1498Szrj return true;
1963*38fd1498Szrj }
1964*38fd1498Szrj
1965*38fd1498Szrj /* Do the main work of conditional store replacement. */
1966*38fd1498Szrj
1967*38fd1498Szrj static bool
1968*38fd1498Szrj cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1969*38fd1498Szrj basic_block join_bb, gimple *then_assign,
1970*38fd1498Szrj gimple *else_assign)
1971*38fd1498Szrj {
1972*38fd1498Szrj tree lhs_base, lhs, then_rhs, else_rhs, name;
1973*38fd1498Szrj source_location then_locus, else_locus;
1974*38fd1498Szrj gimple_stmt_iterator gsi;
1975*38fd1498Szrj gphi *newphi;
1976*38fd1498Szrj gassign *new_stmt;
1977*38fd1498Szrj
1978*38fd1498Szrj if (then_assign == NULL
1979*38fd1498Szrj || !gimple_assign_single_p (then_assign)
1980*38fd1498Szrj || gimple_clobber_p (then_assign)
1981*38fd1498Szrj || gimple_has_volatile_ops (then_assign)
1982*38fd1498Szrj || else_assign == NULL
1983*38fd1498Szrj || !gimple_assign_single_p (else_assign)
1984*38fd1498Szrj || gimple_clobber_p (else_assign)
1985*38fd1498Szrj || gimple_has_volatile_ops (else_assign))
1986*38fd1498Szrj return false;
1987*38fd1498Szrj
1988*38fd1498Szrj lhs = gimple_assign_lhs (then_assign);
1989*38fd1498Szrj if (!is_gimple_reg_type (TREE_TYPE (lhs))
1990*38fd1498Szrj || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1991*38fd1498Szrj return false;
1992*38fd1498Szrj
1993*38fd1498Szrj lhs_base = get_base_address (lhs);
1994*38fd1498Szrj if (lhs_base == NULL_TREE
1995*38fd1498Szrj || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1996*38fd1498Szrj return false;
1997*38fd1498Szrj
1998*38fd1498Szrj then_rhs = gimple_assign_rhs1 (then_assign);
1999*38fd1498Szrj else_rhs = gimple_assign_rhs1 (else_assign);
2000*38fd1498Szrj then_locus = gimple_location (then_assign);
2001*38fd1498Szrj else_locus = gimple_location (else_assign);
2002*38fd1498Szrj
2003*38fd1498Szrj /* Now we've checked the constraints, so do the transformation:
2004*38fd1498Szrj 1) Remove the stores. */
2005*38fd1498Szrj gsi = gsi_for_stmt (then_assign);
2006*38fd1498Szrj unlink_stmt_vdef (then_assign);
2007*38fd1498Szrj gsi_remove (&gsi, true);
2008*38fd1498Szrj release_defs (then_assign);
2009*38fd1498Szrj
2010*38fd1498Szrj gsi = gsi_for_stmt (else_assign);
2011*38fd1498Szrj unlink_stmt_vdef (else_assign);
2012*38fd1498Szrj gsi_remove (&gsi, true);
2013*38fd1498Szrj release_defs (else_assign);
2014*38fd1498Szrj
2015*38fd1498Szrj /* 2) Create a PHI node at the join block, with one argument
2016*38fd1498Szrj holding the RHS from THEN_BB and the other holding the RHS
2017*38fd1498Szrj from ELSE_BB. */
2018*38fd1498Szrj name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2019*38fd1498Szrj newphi = create_phi_node (name, join_bb);
2020*38fd1498Szrj add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
2021*38fd1498Szrj add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
2022*38fd1498Szrj
2023*38fd1498Szrj new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2024*38fd1498Szrj
2025*38fd1498Szrj /* 3) Insert that PHI node. */
2026*38fd1498Szrj gsi = gsi_after_labels (join_bb);
2027*38fd1498Szrj if (gsi_end_p (gsi))
2028*38fd1498Szrj {
2029*38fd1498Szrj gsi = gsi_last_bb (join_bb);
2030*38fd1498Szrj gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2031*38fd1498Szrj }
2032*38fd1498Szrj else
2033*38fd1498Szrj gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2034*38fd1498Szrj
2035*38fd1498Szrj return true;
2036*38fd1498Szrj }
2037*38fd1498Szrj
2038*38fd1498Szrj /* Return the single store in BB whose virtual definition is VDEF, or
2039*38fd1498Szrj NULL if there are other stores in the BB or loads following the store. */
2040*38fd1498Szrj
2041*38fd1498Szrj static gimple *
2042*38fd1498Szrj single_trailing_store_in_bb (basic_block bb, tree vdef)
2043*38fd1498Szrj {
2044*38fd1498Szrj if (SSA_NAME_IS_DEFAULT_DEF (vdef))
2045*38fd1498Szrj return NULL;
2046*38fd1498Szrj gimple *store = SSA_NAME_DEF_STMT (vdef);
2047*38fd1498Szrj if (gimple_bb (store) != bb
2048*38fd1498Szrj || gimple_code (store) == GIMPLE_PHI)
2049*38fd1498Szrj return NULL;
2050*38fd1498Szrj
2051*38fd1498Szrj /* Verify there is no other store in this BB. */
2052*38fd1498Szrj if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
2053*38fd1498Szrj && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
2054*38fd1498Szrj && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
2055*38fd1498Szrj return NULL;
2056*38fd1498Szrj
2057*38fd1498Szrj /* Verify there is no load or store after the store. */
2058*38fd1498Szrj use_operand_p use_p;
2059*38fd1498Szrj imm_use_iterator imm_iter;
2060*38fd1498Szrj FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
2061*38fd1498Szrj if (USE_STMT (use_p) != store
2062*38fd1498Szrj && gimple_bb (USE_STMT (use_p)) == bb)
2063*38fd1498Szrj return NULL;
2064*38fd1498Szrj
2065*38fd1498Szrj return store;
2066*38fd1498Szrj }
2067*38fd1498Szrj
2068*38fd1498Szrj /* Conditional store replacement. We already know
2069*38fd1498Szrj that the recognized pattern looks like this:
2070*38fd1498Szrj
2071*38fd1498Szrj split:
2072*38fd1498Szrj if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2073*38fd1498Szrj THEN_BB:
2074*38fd1498Szrj ...
2075*38fd1498Szrj X = Y;
2076*38fd1498Szrj ...
2077*38fd1498Szrj goto JOIN_BB;
2078*38fd1498Szrj ELSE_BB:
2079*38fd1498Szrj ...
2080*38fd1498Szrj X = Z;
2081*38fd1498Szrj ...
2082*38fd1498Szrj fallthrough (edge E0)
2083*38fd1498Szrj JOIN_BB:
2084*38fd1498Szrj some more
2085*38fd1498Szrj
2086*38fd1498Szrj We check that it is safe to sink the store to JOIN_BB by verifying that
2087*38fd1498Szrj there are no read-after-write or write-after-write dependencies in
2088*38fd1498Szrj THEN_BB and ELSE_BB. */
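/* E.g. (names hypothetical):

if (cond)
*p = a;
else
*p = b;

becomes

tmp = PHI <a, b>;
*p = tmp; */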
2089*38fd1498Szrj
2090*38fd1498Szrj static bool
2091*38fd1498Szrj cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
2092*38fd1498Szrj basic_block join_bb)
2093*38fd1498Szrj {
2094*38fd1498Szrj vec<data_reference_p> then_datarefs, else_datarefs;
2095*38fd1498Szrj vec<ddr_p> then_ddrs, else_ddrs;
2096*38fd1498Szrj gimple *then_store, *else_store;
2097*38fd1498Szrj bool found, ok = false, res;
2098*38fd1498Szrj struct data_dependence_relation *ddr;
2099*38fd1498Szrj data_reference_p then_dr, else_dr;
2100*38fd1498Szrj int i, j;
2101*38fd1498Szrj tree then_lhs, else_lhs;
2102*38fd1498Szrj basic_block blocks[3];
2103*38fd1498Szrj
2104*38fd1498Szrj /* Handle the case with a single store in THEN_BB and ELSE_BB. That is
2105*38fd1498Szrj cheap enough to always handle as it allows us to elide dependence
2106*38fd1498Szrj checking. */
2107*38fd1498Szrj gphi *vphi = NULL;
2108*38fd1498Szrj for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
2109*38fd1498Szrj gsi_next (&si))
2110*38fd1498Szrj if (virtual_operand_p (gimple_phi_result (si.phi ())))
2111*38fd1498Szrj {
2112*38fd1498Szrj vphi = si.phi ();
2113*38fd1498Szrj break;
2114*38fd1498Szrj }
2115*38fd1498Szrj if (!vphi)
2116*38fd1498Szrj return false;
2117*38fd1498Szrj tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
2118*38fd1498Szrj tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
2119*38fd1498Szrj gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
2120*38fd1498Szrj if (then_assign)
2121*38fd1498Szrj {
2122*38fd1498Szrj gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
2123*38fd1498Szrj if (else_assign)
2124*38fd1498Szrj return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2125*38fd1498Szrj then_assign, else_assign);
2126*38fd1498Szrj }
2127*38fd1498Szrj
2128*38fd1498Szrj if (MAX_STORES_TO_SINK == 0)
2129*38fd1498Szrj return false;
2130*38fd1498Szrj
2131*38fd1498Szrj /* Find data references. */
2132*38fd1498Szrj then_datarefs.create (1);
2133*38fd1498Szrj else_datarefs.create (1);
2134*38fd1498Szrj if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
2135*38fd1498Szrj == chrec_dont_know)
2136*38fd1498Szrj || !then_datarefs.length ()
2137*38fd1498Szrj || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
2138*38fd1498Szrj == chrec_dont_know)
2139*38fd1498Szrj || !else_datarefs.length ())
2140*38fd1498Szrj {
2141*38fd1498Szrj free_data_refs (then_datarefs);
2142*38fd1498Szrj free_data_refs (else_datarefs);
2143*38fd1498Szrj return false;
2144*38fd1498Szrj }
2145*38fd1498Szrj
2146*38fd1498Szrj /* Find pairs of stores with equal LHS. */
2147*38fd1498Szrj auto_vec<gimple *, 1> then_stores, else_stores;
2148*38fd1498Szrj FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
2149*38fd1498Szrj {
2150*38fd1498Szrj if (DR_IS_READ (then_dr))
2151*38fd1498Szrj continue;
2152*38fd1498Szrj
2153*38fd1498Szrj then_store = DR_STMT (then_dr);
2154*38fd1498Szrj then_lhs = gimple_get_lhs (then_store);
2155*38fd1498Szrj if (then_lhs == NULL_TREE)
2156*38fd1498Szrj continue;
2157*38fd1498Szrj found = false;
2158*38fd1498Szrj
2159*38fd1498Szrj FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
2160*38fd1498Szrj {
2161*38fd1498Szrj if (DR_IS_READ (else_dr))
2162*38fd1498Szrj continue;
2163*38fd1498Szrj
2164*38fd1498Szrj else_store = DR_STMT (else_dr);
2165*38fd1498Szrj else_lhs = gimple_get_lhs (else_store);
2166*38fd1498Szrj if (else_lhs == NULL_TREE)
2167*38fd1498Szrj continue;
2168*38fd1498Szrj
2169*38fd1498Szrj if (operand_equal_p (then_lhs, else_lhs, 0))
2170*38fd1498Szrj {
2171*38fd1498Szrj found = true;
2172*38fd1498Szrj break;
2173*38fd1498Szrj }
2174*38fd1498Szrj }
2175*38fd1498Szrj
2176*38fd1498Szrj if (!found)
2177*38fd1498Szrj continue;
2178*38fd1498Szrj
2179*38fd1498Szrj then_stores.safe_push (then_store);
2180*38fd1498Szrj else_stores.safe_push (else_store);
2181*38fd1498Szrj }
2182*38fd1498Szrj
2183*38fd1498Szrj /* No pairs of stores found, or too many to sink profitably. */
2184*38fd1498Szrj if (!then_stores.length ()
2185*38fd1498Szrj || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
2186*38fd1498Szrj {
2187*38fd1498Szrj free_data_refs (then_datarefs);
2188*38fd1498Szrj free_data_refs (else_datarefs);
2189*38fd1498Szrj return false;
2190*38fd1498Szrj }
2191*38fd1498Szrj
2192*38fd1498Szrj /* Compute and check data dependencies in both basic blocks. */
2193*38fd1498Szrj then_ddrs.create (1);
2194*38fd1498Szrj else_ddrs.create (1);
2195*38fd1498Szrj if (!compute_all_dependences (then_datarefs, &then_ddrs,
2196*38fd1498Szrj vNULL, false)
2197*38fd1498Szrj || !compute_all_dependences (else_datarefs, &else_ddrs,
2198*38fd1498Szrj vNULL, false))
2199*38fd1498Szrj {
2200*38fd1498Szrj free_dependence_relations (then_ddrs);
2201*38fd1498Szrj free_dependence_relations (else_ddrs);
2202*38fd1498Szrj free_data_refs (then_datarefs);
2203*38fd1498Szrj free_data_refs (else_datarefs);
2204*38fd1498Szrj return false;
2205*38fd1498Szrj }
2206*38fd1498Szrj blocks[0] = then_bb;
2207*38fd1498Szrj blocks[1] = else_bb;
2208*38fd1498Szrj blocks[2] = join_bb;
2209*38fd1498Szrj renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2210*38fd1498Szrj
2211*38fd1498Szrj /* Check that there are no read-after-write or write-after-write dependencies
2212*38fd1498Szrj in THEN_BB. */
2213*38fd1498Szrj FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
2214*38fd1498Szrj {
2215*38fd1498Szrj struct data_reference *dra = DDR_A (ddr);
2216*38fd1498Szrj struct data_reference *drb = DDR_B (ddr);
2217*38fd1498Szrj
2218*38fd1498Szrj if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2219*38fd1498Szrj && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2220*38fd1498Szrj && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2221*38fd1498Szrj || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2222*38fd1498Szrj && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2223*38fd1498Szrj || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2224*38fd1498Szrj {
2225*38fd1498Szrj free_dependence_relations (then_ddrs);
2226*38fd1498Szrj free_dependence_relations (else_ddrs);
2227*38fd1498Szrj free_data_refs (then_datarefs);
2228*38fd1498Szrj free_data_refs (else_datarefs);
2229*38fd1498Szrj return false;
2230*38fd1498Szrj }
2231*38fd1498Szrj }
2232*38fd1498Szrj
2233*38fd1498Szrj /* Check that there are no read-after-write or write-after-write dependencies
2234*38fd1498Szrj in ELSE_BB. */
2235*38fd1498Szrj FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2236*38fd1498Szrj {
2237*38fd1498Szrj struct data_reference *dra = DDR_A (ddr);
2238*38fd1498Szrj struct data_reference *drb = DDR_B (ddr);
2239*38fd1498Szrj
2240*38fd1498Szrj if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2241*38fd1498Szrj && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2242*38fd1498Szrj && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2243*38fd1498Szrj || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2244*38fd1498Szrj && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2245*38fd1498Szrj || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2246*38fd1498Szrj {
2247*38fd1498Szrj free_dependence_relations (then_ddrs);
2248*38fd1498Szrj free_dependence_relations (else_ddrs);
2249*38fd1498Szrj free_data_refs (then_datarefs);
2250*38fd1498Szrj free_data_refs (else_datarefs);
2251*38fd1498Szrj return false;
2252*38fd1498Szrj }
2253*38fd1498Szrj }
2254*38fd1498Szrj
2255*38fd1498Szrj /* Sink stores with same LHS. */
2256*38fd1498Szrj FOR_EACH_VEC_ELT (then_stores, i, then_store)
2257*38fd1498Szrj {
2258*38fd1498Szrj else_store = else_stores[i];
2259*38fd1498Szrj res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2260*38fd1498Szrj then_store, else_store);
2261*38fd1498Szrj ok = ok || res;
2262*38fd1498Szrj }
2263*38fd1498Szrj
2264*38fd1498Szrj free_dependence_relations (then_ddrs);
2265*38fd1498Szrj free_dependence_relations (else_ddrs);
2266*38fd1498Szrj free_data_refs (then_datarefs);
2267*38fd1498Szrj free_data_refs (else_datarefs);
2268*38fd1498Szrj
2269*38fd1498Szrj return ok;
2270*38fd1498Szrj }
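/* Several store pairs can be sunk at once (bounded by MAX_STORES_TO_SINK),
e.g. with a hypothetical struct pointer p:

if (cond) { p->a = x; p->b = y; }
else { p->a = z; p->b = w; }

sinks both p->a and p->b, since dependence analysis proves the two
fields independent within each arm. */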
2271*38fd1498Szrj
2272*38fd1498Szrj /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2273*38fd1498Szrj
2274*38fd1498Szrj static bool
2275*38fd1498Szrj local_mem_dependence (gimple *stmt, basic_block bb)
2276*38fd1498Szrj {
2277*38fd1498Szrj tree vuse = gimple_vuse (stmt);
2278*38fd1498Szrj gimple *def;
2279*38fd1498Szrj
2280*38fd1498Szrj if (!vuse)
2281*38fd1498Szrj return false;
2282*38fd1498Szrj
2283*38fd1498Szrj def = SSA_NAME_DEF_STMT (vuse);
2284*38fd1498Szrj return (def && gimple_bb (def) == bb);
2285*38fd1498Szrj }
2286*38fd1498Szrj
2287*38fd1498Szrj /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2288*38fd1498Szrj BB1 and BB2 are "then" and "else" blocks dependent on this test,
2289*38fd1498Szrj and BB3 rejoins control flow following BB1 and BB2, look for
2290*38fd1498Szrj opportunities to hoist loads as follows. If BB3 contains a PHI of
2291*38fd1498Szrj two loads, one each occurring in BB1 and BB2, and the loads are
2292*38fd1498Szrj provably of adjacent fields in the same structure, then move both
2293*38fd1498Szrj loads into BB0. Of course this can only be done if there are no
2294*38fd1498Szrj dependencies preventing such motion.
2295*38fd1498Szrj
2296*38fd1498Szrj One of the hoisted loads will always be speculative, so the
2297*38fd1498Szrj transformation is currently conservative:
2298*38fd1498Szrj
2299*38fd1498Szrj - The fields must be strictly adjacent.
2300*38fd1498Szrj - The two fields must occupy a single memory block that is
2301*38fd1498Szrj guaranteed to not cross a page boundary.
2302*38fd1498Szrj
2303*38fd1498Szrj The last is difficult to prove, as such memory blocks should be
2304*38fd1498Szrj aligned on the minimum of the stack alignment boundary and the
2305*38fd1498Szrj alignment guaranteed by heap allocation interfaces. Thus we rely
2306*38fd1498Szrj on a parameter for the alignment value.
2307*38fd1498Szrj
2308*38fd1498Szrj Provided a good value is used for the last case, the first
2309*38fd1498Szrj restriction could possibly be relaxed. */
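/* E.g. (names hypothetical):

struct S { int a; int b; } *s;
x = cond ? s->a : s->b;

Both loads are hoisted above the branch when the fields are adjacent
and fit within one cache line, enabling later if-conversion of the
remaining PHI into a conditional move. */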
2310*38fd1498Szrj
2311*38fd1498Szrj static void
2312*38fd1498Szrj hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2313*38fd1498Szrj basic_block bb2, basic_block bb3)
2314*38fd1498Szrj {
2315*38fd1498Szrj int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2316*38fd1498Szrj unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2317*38fd1498Szrj gphi_iterator gsi;
2318*38fd1498Szrj
2319*38fd1498Szrj /* Walk the phis in bb3 looking for an opportunity. We are looking
2320*38fd1498Szrj for phis of two SSA names, one each of which is defined in bb1 and
2321*38fd1498Szrj bb2. */
2322*38fd1498Szrj for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2323*38fd1498Szrj {
2324*38fd1498Szrj gphi *phi_stmt = gsi.phi ();
2325*38fd1498Szrj gimple *def1, *def2;
2326*38fd1498Szrj tree arg1, arg2, ref1, ref2, field1, field2;
2327*38fd1498Szrj tree tree_offset1, tree_offset2, tree_size2, next;
2328*38fd1498Szrj int offset1, offset2, size2;
2329*38fd1498Szrj unsigned align1;
2330*38fd1498Szrj gimple_stmt_iterator gsi2;
2331*38fd1498Szrj basic_block bb_for_def1, bb_for_def2;
2332*38fd1498Szrj
2333*38fd1498Szrj if (gimple_phi_num_args (phi_stmt) != 2
2334*38fd1498Szrj || virtual_operand_p (gimple_phi_result (phi_stmt)))
2335*38fd1498Szrj continue;
2336*38fd1498Szrj
2337*38fd1498Szrj arg1 = gimple_phi_arg_def (phi_stmt, 0);
2338*38fd1498Szrj arg2 = gimple_phi_arg_def (phi_stmt, 1);
2339*38fd1498Szrj
2340*38fd1498Szrj if (TREE_CODE (arg1) != SSA_NAME
2341*38fd1498Szrj || TREE_CODE (arg2) != SSA_NAME
2342*38fd1498Szrj || SSA_NAME_IS_DEFAULT_DEF (arg1)
2343*38fd1498Szrj || SSA_NAME_IS_DEFAULT_DEF (arg2))
2344*38fd1498Szrj continue;
2345*38fd1498Szrj
2346*38fd1498Szrj def1 = SSA_NAME_DEF_STMT (arg1);
2347*38fd1498Szrj def2 = SSA_NAME_DEF_STMT (arg2);
2348*38fd1498Szrj
2349*38fd1498Szrj if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2350*38fd1498Szrj && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2351*38fd1498Szrj continue;
2352*38fd1498Szrj
2353*38fd1498Szrj /* Check the mode of the arguments to be sure a conditional move
2354*38fd1498Szrj can be generated for it. */
2355*38fd1498Szrj if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2356*38fd1498Szrj == CODE_FOR_nothing)
2357*38fd1498Szrj continue;
2358*38fd1498Szrj
2359*38fd1498Szrj /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2360*38fd1498Szrj if (!gimple_assign_single_p (def1)
2361*38fd1498Szrj || !gimple_assign_single_p (def2)
2362*38fd1498Szrj || gimple_has_volatile_ops (def1)
2363*38fd1498Szrj || gimple_has_volatile_ops (def2))
2364*38fd1498Szrj continue;
2365*38fd1498Szrj
2366*38fd1498Szrj ref1 = gimple_assign_rhs1 (def1);
2367*38fd1498Szrj ref2 = gimple_assign_rhs1 (def2);
2368*38fd1498Szrj
2369*38fd1498Szrj if (TREE_CODE (ref1) != COMPONENT_REF
2370*38fd1498Szrj || TREE_CODE (ref2) != COMPONENT_REF)
2371*38fd1498Szrj continue;
2372*38fd1498Szrj
2373*38fd1498Szrj /* The zeroth operand of the two component references must be
2374*38fd1498Szrj identical. It is not sufficient to compare get_base_address of
2375*38fd1498Szrj the two references, because this could allow for different
2376*38fd1498Szrj elements of the same array in the two trees. It is not safe to
2377*38fd1498Szrj assume that the existence of one array element implies the
2378*38fd1498Szrj existence of a different one. */
2379*38fd1498Szrj if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2380*38fd1498Szrj continue;
2381*38fd1498Szrj
2382*38fd1498Szrj field1 = TREE_OPERAND (ref1, 1);
2383*38fd1498Szrj field2 = TREE_OPERAND (ref2, 1);
2384*38fd1498Szrj
2385*38fd1498Szrj /* Check for field adjacency, and ensure field1 comes first. */
2386*38fd1498Szrj for (next = DECL_CHAIN (field1);
2387*38fd1498Szrj next && TREE_CODE (next) != FIELD_DECL;
2388*38fd1498Szrj next = DECL_CHAIN (next))
2389*38fd1498Szrj ;
2390*38fd1498Szrj
2391*38fd1498Szrj if (next != field2)
2392*38fd1498Szrj {
2393*38fd1498Szrj for (next = DECL_CHAIN (field2);
2394*38fd1498Szrj next && TREE_CODE (next) != FIELD_DECL;
2395*38fd1498Szrj next = DECL_CHAIN (next))
2396*38fd1498Szrj ;
2397*38fd1498Szrj
2398*38fd1498Szrj if (next != field1)
2399*38fd1498Szrj continue;
2400*38fd1498Szrj
2401*38fd1498Szrj std::swap (field1, field2);
2402*38fd1498Szrj std::swap (def1, def2);
2403*38fd1498Szrj }
2404*38fd1498Szrj
2405*38fd1498Szrj bb_for_def1 = gimple_bb (def1);
2406*38fd1498Szrj bb_for_def2 = gimple_bb (def2);
2407*38fd1498Szrj
2408*38fd1498Szrj /* Check for proper alignment of the first field. */
2409*38fd1498Szrj tree_offset1 = bit_position (field1);
2410*38fd1498Szrj tree_offset2 = bit_position (field2);
2411*38fd1498Szrj tree_size2 = DECL_SIZE (field2);
2412*38fd1498Szrj
2413*38fd1498Szrj if (!tree_fits_uhwi_p (tree_offset1)
2414*38fd1498Szrj || !tree_fits_uhwi_p (tree_offset2)
2415*38fd1498Szrj || !tree_fits_uhwi_p (tree_size2))
2416*38fd1498Szrj continue;
2417*38fd1498Szrj
2418*38fd1498Szrj offset1 = tree_to_uhwi (tree_offset1);
2419*38fd1498Szrj offset2 = tree_to_uhwi (tree_offset2);
2420*38fd1498Szrj size2 = tree_to_uhwi (tree_size2);
2421*38fd1498Szrj align1 = DECL_ALIGN (field1) % param_align_bits;
2422*38fd1498Szrj
2423*38fd1498Szrj if (offset1 % BITS_PER_UNIT != 0)
2424*38fd1498Szrj continue;
2425*38fd1498Szrj
2426*38fd1498Szrj /* For profitability, the two field references should fit within
2427*38fd1498Szrj a single cache line. */
2428*38fd1498Szrj if (align1 + offset2 - offset1 + size2 > param_align_bits)
2429*38fd1498Szrj continue;
2430*38fd1498Szrj
2431*38fd1498Szrj /* The two expressions cannot be dependent upon vdefs defined
2432*38fd1498Szrj in bb1/bb2. */
2433*38fd1498Szrj if (local_mem_dependence (def1, bb_for_def1)
2434*38fd1498Szrj || local_mem_dependence (def2, bb_for_def2))
2435*38fd1498Szrj continue;
2436*38fd1498Szrj
2437*38fd1498Szrj /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2438*38fd1498Szrj bb0. We hoist the first one first so that a cache miss is handled
2439*38fd1498Szrj efficiently regardless of hardware cache-fill policy. */
2440*38fd1498Szrj gsi2 = gsi_for_stmt (def1);
2441*38fd1498Szrj gsi_move_to_bb_end (&gsi2, bb0);
2442*38fd1498Szrj gsi2 = gsi_for_stmt (def2);
2443*38fd1498Szrj gsi_move_to_bb_end (&gsi2, bb0);
2444*38fd1498Szrj
2445*38fd1498Szrj if (dump_file && (dump_flags & TDF_DETAILS))
2446*38fd1498Szrj {
2447*38fd1498Szrj fprintf (dump_file,
2448*38fd1498Szrj "\nHoisting adjacent loads from %d and %d into %d: \n",
2449*38fd1498Szrj bb_for_def1->index, bb_for_def2->index, bb0->index);
2450*38fd1498Szrj print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2451*38fd1498Szrj print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2452*38fd1498Szrj }
2453*38fd1498Szrj }
2454*38fd1498Szrj }
2455*38fd1498Szrj
2456*38fd1498Szrj /* Determine whether we should attempt to hoist adjacent loads out of
2457*38fd1498Szrj diamond patterns in pass_phiopt. Always hoist loads if
2458*38fd1498Szrj -fhoist-adjacent-loads is specified and the target machine has
2459*38fd1498Szrj both a conditional move instruction and a defined cache line size. */
2460*38fd1498Szrj
2461*38fd1498Szrj static bool
2462*38fd1498Szrj gate_hoist_loads (void)
2463*38fd1498Szrj {
2464*38fd1498Szrj return (flag_hoist_adjacent_loads == 1
2465*38fd1498Szrj && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2466*38fd1498Szrj && HAVE_conditional_move);
2467*38fd1498Szrj }
2468*38fd1498Szrj
2469*38fd1498Szrj /* This pass tries to replace an if-then-else block with an
2470*38fd1498Szrj assignment. We have four kinds of transformations. Some of these
2471*38fd1498Szrj transformations are also performed by the ifcvt RTL optimizer.
2472*38fd1498Szrj
2473*38fd1498Szrj Conditional Replacement
2474*38fd1498Szrj -----------------------
2475*38fd1498Szrj
2476*38fd1498Szrj This transformation, implemented in conditional_replacement,
2477*38fd1498Szrj replaces
2478*38fd1498Szrj
2479*38fd1498Szrj bb0:
2480*38fd1498Szrj if (cond) goto bb2; else goto bb1;
2481*38fd1498Szrj bb1:
2482*38fd1498Szrj bb2:
2483*38fd1498Szrj x = PHI <0 (bb1), 1 (bb0), ...>;
2484*38fd1498Szrj
2485*38fd1498Szrj with
2486*38fd1498Szrj
2487*38fd1498Szrj bb0:
2488*38fd1498Szrj x' = cond;
2489*38fd1498Szrj goto bb2;
2490*38fd1498Szrj bb2:
2491*38fd1498Szrj x = PHI <x' (bb0), ...>;
2492*38fd1498Szrj
2493*38fd1498Szrj We remove bb1 as it becomes unreachable. This occurs often due to
2494*38fd1498Szrj gimplification of conditionals.
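
   As a hypothetical source-level sketch (not taken from a testcase),
   the gimplified form of

     x = cond ? 1 : 0;

   matches this pattern, and the PHI collapses into a direct use of
   the comparison result:

     x = cond;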

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
      if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
      x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.
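
   A hypothetical source-level sketch of the same shape:

     x = (a != b) ? b : a;

   On the path where the test fails, a == b, so both PHI arguments
   carry the value of b and the selection reduces to

     x = b;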


   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI <CONST, a>

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;
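
   A hypothetical source-level sketch of this second form:

     x = (a == CONST && b > c) ? CONST : a;

   Whenever the condition holds, a already equals CONST, so both arms
   yield the value of a and the conditional can be dropped:

     x = a;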

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
      if (a >= 0) goto bb2; else goto bb1;
     bb1:
      x = -a;
     bb2:
      x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
      x' = ABS_EXPR< a >;
     bb2:
      x = PHI <x' (bb0), ...>;
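
   A hypothetical source-level sketch that produces this diamond:

     x = (a >= 0) ? a : -a;

   which becomes the equivalent of abs (a), so the target can emit a
   branchless absolute-value sequence where one exists.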

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
      if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
      x' = MIN_EXPR (a, b)
     bb2:
      x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.
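
   A hypothetical source-level sketch:

     x = (a <= b) ? a : b;

   is rewritten into the equivalent of MIN (a, b), which most targets
   can implement without a branch.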


   This pass also performs a fifth transformation of a slightly different
   flavor.

   Factor conversion in COND_EXPR
   ------------------------------

   This transformation factors the conversion out of COND_EXPR with
   factor_out_conditional_conversion.

   For example:
     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
       tmp = (int) a;
     <bb 4>:
       tmp = PHI <tmp, CST>

   Into:
     if (a <= CST) goto <bb 3>; else goto <bb 4>;
     <bb 3>:
     <bb 4>:
       a = PHI <a, CST>
       tmp = (int) a;
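
   A hypothetical source-level sketch, for a narrower-than-int a:

     tmp = (a <= CST) ? (int) a : CST;

   Moving the widening conversion below the PHI leaves a PHI over the
   narrow operands, which the MIN/MAX replacement above can then catch.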

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
      if (...) goto bb2; else goto bb1;
     bb1:
      x1 = (<expr>).field1;
      goto bb3;
     bb2:
      x2 = (<expr>).field2;
     bb3:
      # x = PHI <x1, x2>;

   with

     bb0:
      x1 = (<expr>).field1;
      x2 = (<expr>).field2;
      if (...) goto bb2; else goto bb1;
     bb1:
      goto bb3;
     bb2:
     bb3:
      # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOV or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */
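
/* A hypothetical illustration of the idiom above:

     struct node { struct node *left; struct node *right; };

   One of the two loads executes on every path, and both pointer
   fields of a node fit within a single cache line (hence a single
   page), so hoisting the second load cannot introduce a page fault
   that the original code could not already incur.  */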

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
  {
    return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
  }

}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}
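
/* The factory function above is how the pass manager instantiates
   this pass; it is referenced from the pass list in passes.def (as a
   NEXT_PASS (pass_phiopt) entry).  phiopt is scheduled more than once
   in the pipeline, which is why the class provides a clone method.  */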

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}