xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-loop-im.c (revision 8feb0f0b7eaff0608f8350bbfa3098827b4bb91b)
1 /* Loop invariant motion.
2    Copyright (C) 2003-2020 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "cfghooks.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "tree-eh.h"
33 #include "gimplify.h"
34 #include "gimple-iterator.h"
35 #include "tree-cfg.h"
36 #include "tree-ssa-loop-manip.h"
37 #include "tree-ssa-loop.h"
38 #include "tree-into-ssa.h"
39 #include "cfgloop.h"
40 #include "domwalk.h"
41 #include "tree-affine.h"
42 #include "tree-ssa-propagate.h"
43 #include "trans-mem.h"
44 #include "gimple-fold.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "alias.h"
48 #include "builtins.h"
49 #include "tree-dfa.h"
50 
51 /* TODO:  Support for predicated code motion.  I.e.
52 
53    while (1)
54      {
55        if (cond)
56 	 {
57 	   a = inv;
58 	   something;
59 	 }
60      }
61 
62    Where COND and INV are invariants, but evaluating INV may trap or be
63    invalid for some other reason if !COND.  This may be transformed to
64 
65    if (cond)
66      a = inv;
67    while (1)
68      {
69        if (cond)
70 	 something;
71      }  */
72 
73 /* The auxiliary data kept for each statement.  */
74 
75 struct lim_aux_data
76 {
77   class loop *max_loop;	/* The outermost loop in which the statement
78 				   is invariant.  */
79 
80   class loop *tgt_loop;	/* The loop out of which we want to move the
81 				   invariant.  */
82 
83   class loop *always_executed_in;
84 				/* The outermost loop for which we are sure
85 				   the statement is executed if the loop
86 				   is entered.  */
87 
88   unsigned cost;		/* Cost of the computation performed by the
89 				   statement.  */
90 
91   unsigned ref;			/* The simple_mem_ref in this stmt or 0.  */
92 
93   vec<gimple *> depends;	/* Vector of statements that must also be
94 				   hoisted out of the loop when this statement
95 				   is hoisted; i.e. those that define the
96 				   operands of the statement and are inside
97 				   the MAX_LOOP loop.  */
98 };
99 
100 /* Maps statements to their lim_aux_data.  */
101 
102 static hash_map<gimple *, lim_aux_data *> *lim_aux_data_map;
103 
104 /* Description of a memory reference location.  */
105 
106 struct mem_ref_loc
107 {
108   tree *ref;			/* The reference itself.  */
109   gimple *stmt;			/* The statement in which it occurs.  */
110 };
111 
112 
113 /* Description of a memory reference.  */
114 
115 class im_mem_ref
116 {
117 public:
118   unsigned id : 30;		/* ID assigned to the memory reference
119 				   (its index in memory_accesses.refs_list)  */
120   unsigned ref_canonical : 1;   /* Whether mem.ref was canonicalized.  */
121   unsigned ref_decomposed : 1;  /* Whether the ref was hashed from mem.  */
122   hashval_t hash;		/* Its hash value.  */
123 
124   /* The memory access itself and associated caching of alias-oracle
125      query meta-data.  */
126   ao_ref mem;
127 
128   bitmap stored;		/* The set of loops in which this memory location
129 				   is stored to.  */
130   vec<mem_ref_loc>		accesses_in_loop;
131 				/* The locations of the accesses.  Vector
132 				   sorted by the loop postorder.  */
133 
134   /* The following sets are computed on demand.  We keep both the set and
135      its complement, so that we know whether the information was
136      already computed or not.  */
137   bitmap_head indep_loop;	/* The set of loops in which the memory
138 				   reference is independent, meaning:
139 				   If it is stored to in the loop, this store
140 				     is independent of all other loads and
141 				     stores.
142 				   If it is only loaded, then it is independent
143 				     of all stores in the loop.  */
144   bitmap_head dep_loop;		/* The complement of INDEP_LOOP.  */
145 };
146 
147 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
148    to record (in)dependence against stores in the loop and its subloops, the
149    second to record (in)dependence against all references in the loop
150    and its subloops.  */
151 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
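
/* A quick worked example of the encoding above (a sketch, following the
   comment): for the loop with num == 3, LOOP_DEP_BIT (3, false) == 6 is
   the bit for (in)dependence against stores, and LOOP_DEP_BIT (3, true)
   == 7 is the bit for (in)dependence against all references.  A bit set
   in indep_loop means independence was proved, the same bit set in
   dep_loop means dependence was proved; neither set means the query was
   not computed yet.  */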
152 
153 /* Mem_ref hashtable helpers.  */
154 
155 struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
156 {
157   typedef ao_ref *compare_type;
158   static inline hashval_t hash (const im_mem_ref *);
159   static inline bool equal (const im_mem_ref *, const ao_ref *);
160 };
161 
162 /* A hash function for class im_mem_ref object MEM.  */
163 
164 inline hashval_t
165 mem_ref_hasher::hash (const im_mem_ref *mem)
166 {
167   return mem->hash;
168 }
169 
170 /* An equality function for class im_mem_ref object MEM1 with
171    memory reference OBJ2.  */
172 
173 inline bool
174 mem_ref_hasher::equal (const im_mem_ref *mem1, const ao_ref *obj2)
175 {
176   if (obj2->max_size_known_p ())
177     return (mem1->ref_decomposed
178 	    && operand_equal_p (mem1->mem.base, obj2->base, 0)
179 	    && known_eq (mem1->mem.offset, obj2->offset)
180 	    && known_eq (mem1->mem.size, obj2->size)
181 	    && known_eq (mem1->mem.max_size, obj2->max_size)
182 	    && mem1->mem.volatile_p == obj2->volatile_p
183 	    && (mem1->mem.ref_alias_set == obj2->ref_alias_set
184 		/* We are not canonicalizing alias-sets, but for the
185 		   special case where we didn't canonicalize yet and the
186 		   incoming ref is an alias-set zero MEM we pick
187 		   the correct one already.  */
188 		|| (!mem1->ref_canonical
189 		    && (TREE_CODE (obj2->ref) == MEM_REF
190 			|| TREE_CODE (obj2->ref) == TARGET_MEM_REF)
191 		    && obj2->ref_alias_set == 0)
192 		/* Likewise if there's a canonical ref with alias-set zero.  */
193 		|| (mem1->ref_canonical && mem1->mem.ref_alias_set == 0))
194 	    && types_compatible_p (TREE_TYPE (mem1->mem.ref),
195 				   TREE_TYPE (obj2->ref)));
196   else
197     return operand_equal_p (mem1->mem.ref, obj2->ref, 0);
198 }
199 
200 
201 /* Description of memory accesses in loops.  */
202 
203 static struct
204 {
205   /* The hash table of memory references accessed in loops.  */
206   hash_table<mem_ref_hasher> *refs;
207 
208   /* The list of memory references.  */
209   vec<im_mem_ref *> refs_list;
210 
211   /* The set of memory references accessed in each loop.  */
212   vec<bitmap_head> refs_in_loop;
213 
214   /* The set of memory references stored in each loop.  */
215   vec<bitmap_head> refs_stored_in_loop;
216 
217   /* The set of memory references stored in each loop, including subloops.  */
218   vec<bitmap_head> all_refs_stored_in_loop;
219 
220   /* Cache for expanding memory addresses.  */
221   hash_map<tree, name_expansion *> *ttae_cache;
222 } memory_accesses;
223 
224 /* Obstack for the bitmaps in the above data structures.  */
225 static bitmap_obstack lim_bitmap_obstack;
226 static obstack mem_ref_obstack;
227 
228 static bool ref_indep_loop_p (class loop *, im_mem_ref *);
229 static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);
230 
231 /* Minimum cost of an expensive expression.  */
232 #define LIM_EXPENSIVE ((unsigned) param_lim_expensive)
233 
234 /* The outermost loop for which execution of the header guarantees that the
235    block will be executed.  */
236 #define ALWAYS_EXECUTED_IN(BB) ((class loop *) (BB)->aux)
237 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
238 
239 /* ID of the shared unanalyzable mem.  */
240 #define UNANALYZABLE_MEM_ID 0
241 
242 /* Whether the reference was analyzable.  */
243 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
244 
245 static struct lim_aux_data *
246 init_lim_data (gimple *stmt)
247 {
248   lim_aux_data *p = XCNEW (struct lim_aux_data);
249   lim_aux_data_map->put (stmt, p);
250 
251   return p;
252 }
253 
254 static struct lim_aux_data *
255 get_lim_data (gimple *stmt)
256 {
257   lim_aux_data **p = lim_aux_data_map->get (stmt);
258   if (!p)
259     return NULL;
260 
261   return *p;
262 }
263 
264 /* Releases the memory occupied by DATA.  */
265 
266 static void
267 free_lim_aux_data (struct lim_aux_data *data)
268 {
269   data->depends.release ();
270   free (data);
271 }
272 
273 static void
274 clear_lim_data (gimple *stmt)
275 {
276   lim_aux_data **p = lim_aux_data_map->get (stmt);
277   if (!p)
278     return;
279 
280   free_lim_aux_data (*p);
281   *p = NULL;
282 }
283 
284 
285 /* The possibilities of statement movement.  */
286 enum move_pos
287   {
288     MOVE_IMPOSSIBLE,		/* No movement -- side effect expression.  */
289     MOVE_PRESERVE_EXECUTION,	/* Must not cause a non-executed statement
290 				   to become executed -- memory accesses, ... */
291     MOVE_POSSIBLE		/* Unlimited movement.  */
292   };
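
/* By way of example (a sketch, not an exhaustive classification): an
   assignment like x_1 = y_2 + z_3 from invariant operands is typically
   MOVE_POSSIBLE; a load x_1 = *p_2 is MOVE_PRESERVE_EXECUTION, since
   speculating it onto a path where it never ran might trap; a store
   *p_2 = x_1 or a call with side effects is MOVE_IMPOSSIBLE.  */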
293 
294 
295 /* If it is possible to hoist the statement STMT unconditionally,
296    returns MOVE_POSSIBLE.
297    If it is possible to hoist the statement STMT, but we must avoid making
298    it executed if it would not be executed in the original program (e.g.
299    because it may trap), return MOVE_PRESERVE_EXECUTION.
300    Otherwise return MOVE_IMPOSSIBLE.  */
301 
302 enum move_pos
303 movement_possibility (gimple *stmt)
304 {
305   tree lhs;
306   enum move_pos ret = MOVE_POSSIBLE;
307 
308   if (flag_unswitch_loops
309       && gimple_code (stmt) == GIMPLE_COND)
310     {
311       /* If we perform unswitching, force the operands of the invariant
312 	 condition to be moved out of the loop.  */
313       return MOVE_POSSIBLE;
314     }
315 
316   if (gimple_code (stmt) == GIMPLE_PHI
317       && gimple_phi_num_args (stmt) <= 2
318       && !virtual_operand_p (gimple_phi_result (stmt))
319       && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
320     return MOVE_POSSIBLE;
321 
322   if (gimple_get_lhs (stmt) == NULL_TREE)
323     return MOVE_IMPOSSIBLE;
324 
325   if (gimple_vdef (stmt))
326     return MOVE_IMPOSSIBLE;
327 
328   if (stmt_ends_bb_p (stmt)
329       || gimple_has_volatile_ops (stmt)
330       || gimple_has_side_effects (stmt)
331       || stmt_could_throw_p (cfun, stmt))
332     return MOVE_IMPOSSIBLE;
333 
334   if (is_gimple_call (stmt))
335     {
336       /* While a pure or const call is guaranteed to have no side effects, we
337 	 cannot move it arbitrarily.  Consider code like
338 
339 	 char *s = something ();
340 
341 	 while (1)
342 	   {
343 	     if (s)
344 	       t = strlen (s);
345 	     else
346 	       t = 0;
347 	   }
348 
349 	 Here the strlen call cannot be moved out of the loop, even though
350 	 s is invariant.  In addition to possibly creating a call with
351 	 invalid arguments, moving out a function call that is not executed
352 	 may cause performance regressions in case the call is costly and
353 	 not executed at all.  */
354       ret = MOVE_PRESERVE_EXECUTION;
355       lhs = gimple_call_lhs (stmt);
356     }
357   else if (is_gimple_assign (stmt))
358     lhs = gimple_assign_lhs (stmt);
359   else
360     return MOVE_IMPOSSIBLE;
361 
362   if (TREE_CODE (lhs) == SSA_NAME
363       && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
364     return MOVE_IMPOSSIBLE;
365 
366   if (TREE_CODE (lhs) != SSA_NAME
367       || gimple_could_trap_p (stmt))
368     return MOVE_PRESERVE_EXECUTION;
369 
370   /* Non local loads in a transaction cannot be hoisted out.  Well,
371      unless the load happens on every path out of the loop, but we
372      don't take this into account yet.  */
373   if (flag_tm
374       && gimple_in_transaction (stmt)
375       && gimple_assign_single_p (stmt))
376     {
377       tree rhs = gimple_assign_rhs1 (stmt);
378       if (DECL_P (rhs) && is_global_var (rhs))
379 	{
380 	  if (dump_file)
381 	    {
382 	      fprintf (dump_file, "Cannot hoist conditional load of ");
383 	      print_generic_expr (dump_file, rhs, TDF_SLIM);
384 	      fprintf (dump_file, " because it is in a transaction.\n");
385 	    }
386 	  return MOVE_IMPOSSIBLE;
387 	}
388     }
389 
390   return ret;
391 }
392 
393 /* Suppose that operand DEF is used inside the LOOP.  Returns the outermost
394    loop to which we could move the expression using DEF if it did not have
395    other operands, i.e. the outermost loop enclosing LOOP in which the value
396    of DEF is invariant.  */
397 
398 static class loop *
399 outermost_invariant_loop (tree def, class loop *loop)
400 {
401   gimple *def_stmt;
402   basic_block def_bb;
403   class loop *max_loop;
404   struct lim_aux_data *lim_data;
405 
406   if (!def)
407     return superloop_at_depth (loop, 1);
408 
409   if (TREE_CODE (def) != SSA_NAME)
410     {
411       gcc_assert (is_gimple_min_invariant (def));
412       return superloop_at_depth (loop, 1);
413     }
414 
415   def_stmt = SSA_NAME_DEF_STMT (def);
416   def_bb = gimple_bb (def_stmt);
417   if (!def_bb)
418     return superloop_at_depth (loop, 1);
419 
420   max_loop = find_common_loop (loop, def_bb->loop_father);
421 
422   lim_data = get_lim_data (def_stmt);
423   if (lim_data != NULL && lim_data->max_loop != NULL)
424     max_loop = find_common_loop (max_loop,
425 				 loop_outer (lim_data->max_loop));
426   if (max_loop == loop)
427     return NULL;
428   max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
429 
430   return max_loop;
431 }
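
/* As an example of the above (a sketch): assume loop L2 is nested in
   loop L1 and DEF is an SSA name defined before L1, or a constant; the
   function then returns L1, the superloop of L2 at depth 1, so a use of
   DEF could be hoisted out of both loops.  If DEF is instead defined
   inside L2 and its definition cannot itself be hoisted, NULL is
   returned.  */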
432 
433 /* DATA is a structure containing information associated with a statement
434    inside LOOP.  DEF is one of the operands of this statement.
435 
436    Find the outermost loop enclosing LOOP in which the value of DEF is
437    invariant and record this in the DATA->max_loop field.  If DEF itself is
438    defined inside this loop as well (i.e. we need to hoist it out of the loop
439    if we want to hoist the statement represented by DATA), record the
440    statement in which DEF is defined in the DATA->depends list.  Additionally,
441    if ADD_COST is true, add the cost of the computation of DEF to DATA->cost.
442 
443    If DEF is not invariant in LOOP, return false.  Otherwise return true.  */
444 
445 static bool
446 add_dependency (tree def, struct lim_aux_data *data, class loop *loop,
447 		bool add_cost)
448 {
449   gimple *def_stmt = SSA_NAME_DEF_STMT (def);
450   basic_block def_bb = gimple_bb (def_stmt);
451   class loop *max_loop;
452   struct lim_aux_data *def_data;
453 
454   if (!def_bb)
455     return true;
456 
457   max_loop = outermost_invariant_loop (def, loop);
458   if (!max_loop)
459     return false;
460 
461   if (flow_loop_nested_p (data->max_loop, max_loop))
462     data->max_loop = max_loop;
463 
464   def_data = get_lim_data (def_stmt);
465   if (!def_data)
466     return true;
467 
468   if (add_cost
469       /* Only add the cost if the statement defining DEF is inside LOOP,
470 	 i.e. if it is likely that by moving the invariants dependent
471 	 on it, we will be able to avoid creating a new register for
472 	 it (since it will be only used in these dependent invariants).  */
473       && def_bb->loop_father == loop)
474     data->cost += def_data->cost;
475 
476   data->depends.safe_push (def_stmt);
477 
478   return true;
479 }
480 
481 /* Returns an estimate for a cost of statement STMT.  The values here
482    are just ad-hoc constants, similar to costs for inlining.  */
483 
484 static unsigned
485 stmt_cost (gimple *stmt)
486 {
487   /* Always try to create possibilities for unswitching.  */
488   if (gimple_code (stmt) == GIMPLE_COND
489       || gimple_code (stmt) == GIMPLE_PHI)
490     return LIM_EXPENSIVE;
491 
492   /* We should be hoisting calls if possible.  */
493   if (is_gimple_call (stmt))
494     {
495       tree fndecl;
496 
497       /* Unless the call is to builtin_constant_p; it always folds to a
498 	 constant, so moving it is useless.  */
499       fndecl = gimple_call_fndecl (stmt);
500       if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_CONSTANT_P))
501 	return 0;
502 
503       return LIM_EXPENSIVE;
504     }
505 
506   /* Hoisting memory references out should almost surely be a win.  */
507   if (gimple_references_memory_p (stmt))
508     return LIM_EXPENSIVE;
509 
510   if (gimple_code (stmt) != GIMPLE_ASSIGN)
511     return 1;
512 
513   switch (gimple_assign_rhs_code (stmt))
514     {
515     case MULT_EXPR:
516     case WIDEN_MULT_EXPR:
517     case WIDEN_MULT_PLUS_EXPR:
518     case WIDEN_MULT_MINUS_EXPR:
519     case DOT_PROD_EXPR:
520     case TRUNC_DIV_EXPR:
521     case CEIL_DIV_EXPR:
522     case FLOOR_DIV_EXPR:
523     case ROUND_DIV_EXPR:
524     case EXACT_DIV_EXPR:
525     case CEIL_MOD_EXPR:
526     case FLOOR_MOD_EXPR:
527     case ROUND_MOD_EXPR:
528     case TRUNC_MOD_EXPR:
529     case RDIV_EXPR:
530       /* Division and multiplication are usually expensive.  */
531       return LIM_EXPENSIVE;
532 
533     case LSHIFT_EXPR:
534     case RSHIFT_EXPR:
535     case WIDEN_LSHIFT_EXPR:
536     case LROTATE_EXPR:
537     case RROTATE_EXPR:
538       /* Shifts and rotates are usually expensive.  */
539       return LIM_EXPENSIVE;
540 
541     case CONSTRUCTOR:
542       /* Make vector construction cost proportional to the number
543          of elements.  */
544       return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
545 
546     case SSA_NAME:
547     case PAREN_EXPR:
548       /* Whether or not something is wrapped inside a PAREN_EXPR
549          should not change move cost.  Nor should an intermediate
550 	 unpropagated SSA name copy.  */
551       return 0;
552 
553     default:
554       return 1;
555     }
556 }
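
/* To make the numbers above concrete (assuming the documented default
   of 20 for --param lim-expensive): a multiplication, division, shift,
   call or memory-referencing statement costs 20, a four-element vector
   CONSTRUCTOR costs 4, a plain SSA copy or PAREN_EXPR costs 0, a
   builtin_constant_p call costs 0, and any other assignment costs 1.
   Statements whose accumulated cost reaches LIM_EXPENSIVE are hoisted
   as far as possible by set_profitable_level.  */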
557 
558 /* Finds the outermost loop between OUTER and LOOP in which the memory
559    reference REF is independent.  If REF is not independent in LOOP, NULL
560    is returned instead.  */
561 
562 static class loop *
563 outermost_indep_loop (class loop *outer, class loop *loop, im_mem_ref *ref)
564 {
565   class loop *aloop;
566 
567   if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
568     return NULL;
569 
570   for (aloop = outer;
571        aloop != loop;
572        aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
573     if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
574 	&& ref_indep_loop_p (aloop, ref))
575       return aloop;
576 
577   if (ref_indep_loop_p (loop, ref))
578     return loop;
579   else
580     return NULL;
581 }
582 
583 /* If there is a simple load or store to a memory reference in STMT, returns
584    the location of the memory reference, and sets IS_STORE according to whether
585    it is a store or load.  Otherwise, returns NULL.  */
586 
587 static tree *
588 simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
589 {
590   tree *lhs, *rhs;
591 
592   /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
593   if (!gimple_assign_single_p (stmt))
594     return NULL;
595 
596   lhs = gimple_assign_lhs_ptr (stmt);
597   rhs = gimple_assign_rhs1_ptr (stmt);
598 
599   if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
600     {
601       *is_store = false;
602       return rhs;
603     }
604   else if (gimple_vdef (stmt)
605 	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
606     {
607       *is_store = true;
608       return lhs;
609     }
610   else
611     return NULL;
612 }
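
/* In GIMPLE terms, the two shapes recognized above are for instance

     x_1 = a[i_2];    <- SSA LHS with a VUSE: a load, *IS_STORE = false
     a[i_2] = x_1;    <- VDEF with register or invariant RHS, *IS_STORE = true

   while an aggregate copy like a = b matches neither pattern and makes
   the function return NULL.  */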
613 
614 /* From a controlling predicate in DOM determine the arguments from
615    the PHI node PHI that are chosen if the predicate evaluates to
616    true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
617    they are non-NULL.  Returns true if the arguments can be determined,
618    else returns false.  */
619 
620 static bool
621 extract_true_false_args_from_phi (basic_block dom, gphi *phi,
622 				  tree *true_arg_p, tree *false_arg_p)
623 {
624   edge te, fe;
625   if (! extract_true_false_controlled_edges (dom, gimple_bb (phi),
626 					     &te, &fe))
627     return false;
628 
629   if (true_arg_p)
630     *true_arg_p = PHI_ARG_DEF (phi, te->dest_idx);
631   if (false_arg_p)
632     *false_arg_p = PHI_ARG_DEF (phi, fe->dest_idx);
633 
634   return true;
635 }
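
/* The controlled shape is a (possibly extended) diamond, for example

     if (cond_1)	<- last statement of DOM
       ...
     # x_2 = PHI <a_3 (true edge), b_4 (false edge)>

   where both incoming edges of the PHI are controlled by the predicate
   in DOM; *TRUE_ARG_P then receives a_3 and *FALSE_ARG_P receives b_4.  */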
636 
637 /* Determine the outermost loop to which it is possible to hoist a statement
638    STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
639    the outermost loop in which the value computed by STMT is invariant.
640    If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
641    we preserve whether STMT is executed.  It also fills in other related
642    information in LIM_DATA (STMT).
643 
644    The function returns false if STMT cannot be hoisted outside of the loop it
645    is defined in, and true otherwise.  */
646 
647 static bool
648 determine_max_movement (gimple *stmt, bool must_preserve_exec)
649 {
650   basic_block bb = gimple_bb (stmt);
651   class loop *loop = bb->loop_father;
652   class loop *level;
653   struct lim_aux_data *lim_data = get_lim_data (stmt);
654   tree val;
655   ssa_op_iter iter;
656 
657   if (must_preserve_exec)
658     level = ALWAYS_EXECUTED_IN (bb);
659   else
660     level = superloop_at_depth (loop, 1);
661   lim_data->max_loop = level;
662 
663   if (gphi *phi = dyn_cast <gphi *> (stmt))
664     {
665       use_operand_p use_p;
666       unsigned min_cost = UINT_MAX;
667       unsigned total_cost = 0;
668       struct lim_aux_data *def_data;
669 
670       /* We will end up promoting dependencies to be unconditionally
671 	 evaluated.  For this reason the PHI cost (and thus the
672 	 cost we remove from the loop by doing the invariant motion)
673 	 is that of the cheapest PHI argument dependency chain.  */
674       FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
675 	{
676 	  val = USE_FROM_PTR (use_p);
677 
678 	  if (TREE_CODE (val) != SSA_NAME)
679 	    {
680 	      /* Assign cost 1 to constants.  */
681 	      min_cost = MIN (min_cost, 1);
682 	      total_cost += 1;
683 	      continue;
684 	    }
685 	  if (!add_dependency (val, lim_data, loop, false))
686 	    return false;
687 
688 	  gimple *def_stmt = SSA_NAME_DEF_STMT (val);
689 	  if (gimple_bb (def_stmt)
690 	      && gimple_bb (def_stmt)->loop_father == loop)
691 	    {
692 	      def_data = get_lim_data (def_stmt);
693 	      if (def_data)
694 		{
695 		  min_cost = MIN (min_cost, def_data->cost);
696 		  total_cost += def_data->cost;
697 		}
698 	    }
699 	}
700 
701       min_cost = MIN (min_cost, total_cost);
702       lim_data->cost += min_cost;
703 
704       if (gimple_phi_num_args (phi) > 1)
705 	{
706 	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
707 	  gimple *cond;
708 	  if (gsi_end_p (gsi_last_bb (dom)))
709 	    return false;
710 	  cond = gsi_stmt (gsi_last_bb (dom));
711 	  if (gimple_code (cond) != GIMPLE_COND)
712 	    return false;
713 	  /* Verify that this is an extended form of a diamond and
714 	     the PHI arguments are completely controlled by the
715 	     predicate in DOM.  */
716 	  if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
717 	    return false;
718 
719 	  /* Fold in dependencies and cost of the condition.  */
720 	  FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
721 	    {
722 	      if (!add_dependency (val, lim_data, loop, false))
723 		return false;
724 	      def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
725 	      if (def_data)
726 		lim_data->cost += def_data->cost;
727 	    }
728 
729 	  /* We want to avoid unconditionally executing very expensive
730 	     operations.  As costs for our dependencies cannot be
731 	     negative, just claim we are not invariant for this case.
732 	     We also are not sure whether the control-flow inside the
733 	     loop will vanish.  */
734 	  if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
735 	      && !(min_cost != 0
736 		   && total_cost / min_cost <= 2))
737 	    return false;
738 
739 	  /* Assume that the control-flow in the loop will vanish.
740 	     ???  We should verify this and not artificially increase
741 	     the cost if that is not the case.  */
742 	  lim_data->cost += stmt_cost (stmt);
743 	}
744 
745       return true;
746     }
747   else
748     FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
749       if (!add_dependency (val, lim_data, loop, true))
750 	return false;
751 
752   if (gimple_vuse (stmt))
753     {
754       im_mem_ref *ref
755 	= lim_data ? memory_accesses.refs_list[lim_data->ref] : NULL;
756       if (ref
757 	  && MEM_ANALYZABLE (ref))
758 	{
759 	  lim_data->max_loop = outermost_indep_loop (lim_data->max_loop,
760 						     loop, ref);
761 	  if (!lim_data->max_loop)
762 	    return false;
763 	}
764       else if (! add_dependency (gimple_vuse (stmt), lim_data, loop, false))
765 	return false;
766     }
767 
768   lim_data->cost += stmt_cost (stmt);
769 
770   return true;
771 }
772 
773 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
774    and that one of the operands of this statement is computed by STMT.
775    Ensure that STMT (together with all the statements that define its
776    operands) is hoisted at least out of the loop LEVEL.  */
777 
778 static void
779 set_level (gimple *stmt, class loop *orig_loop, class loop *level)
780 {
781   class loop *stmt_loop = gimple_bb (stmt)->loop_father;
782   struct lim_aux_data *lim_data;
783   gimple *dep_stmt;
784   unsigned i;
785 
786   stmt_loop = find_common_loop (orig_loop, stmt_loop);
787   lim_data = get_lim_data (stmt);
788   if (lim_data != NULL && lim_data->tgt_loop != NULL)
789     stmt_loop = find_common_loop (stmt_loop,
790 				  loop_outer (lim_data->tgt_loop));
791   if (flow_loop_nested_p (stmt_loop, level))
792     return;
793 
794   gcc_assert (level == lim_data->max_loop
795 	      || flow_loop_nested_p (lim_data->max_loop, level));
796 
797   lim_data->tgt_loop = level;
798   FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
799     set_level (dep_stmt, orig_loop, level);
800 }
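
/* For instance, if statement S1 uses a value computed by S2 in the same
   loop and S1 is hoisted out of two enclosing loops, the recursion over
   LIM_DATA (S1)->depends above forces S2, and transitively the
   definitions S2 itself depends on, out to at least that same level.  */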
801 
802 /* Determines the outermost loop from which we want to hoist the statement
803    STMT.  For now we choose the outermost possible loop.  TODO -- use profiling
804    information to set it more sanely.  */
805 
806 static void
807 set_profitable_level (gimple *stmt)
808 {
809   set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
810 }
811 
812 /* Returns true if STMT is a call that has side effects.  */
813 
814 static bool
815 nonpure_call_p (gimple *stmt)
816 {
817   if (gimple_code (stmt) != GIMPLE_CALL)
818     return false;
819 
820   return gimple_has_side_effects (stmt);
821 }
822 
823 /* Rewrite a/b to a*(1/b).  Return the invariant stmt to process.  */
824 
825 static gimple *
826 rewrite_reciprocal (gimple_stmt_iterator *bsi)
827 {
828   gassign *stmt, *stmt1, *stmt2;
829   tree name, lhs, type;
830   tree real_one;
831   gimple_stmt_iterator gsi;
832 
833   stmt = as_a <gassign *> (gsi_stmt (*bsi));
834   lhs = gimple_assign_lhs (stmt);
835   type = TREE_TYPE (lhs);
836 
837   real_one = build_one_cst (type);
838 
839   name = make_temp_ssa_name (type, NULL, "reciptmp");
840   stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
841 			       gimple_assign_rhs2 (stmt));
842   stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
843 			       gimple_assign_rhs1 (stmt));
844 
845   /* Replace division stmt with reciprocal and multiply stmts.
846      The multiply stmt is not invariant, so update iterator
847      and avoid rescanning.  */
848   gsi = *bsi;
849   gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
850   gsi_replace (&gsi, stmt2, true);
851 
852   /* Continue processing with invariant reciprocal statement.  */
853   return stmt1;
854 }
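
/* For example, in a loop where b_1 is invariant but a_2 is not,

     x_3 = a_2 / b_1;

   becomes

     reciptmp_4 = 1.0 / b_1;	<- invariant; returned for hoisting
     x_3 = a_2 * reciptmp_4;

   leaving only the multiplication in the loop.  The caller guards this
   with flag_unsafe_math_optimizations and !flag_trapping_math.  */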
855 
856 /* Check if the pattern at *BSI is a bittest of the form
857    (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */
858 
859 static gimple *
860 rewrite_bittest (gimple_stmt_iterator *bsi)
861 {
862   gassign *stmt;
863   gimple *stmt1;
864   gassign *stmt2;
865   gimple *use_stmt;
866   gcond *cond_stmt;
867   tree lhs, name, t, a, b;
868   use_operand_p use;
869 
870   stmt = as_a <gassign *> (gsi_stmt (*bsi));
871   lhs = gimple_assign_lhs (stmt);
872 
873   /* Verify that the single use of lhs is a comparison against zero.  */
874   if (TREE_CODE (lhs) != SSA_NAME
875       || !single_imm_use (lhs, &use, &use_stmt))
876     return stmt;
877   cond_stmt = dyn_cast <gcond *> (use_stmt);
878   if (!cond_stmt)
879     return stmt;
880   if (gimple_cond_lhs (cond_stmt) != lhs
881       || (gimple_cond_code (cond_stmt) != NE_EXPR
882 	  && gimple_cond_code (cond_stmt) != EQ_EXPR)
883       || !integer_zerop (gimple_cond_rhs (cond_stmt)))
884     return stmt;
885 
886   /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
887   stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
888   if (gimple_code (stmt1) != GIMPLE_ASSIGN)
889     return stmt;
890 
891   /* There may be a conversion in between, possibly inserted by fold.  */
892   if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
893     {
894       t = gimple_assign_rhs1 (stmt1);
895       if (TREE_CODE (t) != SSA_NAME
896 	  || !has_single_use (t))
897 	return stmt;
898       stmt1 = SSA_NAME_DEF_STMT (t);
899       if (gimple_code (stmt1) != GIMPLE_ASSIGN)
900 	return stmt;
901     }
902 
903   /* Verify that B is loop invariant but A is not.  Verify that with
904      all the stmt walking we are still in the same loop.  */
905   if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
906       || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
907     return stmt;
908 
909   a = gimple_assign_rhs1 (stmt1);
910   b = gimple_assign_rhs2 (stmt1);
911 
912   if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
913       && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
914     {
915       gimple_stmt_iterator rsi;
916 
917       /* 1 << B */
918       t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
919 		       build_int_cst (TREE_TYPE (a), 1), b);
920       name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
921       stmt1 = gimple_build_assign (name, t);
922 
923       /* A & (1 << B) */
924       t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
925       name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
926       stmt2 = gimple_build_assign (name, t);
927 
928       /* Replace the SSA_NAME we compare against zero.  Adjust
929 	 the type of zero accordingly.  */
930       SET_USE (use, name);
931       gimple_cond_set_rhs (cond_stmt,
932 			   build_int_cst_type (TREE_TYPE (name),
933 					       0));
934 
935       /* Don't use gsi_replace here, none of the new assignments sets
936 	 the variable originally set in stmt.  Move bsi to stmt1, and
937 	 then remove the original stmt, so that we get a chance to
938 	 retain debug info for it.  */
939       rsi = *bsi;
940       gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
941       gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
942       gimple *to_release = gsi_stmt (rsi);
943       gsi_remove (&rsi, true);
944       release_defs (to_release);
945 
946       return stmt1;
947     }
948 
949   return stmt;
950 }
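
/* For example, with B invariant and A not,

     tmp_1 = A >> B;
     tmp_2 = tmp_1 & 1;
     if (tmp_2 != 0) ...

   is rewritten into

     shifttmp_3 = 1 << B;	<- invariant; returned for hoisting
     shifttmp_4 = A & shifttmp_3;
     if (shifttmp_4 != 0) ...

   so the shift can be hoisted while the AND stays in the loop.  */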
951 
952 /* For each statement determines the outermost loop in which it is invariant,
953    the statements on whose motion it depends, and the cost of the computation.
954    This information is stored in the LIM_DATA structure associated with
955    each statement.  */
956 class invariantness_dom_walker : public dom_walker
957 {
958 public:
959   invariantness_dom_walker (cdi_direction direction)
960     : dom_walker (direction) {}
961 
962   virtual edge before_dom_children (basic_block);
963 };
964 
965 /* Determine the outermost loops in which statements in basic block BB are
966    invariant, and record them to the LIM_DATA associated with the statements.
967    Callback for dom_walker.  */
968 
969 edge
970 invariantness_dom_walker::before_dom_children (basic_block bb)
971 {
972   enum move_pos pos;
973   gimple_stmt_iterator bsi;
974   gimple *stmt;
975   bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
976   class loop *outermost = ALWAYS_EXECUTED_IN (bb);
977   struct lim_aux_data *lim_data;
978 
979   if (!loop_outer (bb->loop_father))
980     return NULL;
981 
982   if (dump_file && (dump_flags & TDF_DETAILS))
983     fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
984 	     bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
985 
986   /* Look at PHI nodes, but only if there are at most two.
987      ???  We could relax this further by post-processing the inserted
988      code and transforming adjacent cond-exprs with the same predicate
989      to control flow again.  */
990   bsi = gsi_start_phis (bb);
991   if (!gsi_end_p (bsi)
992       && ((gsi_next (&bsi), gsi_end_p (bsi))
993 	  || (gsi_next (&bsi), gsi_end_p (bsi))))
994     for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
995       {
996 	stmt = gsi_stmt (bsi);
997 
998 	pos = movement_possibility (stmt);
999 	if (pos == MOVE_IMPOSSIBLE)
1000 	  continue;
1001 
1002 	lim_data = get_lim_data (stmt);
1003 	if (! lim_data)
1004 	  lim_data = init_lim_data (stmt);
1005 	lim_data->always_executed_in = outermost;
1006 
1007 	if (!determine_max_movement (stmt, false))
1008 	  {
1009 	    lim_data->max_loop = NULL;
1010 	    continue;
1011 	  }
1012 
1013 	if (dump_file && (dump_flags & TDF_DETAILS))
1014 	  {
1015 	    print_gimple_stmt (dump_file, stmt, 2);
1016 	    fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
1017 		     loop_depth (lim_data->max_loop),
1018 		     lim_data->cost);
1019 	  }
1020 
1021 	if (lim_data->cost >= LIM_EXPENSIVE)
1022 	  set_profitable_level (stmt);
1023       }
1024 
1025   for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1026     {
1027       stmt = gsi_stmt (bsi);
1028 
1029       pos = movement_possibility (stmt);
1030       if (pos == MOVE_IMPOSSIBLE)
1031 	{
1032 	  if (nonpure_call_p (stmt))
1033 	    {
1034 	      maybe_never = true;
1035 	      outermost = NULL;
1036 	    }
1037 	  /* Make sure to note always_executed_in for stores to make
1038 	     store-motion work.  */
1039 	  else if (stmt_makes_single_store (stmt))
1040 	    {
1041 	      struct lim_aux_data *lim_data = get_lim_data (stmt);
1042 	      if (! lim_data)
1043 		lim_data = init_lim_data (stmt);
1044 	      lim_data->always_executed_in = outermost;
1045 	    }
1046 	  continue;
1047 	}
1048 
1049       if (is_gimple_assign (stmt)
1050 	  && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1051 	      == GIMPLE_BINARY_RHS))
1052 	{
1053 	  tree op0 = gimple_assign_rhs1 (stmt);
1054 	  tree op1 = gimple_assign_rhs2 (stmt);
1055 	  class loop *ol1 = outermost_invariant_loop (op1,
1056 					loop_containing_stmt (stmt));
1057 
1058 	  /* If the divisor is invariant, convert a/b to a*(1/b), allowing the
1059 	     reciprocal to be hoisted out of the loop, saving an expensive divide.  */
1060 	  if (pos == MOVE_POSSIBLE
1061 	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1062 	      && flag_unsafe_math_optimizations
1063 	      && !flag_trapping_math
1064 	      && ol1 != NULL
1065 	      && outermost_invariant_loop (op0, ol1) == NULL)
1066 	    stmt = rewrite_reciprocal (&bsi);
1067 
1068 	  /* If the shift count is invariant, convert (A >> B) & 1 to
1069 	     A & (1 << B), allowing the bit mask to be hoisted out of the loop,
1070 	     saving an expensive shift.  */
1071 	  if (pos == MOVE_POSSIBLE
1072 	      && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1073 	      && integer_onep (op1)
1074 	      && TREE_CODE (op0) == SSA_NAME
1075 	      && has_single_use (op0))
1076 	    stmt = rewrite_bittest (&bsi);
1077 	}
1078 
1079       lim_data = get_lim_data (stmt);
1080       if (! lim_data)
1081 	lim_data = init_lim_data (stmt);
1082       lim_data->always_executed_in = outermost;
1083 
1084       if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1085 	continue;
1086 
1087       if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1088 	{
1089 	  lim_data->max_loop = NULL;
1090 	  continue;
1091 	}
1092 
1093       if (dump_file && (dump_flags & TDF_DETAILS))
1094 	{
1095 	  print_gimple_stmt (dump_file, stmt, 2);
1096 	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
1097 		   loop_depth (lim_data->max_loop),
1098 		   lim_data->cost);
1099 	}
1100 
1101       if (lim_data->cost >= LIM_EXPENSIVE)
1102 	set_profitable_level (stmt);
1103     }
1104   return NULL;
1105 }
1106 
1107 /* Hoist the statements in basic block BB out of the loops prescribed by
1108    data stored in the LIM_DATA structures associated with each statement.
1109    Called on blocks in reverse post order from move_computations.  */
1110 
1111 unsigned int
1112 move_computations_worker (basic_block bb)
1113 {
1114   class loop *level;
1115   unsigned cost = 0;
1116   struct lim_aux_data *lim_data;
1117   unsigned int todo = 0;
1118 
1119   if (!loop_outer (bb->loop_father))
1120     return todo;
1121 
1122   for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1123     {
1124       gassign *new_stmt;
1125       gphi *stmt = bsi.phi ();
1126 
1127       lim_data = get_lim_data (stmt);
1128       if (lim_data == NULL)
1129 	{
1130 	  gsi_next (&bsi);
1131 	  continue;
1132 	}
1133 
1134       cost = lim_data->cost;
1135       level = lim_data->tgt_loop;
1136       clear_lim_data (stmt);
1137 
1138       if (!level)
1139 	{
1140 	  gsi_next (&bsi);
1141 	  continue;
1142 	}
1143 
1144       if (dump_file && (dump_flags & TDF_DETAILS))
1145 	{
1146 	  fprintf (dump_file, "Moving PHI node\n");
1147 	  print_gimple_stmt (dump_file, stmt, 0);
1148 	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1149 		   cost, level->num);
1150 	}
1151 
1152       if (gimple_phi_num_args (stmt) == 1)
1153 	{
1154 	  tree arg = PHI_ARG_DEF (stmt, 0);
1155 	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1156 					  TREE_CODE (arg), arg);
1157 	}
1158       else
1159 	{
1160 	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1161 	  gimple *cond = gsi_stmt (gsi_last_bb (dom));
1162 	  tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1163 	  /* Get the PHI arguments corresponding to the true and false
1164 	     edges of COND.  */
1165 	  extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1166 	  gcc_assert (arg0 && arg1);
1167 	  t = build2 (gimple_cond_code (cond), boolean_type_node,
1168 		      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1169 	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1170 					  COND_EXPR, t, arg0, arg1);
1171 	  todo |= TODO_cleanup_cfg;
1172 	}
1173       if (!ALWAYS_EXECUTED_IN (bb)
1174 	  || (ALWAYS_EXECUTED_IN (bb) != level
1175 	      && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level)))
1176 	reset_flow_sensitive_info (gimple_assign_lhs (new_stmt));
1177       gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1178       remove_phi_node (&bsi, false);
1179     }
1180 
1181   for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1182     {
1183       edge e;
1184 
1185       gimple *stmt = gsi_stmt (bsi);
1186 
1187       lim_data = get_lim_data (stmt);
1188       if (lim_data == NULL)
1189 	{
1190 	  gsi_next (&bsi);
1191 	  continue;
1192 	}
1193 
1194       cost = lim_data->cost;
1195       level = lim_data->tgt_loop;
1196       clear_lim_data (stmt);
1197 
1198       if (!level)
1199 	{
1200 	  gsi_next (&bsi);
1201 	  continue;
1202 	}
1203 
1204       /* We do not really want to move conditionals out of the loop; we just
1205 	 placed it here to force its operands to be moved if necessary.  */
1206       if (gimple_code (stmt) == GIMPLE_COND)
1207 	continue;
1208 
1209       if (dump_file && (dump_flags & TDF_DETAILS))
1210 	{
1211 	  fprintf (dump_file, "Moving statement\n");
1212 	  print_gimple_stmt (dump_file, stmt, 0);
1213 	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1214 		   cost, level->num);
1215 	}
1216 
1217       e = loop_preheader_edge (level);
1218       gcc_assert (!gimple_vdef (stmt));
1219       if (gimple_vuse (stmt))
1220 	{
1221 	  /* The new VUSE is the one from the virtual PHI in the loop
1222 	     header or the one already present.  */
1223 	  gphi_iterator gsi2;
1224 	  for (gsi2 = gsi_start_phis (e->dest);
1225 	       !gsi_end_p (gsi2); gsi_next (&gsi2))
1226 	    {
1227 	      gphi *phi = gsi2.phi ();
1228 	      if (virtual_operand_p (gimple_phi_result (phi)))
1229 		{
1230 		  SET_USE (gimple_vuse_op (stmt),
1231 			   PHI_ARG_DEF_FROM_EDGE (phi, e));
1232 		  break;
1233 		}
1234 	    }
1235 	}
1236       gsi_remove (&bsi, false);
1237       if (gimple_has_lhs (stmt)
1238 	  && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
1239 	  && (!ALWAYS_EXECUTED_IN (bb)
1240 	      || !(ALWAYS_EXECUTED_IN (bb) == level
1241 		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1242 	reset_flow_sensitive_info (gimple_get_lhs (stmt));
1243       /* In case this is a stmt that is not unconditionally executed
1244          when the target loop header is executed and the stmt may
1245 	 invoke undefined integer or pointer overflow, rewrite it to
1246 	 unsigned arithmetic.  */
1247       if (is_gimple_assign (stmt)
1248 	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1249 	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1250 	  && arith_code_with_undefined_signed_overflow
1251 	       (gimple_assign_rhs_code (stmt))
1252 	  && (!ALWAYS_EXECUTED_IN (bb)
1253 	      || !(ALWAYS_EXECUTED_IN (bb) == level
1254 		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1255 	gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1256       else
1257 	gsi_insert_on_edge (e, stmt);
1258     }
1259 
1260   return todo;
1261 }
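
/* To sketch the PHI handling above: a two-argument PHI

     # x_1 = PHI <a_2 (true edge), b_3 (false edge)>

   whose arguments are controlled by "if (cond)" in the immediate
   dominator is turned into

     x_1 = cond ? a_2 : b_3;

   inserted on the preheader edge of the target loop; the requested
   TODO_cleanup_cfg can then often remove the emptied control flow.  */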
1262 
1263 /* Hoist the statements out of the loops prescribed by data stored in
1264    LIM_DATA structures associated with each statement.  */
1265 
1266 static unsigned int
1267 move_computations (void)
1268 {
1269   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
1270   int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
1271   unsigned todo = 0;
1272 
1273   for (int i = 0; i < n; ++i)
1274     todo |= move_computations_worker (BASIC_BLOCK_FOR_FN (cfun, rpo[i]));
1275 
1276   free (rpo);
1277 
1278   gsi_commit_edge_inserts ();
1279   if (need_ssa_update_p (cfun))
1280     rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1281 
1282   return todo;
1283 }
1284 
1285 /* Checks whether the statement defining variable *INDEX can be hoisted
1286    out of the loop passed in DATA.  Callback for for_each_index.  */
1287 
1288 static bool
1289 may_move_till (tree ref, tree *index, void *data)
1290 {
1291   class loop *loop = (class loop *) data, *max_loop;
1292 
1293   /* If REF is an array reference, check also that the step and the lower
1294      bound are invariant in LOOP.  */
1295   if (TREE_CODE (ref) == ARRAY_REF)
1296     {
1297       tree step = TREE_OPERAND (ref, 3);
1298       tree lbound = TREE_OPERAND (ref, 2);
1299 
1300       max_loop = outermost_invariant_loop (step, loop);
1301       if (!max_loop)
1302 	return false;
1303 
1304       max_loop = outermost_invariant_loop (lbound, loop);
1305       if (!max_loop)
1306 	return false;
1307     }
1308 
1309   max_loop = outermost_invariant_loop (*index, loop);
1310   if (!max_loop)
1311     return false;
1312 
1313   return true;
1314 }
1315 
1316 /* If OP is an SSA name, force the statement that defines it to be
1317    moved out of the LOOP.  ORIG_LOOP is the loop in which OP is used.  */
1318 
1319 static void
1320 force_move_till_op (tree op, class loop *orig_loop, class loop *loop)
1321 {
1322   gimple *stmt;
1323 
1324   if (!op
1325       || is_gimple_min_invariant (op))
1326     return;
1327 
1328   gcc_assert (TREE_CODE (op) == SSA_NAME);
1329 
1330   stmt = SSA_NAME_DEF_STMT (op);
1331   if (gimple_nop_p (stmt))
1332     return;
1333 
1334   set_level (stmt, orig_loop, loop);
1335 }
1336 
1337 /* Forces the statements defining invariants in REF (and *INDEX) to be moved
1338    out of the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback
1339    for for_each_index.  */
1340 
1341 struct fmt_data
1342 {
1343   class loop *loop;
1344   class loop *orig_loop;
1345 };
1346 
1347 static bool
1348 force_move_till (tree ref, tree *index, void *data)
1349 {
1350   struct fmt_data *fmt_data = (struct fmt_data *) data;
1351 
1352   if (TREE_CODE (ref) == ARRAY_REF)
1353     {
1354       tree step = TREE_OPERAND (ref, 3);
1355       tree lbound = TREE_OPERAND (ref, 2);
1356 
1357       force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1358       force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1359     }
1360 
1361   force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1362 
1363   return true;
1364 }
1365 
1366 /* A function to free the mem_ref object MEM.  */
1367 
1368 static void
1369 memref_free (class im_mem_ref *mem)
1370 {
1371   mem->accesses_in_loop.release ();
1372 }
1373 
1374 /* Allocates and returns a memory reference description for MEM whose hash
1375    value is HASH and id is ID.  */
1376 
1377 static im_mem_ref *
1378 mem_ref_alloc (ao_ref *mem, unsigned hash, unsigned id)
1379 {
1380   im_mem_ref *ref = XOBNEW (&mem_ref_obstack, class im_mem_ref);
1381   if (mem)
1382     ref->mem = *mem;
1383   else
1384     ao_ref_init (&ref->mem, error_mark_node);
1385   ref->id = id;
1386   ref->ref_canonical = false;
1387   ref->ref_decomposed = false;
1388   ref->hash = hash;
1389   ref->stored = NULL;
1390   bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1391   bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1392   ref->accesses_in_loop.create (1);
1393 
1394   return ref;
1395 }
1396 
1397 /* Records memory reference location *LOC to the memory reference
1398    description REF.  The reference occurs in statement STMT.  */
1399 
1400 static void
1401 record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
1402 {
1403   mem_ref_loc aref;
1404   aref.stmt = stmt;
1405   aref.ref = loc;
1406   ref->accesses_in_loop.safe_push (aref);
1407 }
1408 
1409 /* Set the LOOP bit in REF's stored bitmap, allocating the bitmap if
1410    necessary.  Return whether a bit was changed.  */
1411 
1412 static bool
1413 set_ref_stored_in_loop (im_mem_ref *ref, class loop *loop)
1414 {
1415   if (!ref->stored)
1416     ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
1417   return bitmap_set_bit (ref->stored, loop->num);
1418 }
1419 
1420 /* Marks reference REF as stored in LOOP.  */
1421 
1422 static void
1423 mark_ref_stored (im_mem_ref *ref, class loop *loop)
1424 {
1425   while (loop != current_loops->tree_root
1426 	 && set_ref_stored_in_loop (ref, loop))
1427     loop = loop_outer (loop);
1428 }
1429 
1430 /* Gathers memory references in statement STMT in LOOP, storing the
1431    information about them in the memory_accesses structure.  Marks
1432    the vops accessed through unrecognized statements there as
1433    well.  */
1434 
1435 static void
1436 gather_mem_refs_stmt (class loop *loop, gimple *stmt)
1437 {
1438   tree *mem = NULL;
1439   hashval_t hash;
1440   im_mem_ref **slot;
1441   im_mem_ref *ref;
1442   bool is_stored;
1443   unsigned id;
1444 
1445   if (!gimple_vuse (stmt))
1446     return;
1447 
1448   mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1449   if (!mem)
1450     {
1451       /* We use the shared mem_ref for all unanalyzable refs.  */
1452       id = UNANALYZABLE_MEM_ID;
1453       ref = memory_accesses.refs_list[id];
1454       if (dump_file && (dump_flags & TDF_DETAILS))
1455 	{
1456 	  fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1457 	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1458 	}
1459       is_stored = gimple_vdef (stmt);
1460     }
1461   else
1462     {
1463       /* We are looking for equal refs that might differ in structure
1464          such as a.b vs. MEM[&a + 4].  So we key off the ao_ref but
1465 	 make sure we can canonicalize the ref in the hashtable if
1466 	 non-operand_equal_p refs are found.  For the lookup we mark
1467 	 the case we want strict equality with aor.max_size == -1.  */
1468       ao_ref aor;
1469       ao_ref_init (&aor, *mem);
1470       ao_ref_base (&aor);
1471       ao_ref_alias_set (&aor);
1472       HOST_WIDE_INT offset, size, max_size;
1473       poly_int64 saved_maxsize = aor.max_size, mem_off;
1474       tree mem_base;
1475       bool ref_decomposed;
1476       if (aor.max_size_known_p ()
1477 	  && aor.offset.is_constant (&offset)
1478 	  && aor.size.is_constant (&size)
1479 	  && aor.max_size.is_constant (&max_size)
1480 	  && size == max_size
1481 	  && (size % BITS_PER_UNIT) == 0
1482 	  /* We're canonicalizing to a MEM where TYPE_SIZE specifies the
1483 	     size.  Make sure this is consistent with the extraction.  */
1484 	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (*mem)))
1485 	  && known_eq (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (*mem))),
1486 		       aor.size)
1487 	  && (mem_base = get_addr_base_and_unit_offset (aor.ref, &mem_off)))
1488 	{
1489 	  ref_decomposed = true;
1490 	  hash = iterative_hash_expr (ao_ref_base (&aor), 0);
1491 	  hash = iterative_hash_host_wide_int (offset, hash);
1492 	  hash = iterative_hash_host_wide_int (size, hash);
1493 	}
1494       else
1495 	{
1496 	  ref_decomposed = false;
1497 	  hash = iterative_hash_expr (aor.ref, 0);
1498 	  aor.max_size = -1;
1499 	}
1500       slot = memory_accesses.refs->find_slot_with_hash (&aor, hash, INSERT);
1501       aor.max_size = saved_maxsize;
1502       if (*slot)
1503 	{
1504 	  if (!(*slot)->ref_canonical
1505 	      && !operand_equal_p (*mem, (*slot)->mem.ref, 0))
1506 	    {
1507 	      /* If we didn't yet canonicalize the hashtable ref (which
1508 	         we'll end up using for code insertion) and hit a second
1509 		 equal ref that is not structurally equivalent create
1510 		 a canonical ref which is a bare MEM_REF.  */
1511 	      if (TREE_CODE (*mem) == MEM_REF
1512 		  || TREE_CODE (*mem) == TARGET_MEM_REF)
1513 		{
1514 		  (*slot)->mem.ref = *mem;
1515 		  (*slot)->mem.base_alias_set = ao_ref_base_alias_set (&aor);
1516 		}
1517 	      else
1518 		{
1519 		  tree ref_alias_type = reference_alias_ptr_type (*mem);
1520 		  unsigned int ref_align = get_object_alignment (*mem);
1521 		  tree ref_type = TREE_TYPE (*mem);
1522 		  tree tmp = build1 (ADDR_EXPR, ptr_type_node,
1523 				     unshare_expr (mem_base));
1524 		  if (TYPE_ALIGN (ref_type) != ref_align)
1525 		    ref_type = build_aligned_type (ref_type, ref_align);
1526 		  (*slot)->mem.ref
1527 		    = fold_build2 (MEM_REF, ref_type, tmp,
1528 				   build_int_cst (ref_alias_type, mem_off));
1529 		  if ((*slot)->mem.volatile_p)
1530 		    TREE_THIS_VOLATILE ((*slot)->mem.ref) = 1;
1531 		  gcc_checking_assert (TREE_CODE ((*slot)->mem.ref) == MEM_REF
1532 				       && is_gimple_mem_ref_addr
1533 				            (TREE_OPERAND ((*slot)->mem.ref,
1534 							   0)));
1535 		  (*slot)->mem.base_alias_set = (*slot)->mem.ref_alias_set;
1536 		}
1537 	      (*slot)->ref_canonical = true;
1538 	    }
1539 	  ref = *slot;
1540 	  id = ref->id;
1541 	}
1542       else
1543 	{
1544 	  id = memory_accesses.refs_list.length ();
1545 	  ref = mem_ref_alloc (&aor, hash, id);
1546 	  ref->ref_decomposed = ref_decomposed;
1547 	  memory_accesses.refs_list.safe_push (ref);
1548 	  *slot = ref;
1549 
1550 	  if (dump_file && (dump_flags & TDF_DETAILS))
1551 	    {
1552 	      fprintf (dump_file, "Memory reference %u: ", id);
1553 	      print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1554 	      fprintf (dump_file, "\n");
1555 	    }
1556 	}
1557 
1558       record_mem_ref_loc (ref, stmt, mem);
1559     }
1560   bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1561   if (is_stored)
1562     {
1563       bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1564       mark_ref_stored (ref, loop);
1565     }
1566   init_lim_data (stmt)->ref = ref->id;
1567   return;
1568 }
1569 
1570 static unsigned *bb_loop_postorder;
1571 
1572 /* qsort comparison function to sort blocks by their loop father's postorder.  */
1573 
1574 static int
1575 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_,
1576 				void *bb_loop_postorder_)
1577 {
1578   unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1579   basic_block bb1 = *(const basic_block *)bb1_;
1580   basic_block bb2 = *(const basic_block *)bb2_;
1581   class loop *loop1 = bb1->loop_father;
1582   class loop *loop2 = bb2->loop_father;
1583   if (loop1->num == loop2->num)
1584     return bb1->index - bb2->index;
1585   return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1586 }
1587 
1588 /* qsort comparison function to sort ref locs by their loop father's postorder.  */
1589 
1590 static int
1591 sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_,
1592 				 void *bb_loop_postorder_)
1593 {
1594   unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1595   const mem_ref_loc *loc1 = (const mem_ref_loc *)loc1_;
1596   const mem_ref_loc *loc2 = (const mem_ref_loc *)loc2_;
1597   class loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
1598   class loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
1599   if (loop1->num == loop2->num)
1600     return 0;
1601   return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1602 }
1603 
1604 /* Gathers memory references in loops.  */
1605 
1606 static void
1607 analyze_memory_references (void)
1608 {
1609   gimple_stmt_iterator bsi;
1610   basic_block bb, *bbs;
1611   class loop *loop, *outer;
1612   unsigned i, n;
1613 
1614   /* Collect all basic blocks in loops and sort them by their
1615      loop's postorder.  */
1616   i = 0;
1617   bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
1618   FOR_EACH_BB_FN (bb, cfun)
1619     if (bb->loop_father != current_loops->tree_root)
1620       bbs[i++] = bb;
1621   n = i;
1622   gcc_sort_r (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp,
1623 	      bb_loop_postorder);
1624 
1625   /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1626      That results in better locality for all the bitmaps.  */
1627   for (i = 0; i < n; ++i)
1628     {
1629       basic_block bb = bbs[i];
1630       for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1631         gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1632     }
1633 
1634   /* Sort the location list of gathered memory references by their
1635      loop postorder number.  */
1636   im_mem_ref *ref;
1637   FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
1638     ref->accesses_in_loop.sort (sort_locs_in_loop_postorder_cmp,
1639 				bb_loop_postorder);
1640 
1641   free (bbs);
1642 
1643   /* Propagate the information about accessed memory references up
1644      the loop hierarchy.  */
1645   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1646     {
1647       /* Finalize the overall touched references (including subloops).  */
1648       bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1649 		       &memory_accesses.refs_stored_in_loop[loop->num]);
1650 
1651       /* Propagate the information about accessed memory references up
1652 	 the loop hierarchy.  */
1653       outer = loop_outer (loop);
1654       if (outer == current_loops->tree_root)
1655 	continue;
1656 
1657       bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1658 		       &memory_accesses.all_refs_stored_in_loop[loop->num]);
1659     }
1660 }
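
/* As a sketch of the propagation above (hypothetical numbering): if
   loop 2 is nested in loop 1 and stores mem-ref 5, the innermost-first
   walk ORs loop 2's refs_stored_in_loop bits into loop 1's
   all_refs_stored_in_loop, so queries at the outer level already see
   ref 5.  */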
1661 
1662 /* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
1663    tree_to_aff_combination_expand.  */
1664 
1665 static bool
1666 mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
1667 		      hash_map<tree, name_expansion *> **ttae_cache)
1668 {
1669   /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1670      object and their offsets differ in such a way that the locations cannot
1671      overlap, then they cannot alias.  */
1672   poly_widest_int size1, size2;
1673   aff_tree off1, off2;
1674 
1675   /* Perform basic offset and type-based disambiguation.  */
1676   if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1677     return false;
1678 
1679   /* The expansion of addresses may be a bit expensive, thus we only do
1680      the check at -O2 and higher optimization levels.  */
1681   if (optimize < 2)
1682     return true;
1683 
1684   get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1685   get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1686   aff_combination_expand (&off1, ttae_cache);
1687   aff_combination_expand (&off2, ttae_cache);
1688   aff_combination_scale (&off1, -1);
1689   aff_combination_add (&off2, &off1);
1690 
1691   if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1692     return false;
1693 
1694   return true;
1695 }
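
/* Worked example for the affine check above (hypothetical refs): for
   accesses p->a[i] and p->a[i + 1] with 4-byte elements, the expanded
   offsets are OFF and OFF + 4.  Scaling the first by -1 and adding it
   leaves the constant 4 in OFF2; with size1 == size2 == 4 the ranges
   [0, 4) and [4, 8) cannot overlap, so the refs do not alias.  */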
1696 
1697 /* Compare function for bsearch searching for reference locations
1698    in a loop.  */
1699 
1700 static int
1701 find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_,
1702 			  void *bb_loop_postorder_)
1703 {
1704   unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1705   class loop *loop = (class loop *)const_cast<void *>(loop_);
1706   mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
1707   class loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
1708   if (loop->num == loc_loop->num
1709       || flow_loop_nested_p (loop, loc_loop))
1710     return 0;
1711   return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
1712 	  ? -1 : 1);
1713 }
1714 
1715 /* Iterates over all locations of REF in LOOP and its subloops, calling
1716    fn.operator() with the location as argument.  When that operator
1717    returns true the iteration is stopped and true is returned.
1718    Otherwise false is returned.  */
1719 
1720 template <typename FN>
1721 static bool
1722 for_all_locs_in_loop (class loop *loop, im_mem_ref *ref, FN fn)
1723 {
1724   unsigned i;
1725   mem_ref_loc *loc;
1726 
1727   /* Search for the cluster of locs in the accesses_in_loop vector
1728      which is sorted by the postorder index of the loop father.  */
1729   loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp,
1730 				       bb_loop_postorder);
1731   if (!loc)
1732     return false;
1733 
1734   /* We have found one location inside loop or its sub-loops.  Iterate
1735      both forward and backward to cover the whole cluster.  */
1736   i = loc - ref->accesses_in_loop.address ();
1737   while (i > 0)
1738     {
1739       --i;
1740       mem_ref_loc *l = &ref->accesses_in_loop[i];
1741       if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1742 	break;
1743       if (fn (l))
1744 	return true;
1745     }
1746   for (i = loc - ref->accesses_in_loop.address ();
1747        i < ref->accesses_in_loop.length (); ++i)
1748     {
1749       mem_ref_loc *l = &ref->accesses_in_loop[i];
1750       if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1751 	break;
1752       if (fn (l))
1753 	return true;
1754     }
1755 
1756   return false;
1757 }
1758 
1759 /* Rewrites location LOC by TMP_VAR.  */
1760 
1761 class rewrite_mem_ref_loc
1762 {
1763 public:
1764   rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1765   bool operator () (mem_ref_loc *loc);
1766   tree tmp_var;
1767 };
1768 
1769 bool
1770 rewrite_mem_ref_loc::operator () (mem_ref_loc *loc)
1771 {
1772   *loc->ref = tmp_var;
1773   update_stmt (loc->stmt);
1774   return false;
1775 }
1776 
1777 /* Rewrites all references to REF in LOOP by variable TMP_VAR.  */
1778 
1779 static void
1780 rewrite_mem_refs (class loop *loop, im_mem_ref *ref, tree tmp_var)
1781 {
1782   for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1783 }
1784 
1785 /* Stores the first reference location in LOCP.  */
1786 
1787 class first_mem_ref_loc_1
1788 {
1789 public:
1790   first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {}
1791   bool operator () (mem_ref_loc *loc);
1792   mem_ref_loc **locp;
1793 };
1794 
1795 bool
1796 first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
1797 {
1798   *locp = loc;
1799   return true;
1800 }
1801 
1802 /* Returns the first reference location to REF in LOOP.  */
1803 
1804 static mem_ref_loc *
1805 first_mem_ref_loc (class loop *loop, im_mem_ref *ref)
1806 {
1807   mem_ref_loc *locp = NULL;
1808   for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1809   return locp;
1810 }
1811 
1812 struct prev_flag_edges {
1813   /* Edge to insert new flag comparison code.  */
1814   edge append_cond_position;
1815 
1816   /* Edge for fall through from previous flag comparison.  */
1817   edge last_cond_fallthru;
1818 };
1819 
1820 /* Helper function for execute_sm.  Emit code to store TMP_VAR into
1821    MEM along edge EX.
1822 
1823    The store is only done if MEM has changed.  We do this so no
1824    changes to MEM occur on code paths that did not originally store
1825    into it.
1826 
1827    The common case for execute_sm will transform:
1828 
1829      for (...) {
1830        if (foo)
1831          stuff;
1832        else
1833          MEM = TMP_VAR;
1834      }
1835 
1836    into:
1837 
1838      lsm = MEM;
1839      for (...) {
1840        if (foo)
1841          stuff;
1842        else
1843          lsm = TMP_VAR;
1844      }
1845      MEM = lsm;
1846 
1847   This function will generate:
1848 
1849      lsm = MEM;
1850 
1851      lsm_flag = false;
1852      ...
1853      for (...) {
1854        if (foo)
1855          stuff;
1856        else {
1857          lsm = TMP_VAR;
1858          lsm_flag = true;
1859        }
1860      }
1861      if (lsm_flag)	<--
1862        MEM = lsm;	<--
1863 */
1864 
1865 static void
1866 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
1867 		       edge preheader, hash_set <basic_block> *flag_bbs)
1868 {
1869   basic_block new_bb, then_bb, old_dest;
1870   bool loop_has_only_one_exit;
1871   edge then_old_edge, orig_ex = ex;
1872   gimple_stmt_iterator gsi;
1873   gimple *stmt;
1874   struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1875   bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
1876 
1877   profile_count count_sum = profile_count::zero ();
1878   int nbbs = 0, ncount = 0;
1879   profile_probability flag_probability = profile_probability::uninitialized ();
1880 
1881   /* Flag is set in FLAG_BBS. Determine probability that flag will be true
1882      at loop exit.
1883 
1884      This code may look fancy, but it cannot update profile very realistically
1885      because we do not know the probability that flag will be true at given
1886      loop exit.
1887 
1888      We look for two interesting extremes
1889        - when exit is dominated by block setting the flag, we know it will
1890          always be true.  This is a common case.
1891        - when all blocks setting the flag have very low frequency we know
1892          it will likely be false.
1893      In all other cases we default to 2/3 for flag being true.  */
1894 
1895   for (hash_set<basic_block>::iterator it = flag_bbs->begin ();
1896        it != flag_bbs->end (); ++it)
1897     {
1898        if ((*it)->count.initialized_p ())
1899          count_sum += (*it)->count, ncount++;
1900        if (dominated_by_p (CDI_DOMINATORS, ex->src, *it))
1901 	 flag_probability = profile_probability::always ();
1902        nbbs++;
1903     }
1904 
1905   profile_probability cap = profile_probability::always ().apply_scale (2, 3);
1906 
1907   if (flag_probability.initialized_p ())
1908     ;
1909   else if (ncount == nbbs
1910 	   && preheader->count () >= count_sum && preheader->count ().nonzero_p ())
1911     {
1912       flag_probability = count_sum.probability_in (preheader->count ());
1913       if (flag_probability > cap)
1914 	flag_probability = cap;
1915     }
1916 
1917   if (!flag_probability.initialized_p ())
1918     flag_probability = cap;
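
  /* Worked example (hypothetical counts): with preheader->count () == 300
     and count_sum == 100, the computation above yields 1/3 for
     flag_probability; had count_sum been 250, the 2/3 cap would apply
     instead.  */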
1919 
1920   /* ?? Insert store after previous store if applicable.  See note
1921      below.  */
1922   if (prev_edges)
1923     ex = prev_edges->append_cond_position;
1924 
1925   loop_has_only_one_exit = single_pred_p (ex->dest);
1926 
1927   if (loop_has_only_one_exit)
1928     ex = split_block_after_labels (ex->dest);
1929   else
1930     {
1931       for (gphi_iterator gpi = gsi_start_phis (ex->dest);
1932 	   !gsi_end_p (gpi); gsi_next (&gpi))
1933 	{
1934 	  gphi *phi = gpi.phi ();
1935 	  if (virtual_operand_p (gimple_phi_result (phi)))
1936 	    continue;
1937 
1938 	  /* When the destination has a non-virtual PHI node with multiple
1939 	     predecessors make sure we preserve the PHI structure by
1940 	     forcing a forwarder block so that hoisting of that PHI will
1941 	     still work.  */
1942 	  split_edge (ex);
1943 	  break;
1944 	}
1945     }
1946 
1947   old_dest = ex->dest;
1948   new_bb = split_edge (ex);
1949   then_bb = create_empty_bb (new_bb);
1950   then_bb->count = new_bb->count.apply_probability (flag_probability);
1951   if (irr)
1952     then_bb->flags = BB_IRREDUCIBLE_LOOP;
1953   add_bb_to_loop (then_bb, new_bb->loop_father);
1954 
1955   gsi = gsi_start_bb (new_bb);
1956   stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1957 			    NULL_TREE, NULL_TREE);
1958   gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1959 
1960   gsi = gsi_start_bb (then_bb);
1961   /* Insert actual store.  */
1962   stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1963   gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1964 
1965   edge e1 = single_succ_edge (new_bb);
1966   edge e2 = make_edge (new_bb, then_bb,
1967 	               EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1968   e2->probability = flag_probability;
1969 
1970   e1->flags |= EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0);
1971   e1->flags &= ~EDGE_FALLTHRU;
1972 
1973   e1->probability = flag_probability.invert ();
1974 
1975   then_old_edge = make_single_succ_edge (then_bb, old_dest,
1976 			     EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1977 
1978   set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1979 
1980   if (prev_edges)
1981     {
1982       basic_block prevbb = prev_edges->last_cond_fallthru->src;
1983       redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1984       set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1985       set_immediate_dominator (CDI_DOMINATORS, old_dest,
1986 			       recompute_dominator (CDI_DOMINATORS, old_dest));
1987     }
1988 
1989   /* ?? Because stores may alias, they must happen in the exact
1990      sequence in which they originally happened.  Save the position right after
1991      the (_lsm) store we just created so we can continue appending after
1992      it and maintain the original order.  */
1993   {
1994     struct prev_flag_edges *p;
1995 
1996     if (orig_ex->aux)
1997       orig_ex->aux = NULL;
1998     alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1999     p = (struct prev_flag_edges *) orig_ex->aux;
2000     p->append_cond_position = then_old_edge;
2001     p->last_cond_fallthru = find_edge (new_bb, old_dest);
2002     orig_ex->aux = (void *) p;
2003   }
2004 
2005   if (!loop_has_only_one_exit)
2006     for (gphi_iterator gpi = gsi_start_phis (old_dest);
2007 	 !gsi_end_p (gpi); gsi_next (&gpi))
2008       {
2009 	gphi *phi = gpi.phi ();
2010 	unsigned i;
2011 
2012 	for (i = 0; i < gimple_phi_num_args (phi); i++)
2013 	  if (gimple_phi_arg_edge (phi, i)->src == new_bb)
2014 	    {
2015 	      tree arg = gimple_phi_arg_def (phi, i);
2016 	      add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
2017 	      update_stmt (phi);
2018 	    }
2019       }
2020 }
2021 
2022 /* When REF is written at the location, set a flag indicating the store.  */
2023 
2024 class sm_set_flag_if_changed
2025 {
2026 public:
2027   sm_set_flag_if_changed (tree flag_, hash_set <basic_block> *bbs_)
2028 	 : flag (flag_), bbs (bbs_) {}
2029   bool operator () (mem_ref_loc *loc);
2030   tree flag;
2031   hash_set <basic_block> *bbs;
2032 };
2033 
2034 bool
2035 sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
2036 {
2037   /* Only set the flag for writes.  */
2038   if (is_gimple_assign (loc->stmt)
2039       && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
2040     {
2041       gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
2042       gimple *stmt = gimple_build_assign (flag, boolean_true_node);
2043       gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2044       bbs->add (gimple_bb (stmt));
2045     }
2046   return false;
2047 }
2048 
2049 /* Helper function for execute_sm.  On every location where REF is
2050    set, set an appropriate flag indicating the store.  */
2051 
2052 static tree
2053 execute_sm_if_changed_flag_set (class loop *loop, im_mem_ref *ref,
2054 				hash_set <basic_block> *bbs)
2055 {
2056   tree flag;
2057   char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
2058   flag = create_tmp_reg (boolean_type_node, str);
2059   for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag, bbs));
2060   return flag;
2061 }
2062 
2063 /* Executes store motion of memory reference REF from LOOP.
2064    Exits from the LOOP are stored in EXITS.  The initialization of the
2065    temporary variable is put in the preheader of the loop, and assignments
2066    to the reference from the temporary variable are emitted at the exits.  */
2067 
2068 static void
2069 execute_sm (class loop *loop, vec<edge> exits, im_mem_ref *ref)
2070 {
2071   tree tmp_var, store_flag = NULL_TREE;
2072   unsigned i;
2073   gassign *load;
2074   struct fmt_data fmt_data;
2075   edge ex;
2076   struct lim_aux_data *lim_data;
2077   bool multi_threaded_model_p = false;
2078   gimple_stmt_iterator gsi;
2079   hash_set<basic_block> flag_bbs;
2080 
2081   if (dump_file && (dump_flags & TDF_DETAILS))
2082     {
2083       fprintf (dump_file, "Executing store motion of ");
2084       print_generic_expr (dump_file, ref->mem.ref);
2085       fprintf (dump_file, " from loop %d\n", loop->num);
2086     }
2087 
2088   tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
2089 			    get_lsm_tmp_name (ref->mem.ref, ~0));
2090 
2091   fmt_data.loop = loop;
2092   fmt_data.orig_loop = loop;
2093   for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
2094 
2095   if (bb_in_transaction (loop_preheader_edge (loop)->src)
2096       || (! flag_store_data_races
2097 	  && ! ref_always_accessed_p (loop, ref, true)))
2098     multi_threaded_model_p = true;
2099 
2100   if (multi_threaded_model_p)
2101     store_flag = execute_sm_if_changed_flag_set (loop, ref, &flag_bbs);
2102 
2103   rewrite_mem_refs (loop, ref, tmp_var);
2104 
2105   /* Emit the load code on a random exit edge or into the latch if
2106      the loop does not exit, so that we are sure it will be processed
2107      by move_computations after all dependencies.  */
2108   gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
2109 
2110   /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2111      load altogether, since the store is predicated by a flag.  We
2112      could do the load only if it was originally in the loop.  */
2113   load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
2114   lim_data = init_lim_data (load);
2115   lim_data->max_loop = loop;
2116   lim_data->tgt_loop = loop;
2117   gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2118 
2119   if (multi_threaded_model_p)
2120     {
2121       load = gimple_build_assign (store_flag, boolean_false_node);
2122       lim_data = init_lim_data (load);
2123       lim_data->max_loop = loop;
2124       lim_data->tgt_loop = loop;
2125       gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2126     }
2127 
2128   /* Sink the store to every exit from the loop.  */
2129   FOR_EACH_VEC_ELT (exits, i, ex)
2130     if (!multi_threaded_model_p)
2131       {
2132 	gassign *store;
2133 	store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2134 	gsi_insert_on_edge (ex, store);
2135       }
2136     else
2137       execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag,
2138 			     loop_preheader_edge (loop), &flag_bbs);
2139 }
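
/* In the single-threaded model the net effect of execute_sm is simply

     lsm = MEM;
     for (...)
       ...uses and defs of lsm instead of MEM...
     MEM = lsm;

   with the final store queued on each exit edge by gsi_insert_on_edge
   and committed later by gsi_commit_edge_inserts in store_motion.  */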
2140 
2141 /* Hoists memory references MEM_REFS out of LOOP.  EXITS is the list of exit
2142    edges of the LOOP.  */
2143 
2144 static void
2145 hoist_memory_references (class loop *loop, bitmap mem_refs,
2146 			 vec<edge> exits)
2147 {
2148   im_mem_ref *ref;
2149   unsigned i;
2150   bitmap_iterator bi;
2151 
2152   EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2153     {
2154       ref = memory_accesses.refs_list[i];
2155       execute_sm (loop, exits, ref);
2156     }
2157 }
2158 
2159 class ref_always_accessed
2160 {
2161 public:
2162   ref_always_accessed (class loop *loop_, bool stored_p_)
2163       : loop (loop_), stored_p (stored_p_) {}
2164   bool operator () (mem_ref_loc *loc);
2165   class loop *loop;
2166   bool stored_p;
2167 };
2168 
2169 bool
2170 ref_always_accessed::operator () (mem_ref_loc *loc)
2171 {
2172   class loop *must_exec;
2173 
2174   struct lim_aux_data *lim_data = get_lim_data (loc->stmt);
2175   if (!lim_data)
2176     return false;
2177 
2178   /* If we require an always executed store, make sure the statement
2179      is a store.  */
2180   if (stored_p)
2181     {
2182       tree lhs = gimple_get_lhs (loc->stmt);
2183       if (!lhs
2184 	  || !(DECL_P (lhs) || REFERENCE_CLASS_P (lhs)))
2185 	return false;
2186     }
2187 
2188   must_exec = lim_data->always_executed_in;
2189   if (!must_exec)
2190     return false;
2191 
2192   if (must_exec == loop
2193       || flow_loop_nested_p (must_exec, loop))
2194     return true;
2195 
2196   return false;
2197 }
2198 
2199 /* Returns true if REF is always accessed in LOOP.  If STORED_P is true
2200    make sure REF is always stored to in LOOP.  */
2201 
2202 static bool
2203 ref_always_accessed_p (class loop *loop, im_mem_ref *ref, bool stored_p)
2204 {
2205   return for_all_locs_in_loop (loop, ref,
2206 			       ref_always_accessed (loop, stored_p));
2207 }
2208 
2209 /* Returns true if REF1 and REF2 are independent.  */
2210 
2211 static bool
2212 refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
2213 {
2214   if (ref1 == ref2)
2215     return true;
2216 
2217   if (dump_file && (dump_flags & TDF_DETAILS))
2218     fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2219 	     ref1->id, ref2->id);
2220 
2221   if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2222     {
2223       if (dump_file && (dump_flags & TDF_DETAILS))
2224 	fprintf (dump_file, "dependent.\n");
2225       return false;
2226     }
2227   else
2228     {
2229       if (dump_file && (dump_flags & TDF_DETAILS))
2230 	fprintf (dump_file, "independent.\n");
2231       return true;
2232     }
2233 }
2234 
2235 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2236    and its super-loops.  */
2237 
2238 static void
2239 record_dep_loop (class loop *loop, im_mem_ref *ref, bool stored_p)
2240 {
2241   /* We can propagate dependent-in-loop bits up the loop
2242      hierarchy to all outer loops.  */
2243   while (loop != current_loops->tree_root
2244 	 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2245     loop = loop_outer (loop);
2246 }
2247 
2248 /* Returns true if REF is independent of all other memory
2249    references in LOOP.  */
2250 
2251 static bool
2252 ref_indep_loop_p_1 (class loop *loop, im_mem_ref *ref, bool stored_p)
2253 {
2254   stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2255 
2256   bool indep_p = true;
2257   bitmap refs_to_check;
2258 
2259   if (stored_p)
2260     refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2261   else
2262     refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2263 
2264   if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2265     indep_p = false;
2266   else
2267     {
2268       if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2269 	return true;
2270       if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2271 	return false;
2272 
2273       class loop *inner = loop->inner;
2274       while (inner)
2275 	{
2276 	  if (!ref_indep_loop_p_1 (inner, ref, stored_p))
2277 	    {
2278 	      indep_p = false;
2279 	      break;
2280 	    }
2281 	  inner = inner->next;
2282 	}
2283 
2284       if (indep_p)
2285 	{
2286 	  unsigned i;
2287 	  bitmap_iterator bi;
2288 	  EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2289 	    {
2290 	      im_mem_ref *aref = memory_accesses.refs_list[i];
2291 	      if (!refs_independent_p (ref, aref))
2292 		{
2293 		  indep_p = false;
2294 		  break;
2295 		}
2296 	    }
2297 	}
2298     }
2299 
2300   if (dump_file && (dump_flags & TDF_DETAILS))
2301     fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2302 	     ref->id, loop->num, indep_p ? "independent" : "dependent");
2303 
2304   /* Record the computed result in the cache.  */
2305   if (indep_p)
2306     {
2307       if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2308 	  && stored_p)
2309 	{
2310 	  /* If it's independent of all refs then it's independent
2311 	     of stores, too.  */
2312 	  bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2313 	}
2314     }
2315   else
2316     {
2317       record_dep_loop (loop, ref, stored_p);
2318       if (!stored_p)
2319 	{
2320 	  /* If it's dependent on stores it's dependent on
2321 	     all refs, too.  */
2322 	  record_dep_loop (loop, ref, true);
2323 	}
2324     }
2325 
2326   return indep_p;
2327 }
2328 
2329 /* Returns true if REF is independent of all other memory references in
2330    LOOP.  */
2331 
2332 static bool
2333 ref_indep_loop_p (class loop *loop, im_mem_ref *ref)
2334 {
2335   gcc_checking_assert (MEM_ANALYZABLE (ref));
2336 
2337   return ref_indep_loop_p_1 (loop, ref, false);
2338 }
2339 
2340 /* Returns true if we can perform store motion of REF from LOOP.  */
2341 
2342 static bool
2343 can_sm_ref_p (class loop *loop, im_mem_ref *ref)
2344 {
2345   tree base;
2346 
2347   /* Can't hoist unanalyzable refs.  */
2348   if (!MEM_ANALYZABLE (ref))
2349     return false;
2350 
2351   /* It should be movable.  */
2352   if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2353       || TREE_THIS_VOLATILE (ref->mem.ref)
2354       || !for_each_index (&ref->mem.ref, may_move_till, loop))
2355     return false;
2356 
2357   /* If it can throw, fail; we do not properly update EH info.  */
2358   if (tree_could_throw_p (ref->mem.ref))
2359     return false;
2360 
2361   /* If it can trap, it must always be executed in LOOP.
2362      Readonly memory locations may trap when storing to them, but
2363      tree_could_trap_p is a predicate for rvalues, so check that
2364      explicitly.  */
2365   base = get_base_address (ref->mem.ref);
2366   if ((tree_could_trap_p (ref->mem.ref)
2367        || (DECL_P (base) && TREE_READONLY (base)))
2368       && !ref_always_accessed_p (loop, ref, true))
2369     return false;
2370 
2371   /* And it must be independent of all other memory references
2372      in LOOP.  */
2373   if (!ref_indep_loop_p (loop, ref))
2374     return false;
2375 
2376   return true;
2377 }
2378 
2379 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2380    should be performed.  SM_EXECUTED is the set of references for which
2381    store motion was performed in one of the outer loops.  */
2382 
2383 static void
2384 find_refs_for_sm (class loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2385 {
2386   bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2387   unsigned i;
2388   bitmap_iterator bi;
2389   im_mem_ref *ref;
2390 
2391   EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2392     {
2393       ref = memory_accesses.refs_list[i];
2394       if (can_sm_ref_p (loop, ref))
2395 	bitmap_set_bit (refs_to_sm, i);
2396     }
2397 }
2398 
2399 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2400    for a store motion optimization (i.e. whether we can insert statements
2401    on its exits).  */
2402 
2403 static bool
2404 loop_suitable_for_sm (class loop *loop ATTRIBUTE_UNUSED,
2405 		      vec<edge> exits)
2406 {
2407   unsigned i;
2408   edge ex;
2409 
2410   FOR_EACH_VEC_ELT (exits, i, ex)
2411     if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2412       return false;
2413 
2414   return true;
2415 }
2416 
2417 /* Try to perform store motion for all memory references modified inside
2418    LOOP.  SM_EXECUTED is the bitmap of the memory references for which
2419    store motion was executed in one of the outer loops.  */
2420 
2421 static void
2422 store_motion_loop (class loop *loop, bitmap sm_executed)
2423 {
2424   vec<edge> exits = get_loop_exit_edges (loop);
2425   class loop *subloop;
2426   bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2427 
2428   if (loop_suitable_for_sm (loop, exits))
2429     {
2430       find_refs_for_sm (loop, sm_executed, sm_in_loop);
2431       hoist_memory_references (loop, sm_in_loop, exits);
2432     }
2433   exits.release ();
2434 
2435   bitmap_ior_into (sm_executed, sm_in_loop);
2436   for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2437     store_motion_loop (subloop, sm_executed);
2438   bitmap_and_compl_into (sm_executed, sm_in_loop);
2439   BITMAP_FREE (sm_in_loop);
2440 }
2441 
2442 /* Try to perform store motion for all memory references modified inside
2443    loops.  */
2444 
2445 static void
2446 store_motion (void)
2447 {
2448   class loop *loop;
2449   bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2450 
2451   for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2452     store_motion_loop (loop, sm_executed);
2453 
2454   BITMAP_FREE (sm_executed);
2455   gsi_commit_edge_inserts ();
2456 }
2457 
2458 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2459    for each such basic block bb records the outermost loop for which execution
2460    of its header implies execution of bb.  CONTAINS_CALL is the bitmap of
2461    blocks that contain a nonpure call.  */
2462 
2463 static void
2464 fill_always_executed_in_1 (class loop *loop, sbitmap contains_call)
2465 {
2466   basic_block bb = NULL, *bbs, last = NULL;
2467   unsigned i;
2468   edge e;
2469   class loop *inn_loop = loop;
2470 
2471   if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2472     {
2473       bbs = get_loop_body_in_dom_order (loop);
2474 
2475       for (i = 0; i < loop->num_nodes; i++)
2476 	{
2477 	  edge_iterator ei;
2478 	  bb = bbs[i];
2479 
2480 	  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2481 	    last = bb;
2482 
2483 	  if (bitmap_bit_p (contains_call, bb->index))
2484 	    break;
2485 
2486 	  FOR_EACH_EDGE (e, ei, bb->succs)
2487 	    {
2488 	      /* If there is an exit from this BB.  */
2489 	      if (!flow_bb_inside_loop_p (loop, e->dest))
2490 		break;
2491 	      /* Or we enter a possibly non-finite loop.  */
2492 	      if (flow_loop_nested_p (bb->loop_father,
2493 				      e->dest->loop_father)
2494 		  && ! finite_loop_p (e->dest->loop_father))
2495 		break;
2496 	    }
2497 	  if (e)
2498 	    break;
2499 
2500 	  /* A loop might be infinite (TODO use simple loop analysis
2501 	     to disprove this if possible).  */
2502 	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
2503 	    break;
2504 
2505 	  if (!flow_bb_inside_loop_p (inn_loop, bb))
2506 	    break;
2507 
2508 	  if (bb->loop_father->header == bb)
2509 	    {
2510 	      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2511 		break;
2512 
2513 	      /* In a loop that is always entered we may proceed anyway.
2514 		 But record that we entered it and stop once we leave it.  */
2515 	      inn_loop = bb->loop_father;
2516 	    }
2517 	}
2518 
2519       while (1)
2520 	{
2521 	  SET_ALWAYS_EXECUTED_IN (last, loop);
2522 	  if (last == loop->header)
2523 	    break;
2524 	  last = get_immediate_dominator (CDI_DOMINATORS, last);
2525 	}
2526 
2527       free (bbs);
2528     }
2529 
2530   for (loop = loop->inner; loop; loop = loop->next)
2531     fill_always_executed_in_1 (loop, contains_call);
2532 }
2533 
2534 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2535    for each such basic block bb records the outermost loop for which execution
2536    of its header implies execution of bb.  */
2537 
2538 static void
2539 fill_always_executed_in (void)
2540 {
2541   basic_block bb;
2542   class loop *loop;
2543 
2544   auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
2545   bitmap_clear (contains_call);
2546   FOR_EACH_BB_FN (bb, cfun)
2547     {
2548       gimple_stmt_iterator gsi;
2549       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2550 	{
2551 	  if (nonpure_call_p (gsi_stmt (gsi)))
2552 	    break;
2553 	}
2554 
2555       if (!gsi_end_p (gsi))
2556 	bitmap_set_bit (contains_call, bb->index);
2557     }
2558 
2559   for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2560     fill_always_executed_in_1 (loop, contains_call);
2561 }
2562 
2563 
2564 /* Compute the global information needed by the loop invariant motion pass.  */
2565 
2566 static void
2567 tree_ssa_lim_initialize (void)
2568 {
2569   class loop *loop;
2570   unsigned i;
2571 
2572   bitmap_obstack_initialize (&lim_bitmap_obstack);
2573   gcc_obstack_init (&mem_ref_obstack);
2574   lim_aux_data_map = new hash_map<gimple *, lim_aux_data *>;
2575 
2576   if (flag_tm)
2577     compute_transaction_bits ();
2578 
2579   alloc_aux_for_edges (0);
2580 
2581   memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2582   memory_accesses.refs_list.create (100);
2583   /* Allocate a special, unanalyzable mem-ref with ID zero.  */
2584   memory_accesses.refs_list.quick_push
2585     (mem_ref_alloc (NULL, 0, UNANALYZABLE_MEM_ID));
2586 
2587   memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2588   memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2589   memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2590   memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2591   memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2592   memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2593 
2594   for (i = 0; i < number_of_loops (cfun); i++)
2595     {
2596       bitmap_initialize (&memory_accesses.refs_in_loop[i],
2597 			 &lim_bitmap_obstack);
2598       bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2599 			 &lim_bitmap_obstack);
2600       bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2601 			 &lim_bitmap_obstack);
2602     }
2603 
2604   memory_accesses.ttae_cache = NULL;
2605 
2606   /* Initialize bb_loop_postorder with a mapping from loop->num to
2607      its postorder index.  */
2608   i = 0;
2609   bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2610   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2611     bb_loop_postorder[loop->num] = i++;
2612 }
2613 
2614 /* Cleans up after the invariant motion pass.  */
2615 
2616 static void
2617 tree_ssa_lim_finalize (void)
2618 {
2619   basic_block bb;
2620   unsigned i;
2621   im_mem_ref *ref;
2622 
2623   free_aux_for_edges ();
2624 
2625   FOR_EACH_BB_FN (bb, cfun)
2626     SET_ALWAYS_EXECUTED_IN (bb, NULL);
2627 
2628   bitmap_obstack_release (&lim_bitmap_obstack);
2629   delete lim_aux_data_map;
2630 
2631   delete memory_accesses.refs;
2632   memory_accesses.refs = NULL;
2633 
2634   FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2635     memref_free (ref);
2636   memory_accesses.refs_list.release ();
2637   obstack_free (&mem_ref_obstack, NULL);
2638 
2639   memory_accesses.refs_in_loop.release ();
2640   memory_accesses.refs_stored_in_loop.release ();
2641   memory_accesses.all_refs_stored_in_loop.release ();
2642 
2643   if (memory_accesses.ttae_cache)
2644     free_affine_expand_cache (&memory_accesses.ttae_cache);
2645 
2646   free (bb_loop_postorder);
2647 }
2648 
2649 /* Moves invariants from loops.  Only "expensive" invariants are moved out --
2650    i.e. those that are likely to be a win regardless of the register pressure.  */
2651 
2652 static unsigned int
2653 tree_ssa_lim (void)
2654 {
2655   unsigned int todo;
2656 
2657   tree_ssa_lim_initialize ();
2658 
2659   /* Gathers information about memory accesses in the loops.  */
2660   analyze_memory_references ();
2661 
2662   /* Fills ALWAYS_EXECUTED_IN information for basic blocks.  */
2663   fill_always_executed_in ();
2664 
2665   /* For each statement determine the outermost loop in which it is
2666      invariant and the cost of computing the invariant.  */
2667   invariantness_dom_walker (CDI_DOMINATORS)
2668     .walk (cfun->cfg->x_entry_block_ptr);
2669 
2670   /* Execute store motion.  Force the necessary invariants to be moved
2671      out of the loops as well.  */
2672   store_motion ();
2673 
2674   /* Move the expressions that are expensive enough.  */
2675   todo = move_computations ();
2676 
2677   tree_ssa_lim_finalize ();
2678 
2679   return todo;
2680 }
2681 
2682 /* Loop invariant motion pass.  */
2683 
2684 namespace {
2685 
2686 const pass_data pass_data_lim =
2687 {
2688   GIMPLE_PASS, /* type */
2689   "lim", /* name */
2690   OPTGROUP_LOOP, /* optinfo_flags */
2691   TV_LIM, /* tv_id */
2692   PROP_cfg, /* properties_required */
2693   0, /* properties_provided */
2694   0, /* properties_destroyed */
2695   0, /* todo_flags_start */
2696   0, /* todo_flags_finish */
2697 };
2698 
2699 class pass_lim : public gimple_opt_pass
2700 {
2701 public:
2702   pass_lim (gcc::context *ctxt)
2703     : gimple_opt_pass (pass_data_lim, ctxt)
2704   {}
2705 
2706   /* opt_pass methods: */
2707   opt_pass * clone () { return new pass_lim (m_ctxt); }
2708   virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2709   virtual unsigned int execute (function *);
2710 
2711 }; // class pass_lim
2712 
2713 unsigned int
2714 pass_lim::execute (function *fun)
2715 {
2716   bool in_loop_pipeline = scev_initialized_p ();
2717   if (!in_loop_pipeline)
2718     loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2719 
2720   if (number_of_loops (fun) <= 1)
2721     return 0;
2722   unsigned int todo = tree_ssa_lim ();
2723 
2724   if (!in_loop_pipeline)
2725     loop_optimizer_finalize ();
2726   else
2727     scev_reset ();
2728   return todo;
2729 }
2730 
2731 } // anon namespace
2732 
2733 gimple_opt_pass *
2734 make_pass_lim (gcc::context *ctxt)
2735 {
2736   return new pass_lim (ctxt);
2737 }
2738 
2739 
2740