1 /* Convert a program in SSA form into Normal form.
2    Copyright (C) 2004-2018 Free Software Foundation, Inc.
3    Contributed by Andrew Macleod <amacleod@redhat.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "ssa.h"
30 #include "memmodel.h"
31 #include "emit-rtl.h"
32 #include "gimple-pretty-print.h"
33 #include "diagnostic-core.h"
34 #include "stor-layout.h"
35 #include "cfgrtl.h"
36 #include "cfganal.h"
37 #include "tree-eh.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "dumpfile.h"
41 #include "tree-ssa-live.h"
42 #include "tree-ssa-ter.h"
43 #include "tree-ssa-coalesce.h"
44 #include "tree-outof-ssa.h"
45 #include "dojump.h"
46 
47 /* FIXME: A lot of code here deals with expanding to RTL.  All that code
48    should be in cfgexpand.c.  */
49 #include "explow.h"
50 #include "expr.h"
51 
52 /* Return TRUE if expression STMT is suitable for replacement.  */
53 
54 bool
55 ssa_is_replaceable_p (gimple *stmt)
56 {
57   use_operand_p use_p;
58   tree def;
59   gimple *use_stmt;
60 
61   /* Only consider modify stmts.  */
62   if (!is_gimple_assign (stmt))
63     return false;
64 
65   /* If the statement may throw an exception, it cannot be replaced.  */
66   if (stmt_could_throw_p (stmt))
67     return false;
68 
69   /* Punt unless there is exactly one def.  */
70   def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF);
71   if (!def)
72     return false;
73 
74   /* Only consider definitions which have a single use.  */
75   if (!single_imm_use (def, &use_p, &use_stmt))
76     return false;
77 
78   /* Used in this block, but at the TOP of the block, not the end.  */
79   if (gimple_code (use_stmt) == GIMPLE_PHI)
80     return false;
81 
82   /* There must be no VDEFs.  */
83   if (gimple_vdef (stmt))
84     return false;
85 
86   /* Float expressions must go through memory if float-store is on.  */
87   if (flag_float_store
88       && FLOAT_TYPE_P (gimple_expr_type (stmt)))
89     return false;
90 
91   /* An assignment with a register variable on the RHS is not
92      replaceable.  */
93   if (gimple_assign_rhs_code (stmt) == VAR_DECL
94       && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt)))
95     return false;
96 
97   /* No function calls can be replaced.  */
98   if (is_gimple_call (stmt))
99     return false;
100 
101   /* Leave any stmt with volatile operands alone as well.  */
102   if (gimple_has_volatile_ops (stmt))
103     return false;
104 
105   return true;
106 }
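
/* As an illustrative sketch (the SSA names below are invented), in

     a_2 = b_1 + c_3;
     d_4 = a_2 * 2;

   the statement defining a_2 is a non-throwing GIMPLE_ASSIGN with a single
   real definition, a single immediate use in a non-PHI statement, no VDEFs
   and no volatile operands, so ssa_is_replaceable_p returns true for it and
   TER may later substitute b_1 + c_3 directly into the multiplication.  */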
107 
108 
109 /* Used to hold all the components required to do SSA PHI elimination.
110    The node list and the pred/succ list are simple linear lists; edges
111    are represented as pairs of nodes.
112 
113    The predecessor and successor list:  Nodes are entered in pairs, where
114    [0]->PRED, [1]->SUCC.  All the even indexes in the array represent
115    predecessors, all the odd elements are successors.
116 
117    Rationale:
118    When this was implemented with bitmaps, the SSA->Normal conversion time
119    for very large programs was dominated by clearing the interference graph.
120 
121    Typically this list of edges is extremely small since it only includes
122    PHI results and uses from a single edge which have not coalesced with
123    each other.  This means that no virtual PHI nodes are included, and
124    empirical evidence suggests that the number of edges rarely exceeds
125    3; in a bootstrap of GCC, the maximum size encountered was 7.
126    This also limits the number of nodes involved to rarely more than 6;
127    in the same bootstrap of GCC, the maximum number of nodes encountered
128    was 12.  */
129 
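/* A small illustration (partition numbers invented): if the copies
   PART.1 = PART.5 and PART.2 = PART.5 remain to be emitted on the edge,
   the graph is stored as

     nodes:      [1, 5, 2]
     edge_list:  [1, 5,  2, 5]    even slots hold the copy destinations
                                  (predecessors), odd slots the sources
                                  (successors)
     edge_locus: one source location per pred/succ pair.  */
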
130 struct elim_graph
131 {
132   elim_graph (var_map map);
133 
134   /* Size of the elimination vectors.  */
135   int size;
136 
137   /* List of nodes in the elimination graph.  */
138   auto_vec<int> nodes;
139 
140   /*  The predecessor and successor edge list.  */
141   auto_vec<int> edge_list;
142 
143   /* Source locus on each edge.  */
144   auto_vec<source_location> edge_locus;
145 
146   /* Visited vector.  */
147   auto_sbitmap visited;
148 
149   /* Stack for visited nodes.  */
150   auto_vec<int> stack;
151 
152   /* The variable partition map.  */
153   var_map map;
154 
155   /* Edge being eliminated by this graph.  */
156   edge e;
157 
158   /* List of constant copies to emit.  These are pushed on in pairs.  */
159   auto_vec<int> const_dests;
160   auto_vec<tree> const_copies;
161 
162   /* Source locations for any constant copies.  */
163   auto_vec<source_location> copy_locus;
164 };
165 
166 
167 /* For an edge E find out a good source location to associate with
168    instructions inserted on edge E.  If E has an implicit goto set,
169    use its location.  Otherwise search instructions in predecessors
170    of E for a location, and use that one.  That makes sense because
171    we insert on edges for PHI nodes, and effects of PHIs happen on
172    the end of the predecessor conceptually.  */
173 
174 static void
175 set_location_for_edge (edge e)
176 {
177   if (e->goto_locus)
178     {
179       set_curr_insn_location (e->goto_locus);
180     }
181   else
182     {
183       basic_block bb = e->src;
184       gimple_stmt_iterator gsi;
185 
186       do
187 	{
188 	  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
189 	    {
190 	      gimple *stmt = gsi_stmt (gsi);
191 	      if (is_gimple_debug (stmt))
192 		continue;
193 	      if (gimple_has_location (stmt) || gimple_block (stmt))
194 		{
195 		  set_curr_insn_location (gimple_location (stmt));
196 		  return;
197 		}
198 	    }
199 	  /* Nothing found in this basic block.  Make a half-assed attempt
200 	     to continue with another block.  */
201 	  if (single_pred_p (bb))
202 	    bb = single_pred (bb);
203 	  else
204 	    bb = e->src;
205 	}
206       while (bb != e->src);
207     }
208 }
209 
210 /* Emit insns to copy SRC into DEST converting SRC if necessary.  As
211    SRC/DEST might be BLKmode memory locations SIZEEXP is a tree from
212    which we deduce the size to copy in that case.  */
213 
214 static inline rtx_insn *
215 emit_partition_copy (rtx dest, rtx src, int unsignedsrcp, tree sizeexp)
216 {
217   start_sequence ();
218 
219   if (GET_MODE (src) != VOIDmode && GET_MODE (src) != GET_MODE (dest))
220     src = convert_to_mode (GET_MODE (dest), src, unsignedsrcp);
221   if (GET_MODE (src) == BLKmode)
222     {
223       gcc_assert (GET_MODE (dest) == BLKmode);
224       emit_block_move (dest, src, expr_size (sizeexp), BLOCK_OP_NORMAL);
225     }
226   else
227     emit_move_insn (dest, src);
228   do_pending_stack_adjust ();
229 
230   rtx_insn *seq = get_insns ();
231   end_sequence ();
232 
233   return seq;
234 }
235 
236 /* Insert a copy instruction from partition SRC to DEST onto edge E.  */
237 
238 static void
239 insert_partition_copy_on_edge (edge e, int dest, int src, source_location locus)
240 {
241   tree var;
242   if (dump_file && (dump_flags & TDF_DETAILS))
243     {
244       fprintf (dump_file,
245 	       "Inserting a partition copy on edge BB%d->BB%d : "
246 	       "PART.%d = PART.%d",
247 	       e->src->index,
248 	       e->dest->index, dest, src);
249       fprintf (dump_file, "\n");
250     }
251 
252   gcc_assert (SA.partition_to_pseudo[dest]);
253   gcc_assert (SA.partition_to_pseudo[src]);
254 
255   set_location_for_edge (e);
256   /* If a locus is provided, override the default.  */
257   if (locus)
258     set_curr_insn_location (locus);
259 
260   var = partition_to_var (SA.map, src);
261   rtx_insn *seq = emit_partition_copy (copy_rtx (SA.partition_to_pseudo[dest]),
262 				       copy_rtx (SA.partition_to_pseudo[src]),
263 				       TYPE_UNSIGNED (TREE_TYPE (var)),
264 				       var);
265 
266   insert_insn_on_edge (seq, e);
267 }
268 
269 /* Insert a copy instruction from expression SRC to partition DEST
270    onto edge E.  */
271 
272 static void
273 insert_value_copy_on_edge (edge e, int dest, tree src, source_location locus)
274 {
275   rtx dest_rtx, seq, x;
276   machine_mode dest_mode, src_mode;
277   int unsignedp;
278 
279   if (dump_file && (dump_flags & TDF_DETAILS))
280     {
281       fprintf (dump_file,
282 	       "Inserting a value copy on edge BB%d->BB%d : PART.%d = ",
283 	       e->src->index,
284 	       e->dest->index, dest);
285       print_generic_expr (dump_file, src, TDF_SLIM);
286       fprintf (dump_file, "\n");
287     }
288 
289   dest_rtx = copy_rtx (SA.partition_to_pseudo[dest]);
290   gcc_assert (dest_rtx);
291 
292   set_location_for_edge (e);
293   /* If a locus is provided, override the default.  */
294   if (locus)
295     set_curr_insn_location (locus);
296 
297   start_sequence ();
298 
299   tree name = partition_to_var (SA.map, dest);
300   src_mode = TYPE_MODE (TREE_TYPE (src));
301   dest_mode = GET_MODE (dest_rtx);
302   gcc_assert (src_mode == TYPE_MODE (TREE_TYPE (name)));
303   gcc_assert (!REG_P (dest_rtx)
304 	      || dest_mode == promote_ssa_mode (name, &unsignedp));
305 
306   if (src_mode != dest_mode)
307     {
308       x = expand_expr (src, NULL, src_mode, EXPAND_NORMAL);
309       x = convert_modes (dest_mode, src_mode, x, unsignedp);
310     }
311   else if (src_mode == BLKmode)
312     {
313       x = dest_rtx;
314       store_expr (src, x, 0, false, false);
315     }
316   else
317     x = expand_expr (src, dest_rtx, dest_mode, EXPAND_NORMAL);
318 
319   if (x != dest_rtx)
320     emit_move_insn (dest_rtx, x);
321   do_pending_stack_adjust ();
322 
323   seq = get_insns ();
324   end_sequence ();
325 
326   insert_insn_on_edge (seq, e);
327 }
328 
329 /* Insert a copy instruction from RTL expression SRC to partition DEST
330    onto edge E.  */
331 
332 static void
333 insert_rtx_to_part_on_edge (edge e, int dest, rtx src, int unsignedsrcp,
334 			    source_location locus)
335 {
336   if (dump_file && (dump_flags & TDF_DETAILS))
337     {
338       fprintf (dump_file,
339 	       "Inserting a temp copy on edge BB%d->BB%d : PART.%d = ",
340 	       e->src->index,
341 	       e->dest->index, dest);
342       print_simple_rtl (dump_file, src);
343       fprintf (dump_file, "\n");
344     }
345 
346   gcc_assert (SA.partition_to_pseudo[dest]);
347 
348   set_location_for_edge (e);
349   /* If a locus is provided, override the default.  */
350   if (locus)
351     set_curr_insn_location (locus);
352 
353   /* We give the destination as sizeexp in case src/dest are BLKmode
354      mems.  Usually we give the source.  As these result from SSA names,
355      the left and right sizes should be the same (and no WITH_SIZE_EXPR
356      is involved), so it doesn't matter.  */
357   rtx_insn *seq = emit_partition_copy (copy_rtx (SA.partition_to_pseudo[dest]),
358 				       src, unsignedsrcp,
359 				       partition_to_var (SA.map, dest));
360 
361   insert_insn_on_edge (seq, e);
362 }
363 
364 /* Insert a copy instruction from partition SRC to RTL lvalue DEST
365    onto edge E.  */
366 
367 static void
368 insert_part_to_rtx_on_edge (edge e, rtx dest, int src, source_location locus)
369 {
370   tree var;
371   if (dump_file && (dump_flags & TDF_DETAILS))
372     {
373       fprintf (dump_file,
374 	       "Inserting a temp copy on edge BB%d->BB%d : ",
375 	       e->src->index,
376 	       e->dest->index);
377       print_simple_rtl (dump_file, dest);
378       fprintf (dump_file, "= PART.%d\n", src);
379     }
380 
381   gcc_assert (SA.partition_to_pseudo[src]);
382 
383   set_location_for_edge (e);
384   /* If a locus is provided, override the default.  */
385   if (locus)
386     set_curr_insn_location (locus);
387 
388   var = partition_to_var (SA.map, src);
389   rtx_insn *seq = emit_partition_copy (dest,
390 				       copy_rtx (SA.partition_to_pseudo[src]),
391 				       TYPE_UNSIGNED (TREE_TYPE (var)),
392 				       var);
393 
394   insert_insn_on_edge (seq, e);
395 }
396 
397 
398 /* Create an elimination graph for map.  */
399 
400 elim_graph::elim_graph (var_map map) :
401   nodes (30), edge_list (20), edge_locus (10), visited (map->num_partitions),
402   stack (30), map (map), const_dests (20), const_copies (20), copy_locus (10)
403 {
404 }
405 
406 
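/* Debugging sketch, not part of the original pass: dump the node list and
   the pred/succ pair list of elimination graph G to FILE, using only the
   fields documented above.  The function name is invented for
   illustration.  */

static void ATTRIBUTE_UNUSED
debug_elim_graph (FILE *file, elim_graph *g)
{
  int x;
  int node;

  fprintf (file, "Elimination graph nodes:");
  FOR_EACH_VEC_ELT (g->nodes, x, node)
    fprintf (file, " PART.%d", node);
  fprintf (file, "\n");

  for (unsigned i = 0; i < g->edge_list.length (); i += 2)
    fprintf (file, "  PART.%d = PART.%d\n",
             g->edge_list[i], g->edge_list[i + 1]);
}
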
407 /* Empty elimination graph G.  */
408 
409 static inline void
410 clear_elim_graph (elim_graph *g)
411 {
412   g->nodes.truncate (0);
413   g->edge_list.truncate (0);
414   g->edge_locus.truncate (0);
415 }
416 
417 
418 /* Return the number of nodes in graph G.  */
419 
420 static inline int
421 elim_graph_size (elim_graph *g)
422 {
423   return g->nodes.length ();
424 }
425 
426 
427 /* Add NODE to graph G, if it doesn't exist already.  */
428 
429 static inline void
430 elim_graph_add_node (elim_graph *g, int node)
431 {
432   int x;
433   int t;
434 
435   FOR_EACH_VEC_ELT (g->nodes, x, t)
436     if (t == node)
437       return;
438   g->nodes.safe_push (node);
439 }
440 
441 
442 /* Add the edge PRED->SUCC to graph G.  */
443 
444 static inline void
445 elim_graph_add_edge (elim_graph *g, int pred, int succ, source_location locus)
446 {
447   g->edge_list.safe_push (pred);
448   g->edge_list.safe_push (succ);
449   g->edge_locus.safe_push (locus);
450 }
451 
452 
453 /* Remove an edge from graph G for which NODE is the predecessor, and
454    return the successor node.  -1 is returned if there is no such edge.  */
455 
456 static inline int
457 elim_graph_remove_succ_edge (elim_graph *g, int node, source_location *locus)
458 {
459   int y;
460   unsigned x;
461   for (x = 0; x < g->edge_list.length (); x += 2)
462     if (g->edge_list[x] == node)
463       {
464         g->edge_list[x] = -1;
465 	y = g->edge_list[x + 1];
466 	g->edge_list[x + 1] = -1;
467 	*locus = g->edge_locus[x / 2];
468 	g->edge_locus[x / 2] = UNKNOWN_LOCATION;
469 	return y;
470       }
471   *locus = UNKNOWN_LOCATION;
472   return -1;
473 }
474 
475 
476 /* Find all the nodes in GRAPH which are successors to NODE in the
477    edge list.  VAR will hold the partition number found.  CODE is the
478    code fragment executed for every node found.  */
479 
480 #define FOR_EACH_ELIM_GRAPH_SUCC(GRAPH, NODE, VAR, LOCUS, CODE)		\
481 do {									\
482   unsigned x_;								\
483   int y_;								\
484   for (x_ = 0; x_ < (GRAPH)->edge_list.length (); x_ += 2)	\
485     {									\
486       y_ = (GRAPH)->edge_list[x_];					\
487       if (y_ != (NODE))							\
488         continue;							\
489       (void) ((VAR) = (GRAPH)->edge_list[x_ + 1]);			\
490       (void) ((LOCUS) = (GRAPH)->edge_locus[x_ / 2]);			\
491       CODE;								\
492     }									\
493 } while (0)
494 
495 
496 /* Find all the nodes which are predecessors of NODE in the edge list for
497    GRAPH.  VAR will hold the partition number found.  CODE is the
498    code fragment executed for every node found.  */
499 
500 #define FOR_EACH_ELIM_GRAPH_PRED(GRAPH, NODE, VAR, LOCUS, CODE)		\
501 do {									\
502   unsigned x_;								\
503   int y_;								\
504   for (x_ = 0; x_ < (GRAPH)->edge_list.length (); x_ += 2)	\
505     {									\
506       y_ = (GRAPH)->edge_list[x_ + 1];					\
507       if (y_ != (NODE))							\
508         continue;							\
509       (void) ((VAR) = (GRAPH)->edge_list[x_]);				\
510       (void) ((LOCUS) = (GRAPH)->edge_locus[x_ / 2]);			\
511       CODE;								\
512     }									\
513 } while (0)
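
/* For example (mirroring elim_forward below), visiting every not-yet-visited
   successor of node T looks like

     int S;
     source_location locus;

     FOR_EACH_ELIM_GRAPH_SUCC (g, T, S, locus,
       {
         if (!bitmap_bit_p (g->visited, S))
           elim_forward (g, S);
       });

   where S and LOCUS receive the successor partition and the source location
   recorded for that pred/succ pair.  */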
514 
515 
516 /* Add T to elimination graph G.  */
517 
518 static inline void
519 eliminate_name (elim_graph *g, int T)
520 {
521   elim_graph_add_node (g, T);
522 }
523 
524 /* Return true if this phi argument T should have a copy queued when using
525    var_map MAP.  PHI nodes should contain only ssa_names and invariants.  A
526    test for ssa_name is definitely simpler, but don't let invalid contents
527    slip through in the meantime.  */
528 
529 static inline bool
530 queue_phi_copy_p (var_map map, tree t)
531 {
532   if (TREE_CODE (t) == SSA_NAME)
533     {
534       if (var_to_partition (map, t) == NO_PARTITION)
535         return true;
536       return false;
537     }
538   gcc_checking_assert (is_gimple_min_invariant (t));
539   return true;
540 }
541 
542 /* Build elimination graph G for the PHI nodes in the destination block
543    of the incoming edge G->e.  */
544 
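/* For instance (names and partition numbers invented), when the graph is
   built for edge E and the destination block contains

     res_1 = PHI <a_5 (E), 7 (E2)>

   then, if a_5 is in a partition different from res_1's, that pair of
   partitions is added as a pred/succ edge of the graph.  When the graph is
   instead built for edge E2, the constant 7 is queued in
   const_dests/const_copies and emitted only after all partition copies.  */
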
545 static void
546 eliminate_build (elim_graph *g)
547 {
548   tree Ti;
549   int p0, pi;
550   gphi_iterator gsi;
551 
552   clear_elim_graph (g);
553 
554   for (gsi = gsi_start_phis (g->e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
555     {
556       gphi *phi = gsi.phi ();
557       source_location locus;
558 
559       p0 = var_to_partition (g->map, gimple_phi_result (phi));
560       /* Ignore results which are not in partitions.  */
561       if (p0 == NO_PARTITION)
562 	continue;
563 
564       Ti = PHI_ARG_DEF (phi, g->e->dest_idx);
565       locus = gimple_phi_arg_location_from_edge (phi, g->e);
566 
567       /* If this argument is a constant, or a SSA_NAME which is being
568 	 left in SSA form, just queue a copy to be emitted on this
569 	 edge.  */
570       if (queue_phi_copy_p (g->map, Ti))
571         {
572 	  /* Save constant copies until all other copies have been emitted
573 	     on this edge.  */
574 	  g->const_dests.safe_push (p0);
575 	  g->const_copies.safe_push (Ti);
576 	  g->copy_locus.safe_push (locus);
577 	}
578       else
579         {
580 	  pi = var_to_partition (g->map, Ti);
581 	  if (p0 != pi)
582 	    {
583 	      eliminate_name (g, p0);
584 	      eliminate_name (g, pi);
585 	      elim_graph_add_edge (g, p0, pi, locus);
586 	    }
587 	}
588     }
589 }
590 
591 
592 /* Push successors of T onto the elimination stack for G.  */
593 
594 static void
595 elim_forward (elim_graph *g, int T)
596 {
597   int S;
598   source_location locus;
599 
600   bitmap_set_bit (g->visited, T);
601   FOR_EACH_ELIM_GRAPH_SUCC (g, T, S, locus,
602     {
603       if (!bitmap_bit_p (g->visited, S))
604         elim_forward (g, S);
605     });
606   g->stack.safe_push (T);
607 }
608 
609 
610 /* Return 1 if there are unvisited predecessors of T in graph G.  */
611 
612 static int
613 elim_unvisited_predecessor (elim_graph *g, int T)
614 {
615   int P;
616   source_location locus;
617 
618   FOR_EACH_ELIM_GRAPH_PRED (g, T, P, locus,
619     {
620       if (!bitmap_bit_p (g->visited, P))
621         return 1;
622     });
623   return 0;
624 }
625 
626 /* Process predecessors first, and insert a copy.  */
627 
628 static void
629 elim_backward (elim_graph *g, int T)
630 {
631   int P;
632   source_location locus;
633 
634   bitmap_set_bit (g->visited, T);
635   FOR_EACH_ELIM_GRAPH_PRED (g, T, P, locus,
636     {
637       if (!bitmap_bit_p (g->visited, P))
638         {
639 	  elim_backward (g, P);
640 	  insert_partition_copy_on_edge (g->e, P, T, locus);
641 	}
642     });
643 }
644 
645 /* Allocate a new pseudo register usable for storing values sitting
646    in NAME (a decl or SSA name), i.e. with matching mode and attributes.  */
647 
648 static rtx
649 get_temp_reg (tree name)
650 {
651   tree type = TREE_TYPE (name);
652   int unsignedp;
653   machine_mode reg_mode = promote_ssa_mode (name, &unsignedp);
654   if (reg_mode == BLKmode)
655     return assign_temp (type, 0, 0);
656   rtx x = gen_reg_rtx (reg_mode);
657   if (POINTER_TYPE_P (type))
658     mark_reg_pointer (x, TYPE_ALIGN (TREE_TYPE (type)));
659   return x;
660 }
661 
662 /* Insert required copies for T in graph G.  Check for a strongly connected
663    region, and create a temporary to break the cycle if one is found.  */
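
/* For example, if two partitions swap values across the edge (both
   PART.1 = PART.2 and PART.2 = PART.1 are required), a temporary U breaks
   the cycle:

     U = PART.1;  PART.1 = PART.2;  PART.2 = U;

   which is what the unvisited-predecessor branch below arranges via
   insert_part_to_rtx_on_edge and insert_rtx_to_part_on_edge.  */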
664 
665 static void
666 elim_create (elim_graph *g, int T)
667 {
668   int P, S;
669   source_location locus;
670 
671   if (elim_unvisited_predecessor (g, T))
672     {
673       tree var = partition_to_var (g->map, T);
674       rtx U = get_temp_reg (var);
675       int unsignedsrcp = TYPE_UNSIGNED (TREE_TYPE (var));
676 
677       insert_part_to_rtx_on_edge (g->e, U, T, UNKNOWN_LOCATION);
678       FOR_EACH_ELIM_GRAPH_PRED (g, T, P, locus,
679 	{
680 	  if (!bitmap_bit_p (g->visited, P))
681 	    {
682 	      elim_backward (g, P);
683 	      insert_rtx_to_part_on_edge (g->e, P, U, unsignedsrcp, locus);
684 	    }
685 	});
686     }
687   else
688     {
689       S = elim_graph_remove_succ_edge (g, T, &locus);
690       if (S != -1)
691 	{
692 	  bitmap_set_bit (g->visited, T);
693 	  insert_partition_copy_on_edge (g->e, T, S, locus);
694 	}
695     }
696 }
697 
698 
699 /* Eliminate all the phi nodes on edge E in graph G.  */
700 
701 static void
702 eliminate_phi (edge e, elim_graph *g)
703 {
704   int x;
705 
706   gcc_assert (g->const_copies.length () == 0);
707   gcc_assert (g->copy_locus.length () == 0);
708 
709   /* Abnormal edges already have everything coalesced.  */
710   if (e->flags & EDGE_ABNORMAL)
711     return;
712 
713   g->e = e;
714 
715   eliminate_build (g);
716 
717   if (elim_graph_size (g) != 0)
718     {
719       int part;
720 
721       bitmap_clear (g->visited);
722       g->stack.truncate (0);
723 
724       FOR_EACH_VEC_ELT (g->nodes, x, part)
725         {
726 	  if (!bitmap_bit_p (g->visited, part))
727 	    elim_forward (g, part);
728 	}
729 
730       bitmap_clear (g->visited);
731       while (g->stack.length () > 0)
732 	{
733 	  x = g->stack.pop ();
734 	  if (!bitmap_bit_p (g->visited, x))
735 	    elim_create (g, x);
736 	}
737     }
738 
739   /* If there are any pending constant copies, issue them now.  */
740   while (g->const_copies.length () > 0)
741     {
742       int dest;
743       tree src;
744       source_location locus;
745 
746       src = g->const_copies.pop ();
747       dest = g->const_dests.pop ();
748       locus = g->copy_locus.pop ();
749       insert_value_copy_on_edge (e, dest, src, locus);
750     }
751 }
752 
753 
754 /* Remove each argument from PHI.  If an arg was the last use of an SSA_NAME,
755    check to see if this allows another PHI node to be removed.  */
756 
757 static void
758 remove_gimple_phi_args (gphi *phi)
759 {
760   use_operand_p arg_p;
761   ssa_op_iter iter;
762 
763   if (dump_file && (dump_flags & TDF_DETAILS))
764     {
765       fprintf (dump_file, "Removing Dead PHI definition: ");
766       print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
767     }
768 
769   FOR_EACH_PHI_ARG (arg_p, phi, iter, SSA_OP_USE)
770     {
771       tree arg = USE_FROM_PTR (arg_p);
772       if (TREE_CODE (arg) == SSA_NAME)
773         {
774 	  /* Remove the reference to the existing argument.  */
775 	  SET_USE (arg_p, NULL_TREE);
776 	  if (has_zero_uses (arg))
777 	    {
778 	      gimple *stmt;
779 	      gimple_stmt_iterator gsi;
780 
781 	      stmt = SSA_NAME_DEF_STMT (arg);
782 
783 	      /* Also remove the def if it is a PHI node.  */
784 	      if (gimple_code (stmt) == GIMPLE_PHI)
785 		{
786 		  remove_gimple_phi_args (as_a <gphi *> (stmt));
787 		  gsi = gsi_for_stmt (stmt);
788 		  remove_phi_node (&gsi, true);
789 		}
790 
791 	    }
792 	}
793     }
794 }
795 
796 /* Remove any PHI node which is a virtual PHI, or a PHI with no uses.  */
797 
798 static void
799 eliminate_useless_phis (void)
800 {
801   basic_block bb;
802   gphi_iterator gsi;
803   tree result;
804 
805   FOR_EACH_BB_FN (bb, cfun)
806     {
807       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
808         {
809 	  gphi *phi = gsi.phi ();
810 	  result = gimple_phi_result (phi);
811 	  if (virtual_operand_p (result))
812 	    {
813 	      /* There should be no arguments which are not virtual, or the
814 	         results will be incorrect.  */
815 	      if (flag_checking)
816 		for (size_t i = 0; i < gimple_phi_num_args (phi); i++)
817 		  {
818 		    tree arg = PHI_ARG_DEF (phi, i);
819 		    if (TREE_CODE (arg) == SSA_NAME
820 			&& !virtual_operand_p (arg))
821 		      {
822 			fprintf (stderr, "Argument of PHI is not virtual (");
823 			print_generic_expr (stderr, arg, TDF_SLIM);
824 			fprintf (stderr, "), but the result is :");
825 			print_gimple_stmt (stderr, phi, 0, TDF_SLIM);
826 			internal_error ("SSA corruption");
827 		      }
828 		  }
829 
830 	      remove_phi_node (&gsi, true);
831 	    }
832           else
833 	    {
834 	      /* Also remove real PHIs with no uses.  */
835 	      if (has_zero_uses (result))
836 	        {
837 		  remove_gimple_phi_args (phi);
838 		  remove_phi_node (&gsi, true);
839 		}
840 	      else
841 		gsi_next (&gsi);
842 	    }
843 	}
844     }
845 }
846 
847 
848 /* Verify the variable mapping found in MAP.  With checking enabled,
849    search for PHI nodes whose result is not in any partition while one
850    or more of their arguments is in a partition; such a mismatch should
851    never happen and would produce incorrect code, so it is reported as
852    SSA corruption.  Without checking this function does nothing.  */
853 
854 static void
855 rewrite_trees (var_map map)
856 {
857   if (!flag_checking)
858     return;
859 
860   basic_block bb;
861   /* Search for PHIs where the destination has no partition, but one
862      or more arguments has a partition.  This should not happen and can
863      create incorrect code.  */
864   FOR_EACH_BB_FN (bb, cfun)
865     {
866       gphi_iterator gsi;
867       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
868 	{
869 	  gphi *phi = gsi.phi ();
870 	  tree T0 = var_to_partition_to_var (map, gimple_phi_result (phi));
871 	  if (T0 == NULL_TREE)
872 	    {
873 	      size_t i;
874 	      for (i = 0; i < gimple_phi_num_args (phi); i++)
875 		{
876 		  tree arg = PHI_ARG_DEF (phi, i);
877 
878 		  if (TREE_CODE (arg) == SSA_NAME
879 		      && var_to_partition (map, arg) != NO_PARTITION)
880 		    {
881 		      fprintf (stderr, "Argument of PHI is in a partition :(");
882 		      print_generic_expr (stderr, arg, TDF_SLIM);
883 		      fprintf (stderr, "), but the result is not :");
884 		      print_gimple_stmt (stderr, phi, 0, TDF_SLIM);
885 		      internal_error ("SSA corruption");
886 		    }
887 		}
888 	    }
889 	}
890     }
891 }
892 
893 /* Given the out-of-ssa info object SA (with prepared partitions)
894    eliminate all phi nodes in all basic blocks.  Afterwards no
895    basic block will have phi nodes anymore and there are possibly
896    some RTL instructions inserted on edges.  */
897 
898 void
899 expand_phi_nodes (struct ssaexpand *sa)
900 {
901   basic_block bb;
902   elim_graph g (sa->map);
903 
904   FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
905 		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
906     if (!gimple_seq_empty_p (phi_nodes (bb)))
907       {
908 	edge e;
909 	edge_iterator ei;
910 	FOR_EACH_EDGE (e, ei, bb->preds)
911 	  eliminate_phi (e, &g);
912 	set_phi_nodes (bb, NULL);
913 	/* We can't redirect EH edges in RTL land, so we need to do this
914 	   here.  Redirection happens only when splitting is necessary,
915 	   which it is only for critical edges, normally.  For EH edges
916 	   it might also be necessary when the successor has more than
917 	   one predecessor.  In that case the edge is either required to
918 	   be fallthru (which EH edges aren't), or the predecessor needs
919 	   to end with a jump (which again, isn't the case with EH edges).
920 	   Hence, split all EH edges on which we inserted instructions
921 	   and whose successor has multiple predecessors.  */
922 	for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
923 	  {
924 	    if (e->insns.r && (e->flags & EDGE_EH)
925 		&& !single_pred_p (e->dest))
926 	      {
927 		rtx_insn *insns = e->insns.r;
928 		basic_block bb;
929 		e->insns.r = NULL;
930 		bb = split_edge (e);
931 		single_pred_edge (bb)->insns.r = insns;
932 	      }
933 	    else
934 	      ei_next (&ei);
935 	  }
936       }
937 }
938 
939 
940 /* Remove the ssa-names in the current function and translate them into normal
941    compiler variables.  PERFORM_TER is true if Temporary Expression Replacement
942    should also be used.  */
943 
944 static void
945 remove_ssa_form (bool perform_ter, struct ssaexpand *sa)
946 {
947   bitmap values = NULL;
948   var_map map;
949 
950   map = coalesce_ssa_name ();
951 
952   /* Return to viewing the variable list as just all reference variables after
953      coalescing has been performed.  */
954   partition_view_normal (map);
955 
956   if (dump_file && (dump_flags & TDF_DETAILS))
957     {
958       fprintf (dump_file, "After Coalescing:\n");
959       dump_var_map (dump_file, map);
960     }
961 
962   if (perform_ter)
963     {
964       values = find_replaceable_exprs (map);
965       if (values && dump_file && (dump_flags & TDF_DETAILS))
966 	dump_replaceable_exprs (dump_file, values);
967     }
968 
969   rewrite_trees (map);
970 
971   sa->map = map;
972   sa->values = values;
973   sa->partitions_for_parm_default_defs = get_parm_default_def_partitions (map);
974   sa->partitions_for_undefined_values = get_undefined_value_partitions (map);
975 }
976 
977 
978 /* If it has not been done already for basic block BB, assign increasing
979    uids to each of its statements.  */
980 
981 static void
982 maybe_renumber_stmts_bb (basic_block bb)
983 {
984   unsigned i = 0;
985   gimple_stmt_iterator gsi;
986 
987   if (!bb->aux)
988     return;
989   bb->aux = NULL;
990   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
991     {
992       gimple *stmt = gsi_stmt (gsi);
993       gimple_set_uid (stmt, i);
994       i++;
995     }
996 }
997 
998 
999 /* Return true if we can determine that the SSA_NAMEs RESULT (a result
1000    of a PHI node) and ARG (one of its arguments) conflict.  Return false
1001    otherwise, also when we simply aren't sure.  */
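
/* For instance (a sketch, with both names defined in BB):

     result_1 = PHI <arg_2 (backedge), ...>
     ...
     arg_2 = ...;
     ... = result_1;

   Here RESULT is used after ARG has been defined, so both are live at the
   same point and the function returns true.  If every use of RESULT in BB
   preceded the definition of ARG (and ARG were not itself a PHI result),
   it would return false.  */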
1002 
1003 static bool
1004 trivially_conflicts_p (basic_block bb, tree result, tree arg)
1005 {
1006   use_operand_p use;
1007   imm_use_iterator imm_iter;
1008   gimple *defa = SSA_NAME_DEF_STMT (arg);
1009 
1010   /* If ARG isn't defined in the same block it's too complicated for
1011      our little mind.  */
1012   if (gimple_bb (defa) != bb)
1013     return false;
1014 
1015   FOR_EACH_IMM_USE_FAST (use, imm_iter, result)
1016     {
1017       gimple *use_stmt = USE_STMT (use);
1018       if (is_gimple_debug (use_stmt))
1019 	continue;
1020       /* Now, if there's a use of RESULT that lies outside this basic block,
1021 	 then there surely is a conflict with ARG.  */
1022       if (gimple_bb (use_stmt) != bb)
1023 	return true;
1024       if (gimple_code (use_stmt) == GIMPLE_PHI)
1025 	continue;
1026       /* The use now is in a real stmt of BB, so if ARG was defined
1027          in a PHI node (like RESULT) both conflict.  */
1028       if (gimple_code (defa) == GIMPLE_PHI)
1029 	return true;
1030       maybe_renumber_stmts_bb (bb);
1031       /* If the use of RESULT occurs after the definition of ARG,
1032          the two conflict too.  */
1033       if (gimple_uid (defa) < gimple_uid (use_stmt))
1034 	return true;
1035     }
1036 
1037   return false;
1038 }
1039 
1040 
1041 /* Search every PHI node for arguments associated with backedges which
1042    we can trivially determine will need a copy (the argument is either
1043    not an SSA_NAME or the argument has a different underlying variable
1044    than the PHI result).
1045 
1046    Insert a copy from the PHI argument to a new destination at the
1047    end of the block with the backedge to the top of the loop.  Update
1048    the PHI argument to reference this new destination.  */
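
/* As an illustrative sketch (SSA names invented), for a loop-header PHI

     i_1 = PHI <0 (preheader), j_5 (latch)>

   where j_5 does not share i_1's underlying variable, a new name i_7 is
   created from the PHI result and the copy

     i_7 = j_5;

   is inserted at the end of the latch block, after which the PHI argument
   on the backedge is rewritten to i_7, so that PHI elimination need not
   insert a copy on (and thus split) the backedge.  */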
1049 
1050 static void
1051 insert_backedge_copies (void)
1052 {
1053   basic_block bb;
1054   gphi_iterator gsi;
1055 
1056   mark_dfs_back_edges ();
1057 
1058   FOR_EACH_BB_FN (bb, cfun)
1059     {
1060       /* Mark block as possibly needing calculation of UIDs.  */
1061       bb->aux = &bb->aux;
1062 
1063       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1064 	{
1065 	  gphi *phi = gsi.phi ();
1066 	  tree result = gimple_phi_result (phi);
1067 	  size_t i;
1068 
1069 	  if (virtual_operand_p (result))
1070 	    continue;
1071 
1072 	  for (i = 0; i < gimple_phi_num_args (phi); i++)
1073 	    {
1074 	      tree arg = gimple_phi_arg_def (phi, i);
1075 	      edge e = gimple_phi_arg_edge (phi, i);
1076 
1077 	      /* If the argument is not an SSA_NAME, then we will need a
1078 		 constant initialization.  If the argument is an SSA_NAME with
1079 		 a different underlying variable then a copy statement will be
1080 		 needed.  */
1081 	      if ((e->flags & EDGE_DFS_BACK)
1082 		  && (TREE_CODE (arg) != SSA_NAME
1083 		      || SSA_NAME_VAR (arg) != SSA_NAME_VAR (result)
1084 		      || trivially_conflicts_p (bb, result, arg)))
1085 		{
1086 		  tree name;
1087 		  gassign *stmt;
1088 		  gimple *last = NULL;
1089 		  gimple_stmt_iterator gsi2;
1090 
1091 		  gsi2 = gsi_last_bb (gimple_phi_arg_edge (phi, i)->src);
1092 		  if (!gsi_end_p (gsi2))
1093 		    last = gsi_stmt (gsi2);
1094 
1095 		  /* In theory the only way we ought to get back to the
1096 		     start of a loop should be with a COND_EXPR or GOTO_EXPR.
1097 		     However, better safe than sorry.
1098 		     If the block ends with a control statement or
1099 		     something that might throw, then we have to
1100 		     insert this assignment before the last
1101 		     statement.  Else insert it after the last statement.  */
1102 		  if (last && stmt_ends_bb_p (last))
1103 		    {
1104 		      /* If the last statement in the block is the definition
1105 			 site of the PHI argument, then we can't insert
1106 			 anything after it.  */
1107 		      if (TREE_CODE (arg) == SSA_NAME
1108 			  && SSA_NAME_DEF_STMT (arg) == last)
1109 			continue;
1110 		    }
1111 
1112 		  /* Create a new instance of the underlying variable of the
1113 		     PHI result.  */
1114 		  name = copy_ssa_name (result);
1115 		  stmt = gimple_build_assign (name,
1116 					      gimple_phi_arg_def (phi, i));
1117 
1118 		  /* copy location if present.  */
1119 		  if (gimple_phi_arg_has_location (phi, i))
1120 		    gimple_set_location (stmt,
1121 					 gimple_phi_arg_location (phi, i));
1122 
1123 		  /* Insert the new statement into the block and update
1124 		     the PHI node.  */
1125 		  if (last && stmt_ends_bb_p (last))
1126 		    gsi_insert_before (&gsi2, stmt, GSI_NEW_STMT);
1127 		  else
1128 		    gsi_insert_after (&gsi2, stmt, GSI_NEW_STMT);
1129 		  SET_PHI_ARG_DEF (phi, i, name);
1130 		}
1131 	    }
1132 	}
1133 
1134       /* Unmark this block again.  */
1135       bb->aux = NULL;
1136     }
1137 }
1138 
1139 /* Free all memory associated with going out of SSA form.  SA is
1140    the out-of-SSA info object.  */
1141 
1142 void
1143 finish_out_of_ssa (struct ssaexpand *sa)
1144 {
1145   free (sa->partition_to_pseudo);
1146   if (sa->values)
1147     BITMAP_FREE (sa->values);
1148   delete_var_map (sa->map);
1149   BITMAP_FREE (sa->partitions_for_parm_default_defs);
1150   BITMAP_FREE (sa->partitions_for_undefined_values);
1151   memset (sa, 0, sizeof *sa);
1152 }
1153 
1154 /* Take the current function out of SSA form, translating PHIs as described in
1155    R. Morgan, ``Building an Optimizing Compiler'',
1156    Butterworth-Heinemann, Boston, MA, 1998. pp 176-186.  */
1157 
1158 unsigned int
1159 rewrite_out_of_ssa (struct ssaexpand *sa)
1160 {
1161   /* If elimination of a PHI requires inserting a copy on a backedge,
1162      then we will have to split the backedge which has numerous
1163      undesirable performance effects.
1164 
1165      A significant number of such cases can be handled here by inserting
1166      copies into the loop itself.  */
1167   insert_backedge_copies ();
1168 
1169 
1170   /* Eliminate PHIs which are of no use, such as virtual or dead phis.  */
1171   eliminate_useless_phis ();
1172 
1173   if (dump_file && (dump_flags & TDF_DETAILS))
1174     gimple_dump_cfg (dump_file, dump_flags & ~TDF_DETAILS);
1175 
1176   remove_ssa_form (flag_tree_ter, sa);
1177 
1178   if (dump_file && (dump_flags & TDF_DETAILS))
1179     gimple_dump_cfg (dump_file, dump_flags & ~TDF_DETAILS);
1180 
1181   return 0;
1182 }
1183