/* CPU mode switching
   Copyright (C) 1998-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "target.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "tm_p.h"
#include "function.h"
#include "tree-pass.h"
#include "df.h"
#include "emit-rtl.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"

#ifdef OPTIMIZE_MODE_SWITCHING

/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), each element of which
   contains a list of all insns within that basic block, in the order they
   are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode.  (Each basic block in the
   flow graph must have at least one entry in the segment table.)

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until for each entity all modes are exhausted.

   More details are located in the code for optimize_mode_switching().  */

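/* As a purely illustrative sketch (not part of this file, and not any
   particular target's actual definitions): a hypothetical target with a
   single "FPU precision" entity, whose modes are FP_MODE_SINGLE (0) and
   FP_MODE_DOUBLE (1) with FP_MODE_NONE (2) doubling as both the mode count
   and the "no mode needed" value, could provide the target macros used by
   this pass roughly as follows in its target header.  All of these names,
   as well as fpu_mode_needed and emit_fpu_mode_set, are made up for this
   example.

     #define OPTIMIZE_MODE_SWITCHING(ENTITY)  1
     #define NUM_MODES_FOR_MODE_SWITCHING     { FP_MODE_NONE }
     #define MODE_NEEDED(ENTITY, INSN)        fpu_mode_needed (INSN)
     #define MODE_AFTER(ENTITY, MODE, INSN)   (MODE)
     #define MODE_ENTRY(ENTITY)               FP_MODE_SINGLE
     #define MODE_EXIT(ENTITY)                FP_MODE_SINGLE
     #define MODE_PRIORITY_TO_MODE(ENTITY, N) (N)
     #define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
       emit_fpu_mode_set ((MODE), (HARD_REGS_LIVE))

   NUM_MODES_FOR_MODE_SWITCHING initializes num_modes[] below, so
   num_modes[E] doubles as the "no mode needed" value for entity E;
   MODE_NEEDED returns the mode an insn must execute in (or that value);
   and EMIT_MODE_SET is expanded inside a start_sequence/end_sequence pair
   to generate the insns that actually switch the mode.  */
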
/* This structure contains the information for each insn which requires
   a specific mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.
   REGS_LIVE is the set of hard registers live at this point, for use
   when the mode-setting insns are emitted.  */
struct seginfo
{
  int mode;
  rtx insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

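/* For each basic block (indexed by bb->index) and each entity, SEGINFO
   points to the head of that block's list of seginfo elements and
   COMPUTING records the mode in effect at the end of the block, i.e. the
   last mode required in it (as adjusted by MODE_AFTER), or the entity's
   "no mode" value if nothing in the block requires one.  */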
struct bb_info
{
  struct seginfo *seginfo;
  int computing;
};

/* These bitmaps are used for the LCM algorithm.  */

static sbitmap *antic;
static sbitmap *transp;
static sbitmap *comp;

static struct seginfo * new_seginfo (int, rtx, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET *);
static void reg_becomes_live (rtx, const_rtx, void *);
static void make_preds_opaque (basic_block, int);


/* This function will allocate a new SEGINFO structure, initialized
   with the MODE, INSN, basic block BB and REGS_LIVE parameters.  */

static struct seginfo *
new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}

/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (struct bb_info *head, struct seginfo *info)
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
	ptr = ptr->next;
      ptr->next = info;
    }
}

/* Make all predecessors of basic block B opaque, recursively, till we hit
   some that are already non-transparent, or an edge where aux is set; that
   denotes that a mode set is to be done on that edge.
   J is the bit number in the bitmaps that corresponds to the entity that
   we are currently handling mode-switching for.  */
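
/* As a concrete illustration (block names made up): if LCM has decided to
   insert the mode set needed by block B4 on the edge B1->B2, then B1->B2
   has its aux field set and the old set in B4 becomes redundant.  Calling
   make_preds_opaque (B4, j) clears the transparency bit of B4's
   predecessor B2 and then stops, because B2's own incoming edge B1->B2 is
   marked as an insertion site.  */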

static void
make_preds_opaque (basic_block b, int j)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->preds)
    {
      basic_block pb = e->src;

      if (e->aux || ! bitmap_bit_p (transp[pb->index], j))
	continue;

      bitmap_clear_bit (transp[pb->index], j);
      make_preds_opaque (pb, j);
    }
}

/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}

/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}

/* Make sure that if MODE_ENTRY is defined, then MODE_EXIT is defined,
   and vice versa.  */
#if defined (MODE_ENTRY) != defined (MODE_EXIT)
 #error "Both MODE_ENTRY and MODE_EXIT must be defined"
#endif

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */
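/* Schematically, with block numbers made up for this illustration, a CFG
   that ends in

     (bb 7) --fallthru--> EXIT

   becomes

     (bb 7) --fallthru--> (pre_exit) --fallthru--> EXIT

   and when the function returns a value, the split is placed just before
   the copy of the return value into its hard register, so that the final
   mode switch can be emitted ahead of that copy.  */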

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx last_insn, ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.  */
	if (EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
	    int ret_end = ret_start + nregs;
	    int short_block = 0;
	    int maybe_builtin_apply = 0;
	    int forced_late_switch = 0;
	    rtx before_return_copy;

	    do
	      {
		rtx return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = 1;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip __builtin_apply pattern.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    maybe_builtin_apply = 1;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches the end of a non-void
			   function, there are no return copy insns at all.
			   This avoids an ICE on that invalid function.  */
			if (ret_start + nregs == ret_end)
			  short_block = 1;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      {
			last_insn = return_copy;
			continue;
		      }
		    copy_num
		      = hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

		    /* If the return register is not likely spilled, as is
		       the case for floating point on SH4, then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode = MODE_NEEDED (e, return_copy);

			if (mode != num_modes[e] && mode != MODE_EXIT (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (maybe_builtin_apply
			    && targetm.calls.function_value_regno_p
			        (copy_start))
			  forced_late_switch = 1;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = 1;
			break;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!maybe_builtin_apply
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = 1;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all, of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| (nregs
			    != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
			/* For multi-hard-register floating point
			   values, sometimes the likely-spilled part
			   is ordinarily copied first, then the other
			   part is set with an arithmetic operation.
			   This doesn't actually cause reload
			   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (INSN_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		if (last_insn != BB_HEAD (src_bb))
		  src_bb = split_block (src_bb,
					PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}
#endif

/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */
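
/* For each entity, the loops below record, per basic block, which mode the
   block's first segment needs (used to build ANTIC), which mode the block
   ends up computing (COMP), and whether the block is transparent for the
   entity (TRANSP, with KILL as its complement).  These vectors are fed to
   pre_edge_lcm once per priority level; the INSERT and DEL sets it returns
   say on which edges to emit mode-setting sequences and which per-block
   mode sets have become redundant.  */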

static int
optimize_mode_switching (void)
{
  rtx insn;
  int e;
  basic_block bb;
  int need_commit = 0;
  sbitmap *kill;
  struct edge_list *edge_list;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry ATTRIBUTE_UNUSED, pre_exit ATTRIBUTE_UNUSED;

  for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for extra blocks split
	   from the entry and exit block.  */
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
	entry_exit_extra = 3;
#endif
	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info, last_basic_block + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
  /* Split the edge from the entry block, so that we can note that
     NORMAL_MODE is supplied there.  */
  post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
  pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
#endif

  df_analyze ();

  /* Create the bitmap vectors.  */

  antic = sbitmap_vector_alloc (last_basic_block, n_entities);
  transp = sbitmap_vector_alloc (last_basic_block, n_entities);
  comp = sbitmap_vector_alloc (last_basic_block, n_entities);

  bitmap_vector_ones (transp, last_basic_block);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];

      /* Determine the mode (if any) that the first use of entity E needs.
	 This will be the mode that is anticipatable for this block.
	 Also compute the initial transparency settings.  */
      FOR_EACH_BB (bb)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	  {
	    edge_iterator ei;
	    edge e;
	    FOR_EACH_EDGE (e, ei, bb->preds)
	      if (e->flags & EDGE_COMPLEX)
		break;
	    if (e)
	      {
		ptr = new_seginfo (no_mode, BB_HEAD (bb), bb->index, live_now);
		add_seginfo (info + bb->index, ptr);
		bitmap_clear_bit (transp[bb->index], j);
	      }
	  }

	  FOR_BB_INSNS (bb, insn)
	    {
	      if (INSN_P (insn))
		{
		  int mode = MODE_NEEDED (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      any_set_required = true;
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb->index, live_now);
		      add_seginfo (info + bb->index, ptr);
		      bitmap_clear_bit (transp[bb->index], j);
		    }
#ifdef MODE_AFTER
		  last_mode = MODE_AFTER (e, last_mode, insn);
#endif
		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), &live_now);

		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), &live_now);
		}
	    }

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	    {
	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
	      add_seginfo (info + bb->index, ptr);
	      if (last_mode != no_mode)
		bitmap_clear_bit (transp[bb->index], j);
	    }
	}
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
      {
	int mode = MODE_ENTRY (e);

	if (mode != no_mode)
	  {
	    bb = post_entry;

	    /* By always making this nontransparent, we save
	       an extra check in make_preds_opaque.  We also
	       need this to avoid confusing pre_edge_lcm when
	       antic is cleared but transp and comp are set.  */
	    bitmap_clear_bit (transp[bb->index], j);

	    /* Insert a fake computing definition of MODE into entry
	       blocks which compute no mode. This represents the mode on
	       entry.  */
	    info[bb->index].computing = mode;

	    if (pre_exit)
	      info[pre_exit->index].seginfo->mode = MODE_EXIT (e);
	  }
      }
#endif /* NORMAL_MODE */
    }

  kill = sbitmap_vector_alloc (last_basic_block, n_entities);
  for (i = 0; i < max_num_modes; i++)
    {
      int current_mode[N_ENTITIES];
      sbitmap *del;
      sbitmap *insert;

      /* Set the anticipatable and computing arrays.  */
      bitmap_vector_clear (antic, last_basic_block);
      bitmap_vector_clear (comp, last_basic_block);
      for (j = n_entities - 1; j >= 0; j--)
	{
	  int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
	  struct bb_info *info = bb_info[j];

	  FOR_EACH_BB (bb)
	    {
	      if (info[bb->index].seginfo->mode == m)
		bitmap_set_bit (antic[bb->index], j);

	      if (info[bb->index].computing == m)
		bitmap_set_bit (comp[bb->index], j);
	    }
	}

      /* Calculate the optimal locations for placing
	 mode switches to modes with priority I.  */

      FOR_EACH_BB (bb)
	bitmap_not (kill[bb->index], transp[bb->index]);
      edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
				kill, &insert, &del);

      for (j = n_entities - 1; j >= 0; j--)
	{
	  /* Insert all mode sets that LCM has requested.  */
	  int no_mode = num_modes[entity_map[j]];

	  /* Wherever we have moved a mode setting upwards in the flow graph,
	     the blocks between the new setting site and the now redundant
	     computation cease to be transparent for any lower-priority
	     mode of the same entity.  First mark each insertion site edge
	     by setting its aux field, then propagate the new non-transparency
	     from the redundant computation upwards till we hit an insertion
	     site or an already non-transparent block.  */
	  for (e = NUM_EDGES (edge_list) - 1; e >= 0; e--)
	    {
	      edge eg = INDEX_EDGE (edge_list, e);
	      int mode;
	      basic_block src_bb;
	      HARD_REG_SET live_at_edge;
	      rtx mode_set;

	      eg->aux = 0;

	      if (! bitmap_bit_p (insert[e], j))
		continue;

	      eg->aux = (void *)1;

	      mode = current_mode[j];
	      src_bb = eg->src;

	      REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

	      start_sequence ();
	      EMIT_MODE_SET (entity_map[j], mode, live_at_edge);
	      mode_set = get_insns ();
	      end_sequence ();

	      /* Do not bother to insert an empty sequence.  */
	      if (mode_set == NULL_RTX)
		continue;

	      /* We should not get an abnormal edge here.  */
	      gcc_assert (! (eg->flags & EDGE_ABNORMAL));

	      need_commit = 1;
	      insert_insn_on_edge (mode_set, eg);
	    }

	  FOR_EACH_BB_REVERSE (bb)
	    if (bitmap_bit_p (del[bb->index], j))
	      {
		make_preds_opaque (bb, j);
		/* Cancel the 'deleted' mode set.  */
		bb_info[j][bb->index].seginfo->mode = no_mode;
	      }
	}

      sbitmap_vector_free (del);
      sbitmap_vector_free (insert);
      clear_aux_for_edges ();
      free_edge_list (edge_list);
    }

  /* Now output the remaining mode sets in all the segments.  */
  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      FOR_EACH_BB_REVERSE (bb)
	{
	  struct seginfo *ptr, *next;
	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != no_mode)
		{
		  rtx mode_set;

		  start_sequence ();
		  EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
		  mode_set = get_insns ();
		  end_sequence ();

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		    {
		      emitted = true;
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			emit_insn_after (mode_set, ptr->insn_ptr);
		      else
			emit_insn_before (mode_set, ptr->insn_ptr);
		    }
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }

  /* Finished. Free up all the things we've allocated.  */
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  if (need_commit)
    commit_edge_insertions ();

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
  cleanup_cfg (CLEANUP_NO_INSN_DEL);
#else
  if (!need_commit && !emitted)
    return 0;
#endif

  return 1;
}

#endif /* OPTIMIZE_MODE_SWITCHING */

static bool
gate_mode_switching (void)
{
#ifdef OPTIMIZE_MODE_SWITCHING
  return true;
#else
  return false;
#endif
}

static unsigned int
rest_of_handle_mode_switching (void)
{
#ifdef OPTIMIZE_MODE_SWITCHING
  optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */
  return 0;
}


struct rtl_opt_pass pass_mode_switching =
{
 {
  RTL_PASS,
  "mode_sw",                            /* name */
  OPTGROUP_NONE,                        /* optinfo_flags */
  gate_mode_switching,                  /* gate */
  rest_of_handle_mode_switching,        /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_MODE_SWITCH,                       /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  0                                     /* todo_flags_finish */
 }
};