/* CPU mode switching
   Copyright (C) 1998-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "regs.h"
#include "emit-rtl.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "tree-pass.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"

#ifdef OPTIMIZE_MODE_SWITCHING

/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), which contains a list
   of all insns within that basic block, in the order they are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode (each basic block in the
   flow graph must have at least one entry in the segment table).

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until, for each entity, all modes are exhausted.

   More details can be found in the code of optimize_mode_switching.  */

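/* As an illustrative sketch only (not part of this file), a target with a
   single entity and two FPU modes might answer the NEEDED query along the
   lines below.  The entity/mode numbering and the FP_MODE insn attribute
   are hypothetical; real targets wire this up through their own
   targetm.mode_switching hooks and insn attributes.  */
#if 0
static int
example_mode_needed (int entity ATTRIBUTE_UNUSED, rtx_insn *insn)
{
  /* Unrecognizable insns place no requirement; returning the number of
     modes for the entity means "no particular mode needed".  */
  if (recog_memoized (insn) < 0)
    return 2;
  /* Otherwise map the (hypothetical) FP_MODE attribute to mode 1 or 0.  */
  return get_attr_fp_mode (insn) == FP_MODE_DOUBLE ? 1 : 0;
}
#endif
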
/* This structure contains the information for each insn which requires
   either single or double mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.
   REGS_LIVE is the set of hard registers live at this insn.  */
struct seginfo
{
  int mode;
  rtx_insn *insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

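/* Illustrative example of the resulting segment lists: if the insns of a
   block require modes 2, 2, 1 in that order, the block's list gets two
   seginfo entries, (mode 2, first insn) and (mode 1, third insn); an insn
   that repeats the current mode does not start a new segment.  A block
   with no requirement at all still gets one entry, carrying "no mode".  */
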
struct bb_info
{
  struct seginfo *seginfo;
  int computing;
  int mode_out;
  int mode_in;
};

static struct seginfo * new_seginfo (int, rtx_insn *, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET *);
static void reg_becomes_live (rtx, const_rtx, void *);

/* Clear mode I from entity J in bitmap B.  */
#define clear_mode_bit(b, j, i) \
       bitmap_clear_bit (b, (j * max_num_modes) + i)

/* Test mode I from entity J in bitmap B.  */
#define mode_bit_p(b, j, i) \
       bitmap_bit_p (b, (j * max_num_modes) + i)

/* Set mode I from entity J in bitmap B.  */
#define set_mode_bit(b, j, i) \
       bitmap_set_bit (b, (j * max_num_modes) + i)

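/* The macros above treat each sbitmap row as a flat n_entities x
   max_num_modes matrix.  Worked example (values are illustrative): with
   max_num_modes == 4, mode I == 2 of entity J == 1 maps to bit
   1 * 4 + 2 == 6 of the row.  */
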
/* Emit mode segments from EDGE_LIST associated with entity E.
   INFO gives the mode availability for each basic block.  */

static bool
commit_mode_sets (struct edge_list *edge_list, int e, struct bb_info *info)
{
  bool need_commit = false;

  for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
    {
      edge eg = INDEX_EDGE (edge_list, ed);
      int mode;

      if ((mode = (int)(intptr_t)(eg->aux)) != -1)
	{
	  HARD_REG_SET live_at_edge;
	  basic_block src_bb = eg->src;
	  int cur_mode = info[src_bb->index].mode_out;
	  rtx_insn *mode_set;

	  REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

	  rtl_profile_for_edge (eg);
	  start_sequence ();

	  targetm.mode_switching.emit (e, mode, cur_mode, live_at_edge);

	  mode_set = get_insns ();
	  end_sequence ();
	  default_rtl_profile ();

	  /* Do not bother to insert an empty sequence.  */
	  if (mode_set == NULL)
	    continue;

	  /* We should not get an abnormal edge here.  */
	  gcc_assert (! (eg->flags & EDGE_ABNORMAL));

	  need_commit = true;
	  insert_insn_on_edge (mode_set, eg);
	}
    }

  return need_commit;
}

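/* Note on the protocol above: the caller is expected to have stored the
   chosen mode in each edge's AUX field, with -1 meaning "no switch", as
   optimize_mode_switching does below.  insert_insn_on_edge only queues
   the sequence; nothing is materialized until the caller sees a true
   return value and runs commit_edge_insertions.  */
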
/* Allocate a new SEGINFO structure, initialized with the MODE, INSN,
   and basic block BB parameters.
   INSN may not be a NOTE_INSN_BASIC_BLOCK, unless it is an empty
   basic block; that allows us later to insert instructions in a FIFO-like
   manner.  */

static struct seginfo *
new_seginfo (int mode, rtx_insn *insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;

  gcc_assert (!NOTE_INSN_BASIC_BLOCK_P (insn)
	      || insn == BB_END (NOTE_BASIC_BLOCK (insn)));
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  ptr->regs_live = regs_live;
  return ptr;
}

171 
172 /* Add a seginfo element to the end of a list.
173    HEAD is a pointer to the list beginning.
174    INFO is the structure to be linked in.  */
175 
176 static void
add_seginfo(struct bb_info * head,struct seginfo * info)177 add_seginfo (struct bb_info *head, struct seginfo *info)
178 {
179   struct seginfo *ptr;
180 
181   if (head->seginfo == NULL)
182     head->seginfo = info;
183   else
184     {
185       ptr = head->seginfo;
186       while (ptr->next != NULL)
187 	ptr = ptr->next;
188       ptr->next = info;
189     }
190 }
191 
/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}

/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}

/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.

	   x86 targets use mode-switching infrastructure to
	   conditionally insert vzeroupper instruction at the exit
	   from the function where there is no need to switch the
	   mode before the return value copy.  The vzeroupper insertion
	   pass runs after reload, so use !reload_completed as a stand-in
	   for x86 to skip the search for the return value copy insn.

	   N.b.: the code below assumes that the return copy insn
	   immediately precedes its corresponding use insn.  This
	   assumption does not hold after reload, since the sched1 pass
	   can schedule the return copy insn away from its
	   corresponding use insn.  */
	if (!reload_completed
	    && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = REG_NREGS (ret_reg);
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches the end of a non-void
			   function, there are no return copy insns at all.
			   This avoids an ICE on that invalid function.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num = hard_regno_nregs (copy_start,
						   GET_MODE (copy_reg));

		    /* If the return register is not likely spilled - as is
		       the case for floating point on SH4 - then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode =
			  targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
			        (copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| nregs != REG_NREGS (ret_reg)
			/* For multi-hard-register floating point
			   values, sometimes the likely-spilled part
			   is ordinarily copied first, then the other
			   part is set with an arithmetic operation.
			   This doesn't actually cause reload
			   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}

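/* A rough sketch of the common case handled above (register names are
   illustrative only): when the exit-fallthrough block ends in

       (set (reg:SF r0) (reg:SF pseudo))	;; return value copy
       (use (reg:SF r0))

   the block is split just before the copy, so the LCM machinery can
   place the final mode switch in the resulting pre-exit block, ahead
   of the return value copy.  */
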
/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */

static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for two extra
	   blocks split from the entry and exit block.  */
	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  entry_exit_extra = 3;

	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info,
		      last_basic_block_for_fn (cfun) + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure that if MODE_ENTRY is defined, MODE_EXIT is defined too.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
	      || (!targetm.mode_switching.entry
		  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine the mode (if any) that the first use of entity E needs.
	 This will be the mode that is anticipatable for this block.
	 Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	  {
	    edge_iterator ei;
	    edge eg;
	    FOR_EACH_EDGE (eg, ei, bb->preds)
	      if (eg->flags & EDGE_COMPLEX)
		break;
	    if (eg)
	      {
		rtx_insn *ins_pos = BB_HEAD (bb);
		if (LABEL_P (ins_pos))
		  ins_pos = NEXT_INSN (ins_pos);
		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
		if (ins_pos != BB_END (bb))
		  ins_pos = NEXT_INSN (ins_pos);
		ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
		add_seginfo (info + bb->index, ptr);
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	      }
	  }

	  FOR_BB_INSNS (bb, insn)
	    {
	      if (INSN_P (insn))
		{
		  int mode = targetm.mode_switching.needed (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      any_set_required = true;
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb->index, live_now);
		      add_seginfo (info + bb->index, ptr);
		      for (i = 0; i < no_mode; i++)
			clear_mode_bit (transp[bb->index], j, i);
		    }

		  if (targetm.mode_switching.after)
		    last_mode = targetm.mode_switching.after (e, last_mode,
							      insn);

		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), &live_now);

		  note_stores (insn, reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), &live_now);
		}
	    }

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	    {
	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
	      add_seginfo (info + bb->index, ptr);
	      if (last_mode != no_mode)
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	    }
	}
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	{
	  int mode = targetm.mode_switching.entry (e);

	  info[post_entry->index].mode_out =
	    info[post_entry->index].mode_in = no_mode;
	  if (pre_exit)
	    {
	      info[pre_exit->index].mode_out =
		info[pre_exit->index].mode_in = no_mode;
	    }

	  if (mode != no_mode)
	    {
	      bb = post_entry;

	      /* By always making this nontransparent, we save
		 an extra check in make_preds_opaque.  We also
		 need this to avoid confusing pre_edge_lcm when
		 antic is cleared but transp and comp are set.  */
	      for (i = 0; i < no_mode; i++)
		clear_mode_bit (transp[bb->index], j, i);

	      /* Insert a fake computing definition of MODE into entry
		 blocks which compute no mode.  This represents the mode on
		 entry.  */
	      info[bb->index].computing = mode;

	      if (pre_exit)
		info[pre_exit->index].seginfo->mode =
		  targetm.mode_switching.exit (e);
	    }
	}

      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
	{
	  int m = targetm.mode_switching.priority (entity_map[j], i);

	  FOR_EACH_BB_FN (bb, cfun)
	    {
	      if (info[bb->index].seginfo->mode == m)
		set_mode_bit (antic[bb->index], j, m);

	      if (info[bb->index].computing == m)
		set_mode_bit (comp[bb->index], j, m);
	    }
	}
    }

  /* Calculate the optimal locations for the placement of mode switches
     to modes with priority I.  */

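  /* A mode is "killed" in every block that is not transparent for it;
     KILL is therefore just the complement of TRANSP.  */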
  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
				kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been inserted by lcm.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
	{
	  edge eg = INDEX_EDGE (edge_list, ed);

	  eg->aux = (void *)(intptr_t)-1;

	  for (i = 0; i < no_mode; i++)
	    {
	      int m = targetm.mode_switching.priority (entity_map[j], i);
	      if (mode_bit_p (insert[ed], j, m))
		{
		  eg->aux = (void *)(intptr_t)m;
		  break;
		}
	    }
	}

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct bb_info *info = bb_info[j];
	  int last_mode = no_mode;

	  /* Initialize the mode-out availability for bb.  */
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avout[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_out = last_mode;

	  /* Initialize the mode-in availability for bb.  */
	  last_mode = no_mode;
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avin[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_in = last_mode;

	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (del[bb->index], j, i))
	      info[bb->index].seginfo->mode = no_mode;
	}

      /* Now output the remaining mode sets in all the segments.  */

      /* In case no mode was inserted, the mode information on the edge
	 might not be complete.
	 Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr, *next;
	  int cur_mode = bb_info[j][bb->index].mode_in;

	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != no_mode)
		{
		  rtx_insn *mode_set;

		  rtl_profile_for_bb (bb);
		  start_sequence ();

		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
					       cur_mode, ptr->regs_live);
		  mode_set = get_insns ();
		  end_sequence ();

		  /* Modes kill each other inside a basic block.  */
		  cur_mode = ptr->mode;

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		    {
		      emitted = true;
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			/* We need to emit the insns in a FIFO-like manner,
			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
			   only used for initially empty basic blocks, we
			   can achieve this by appending at the end of
			   the block.  */
			emit_insn_after
			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
		      else
			emit_insn_before (mode_set, ptr->insn_ptr);
		    }

		  default_rtl_profile ();
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }

  free_edge_list (edge_list);

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      free_dominance_info (CDI_DOMINATORS);
      cleanup_cfg (CLEANUP_NO_INSN_DEL);
    }
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}


#endif /* OPTIMIZE_MODE_SWITCHING */

namespace {

const pass_data pass_data_mode_switching =
{
  RTL_PASS, /* type */
  "mode_sw", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MODE_SWITCH, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_mode_switching : public rtl_opt_pass
{
public:
  pass_mode_switching (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_mode_switching, ctxt)
  {}

  /* opt_pass methods: */
  /* The epiphany backend creates a second instance of this pass, so we need
     a clone method.  */
  opt_pass * clone () { return new pass_mode_switching (m_ctxt); }
  virtual bool gate (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      return true;
#else
      return false;
#endif
    }

  virtual unsigned int execute (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */
      return 0;
    }

}; // class pass_mode_switching

} // anon namespace

rtl_opt_pass *
make_pass_mode_switching (gcc::context *ctxt)
{
  return new pass_mode_switching (ctxt);
}
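
/* A minimal usage sketch, assuming a gcc::context *CTXT is at hand; in
   practice the pass is instantiated from the pass pipeline rather than
   by hand, and the epiphany port clones a second instance as noted
   above.  */
#if 0
rtl_opt_pass *mode_sw = make_pass_mode_switching (ctxt);
#endif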